2 * Copyright (c) 2014 Andrew Turner
3 * Copyright (c) 2014 The FreeBSD Foundation
6 * Portions of this software were developed by Semihalf
7 * under sponsorship of the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/kernel.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
43 #include <machine/atomic.h>
44 #include <machine/cpu.h>
45 #include <machine/cpufunc.h>
46 #include <machine/undefined.h>
47 #include <machine/elf.h>
/* NOTE(review): usage not visible in this chunk — presumably serializes identification output; confirm. */
49 static int ident_lock;
/* Forward declarations for helpers defined later in this file. */
50 static void print_cpu_features(u_int cpu);
51 static u_long parse_cpu_features_hwcap(u_int cpu);
/* Machine class reported via the hw.machine sysctl below. */
53 char machine[] = "arm64";
/* When set, 32-bit compat requests are answered with "arm" (see sysctl_hw_machine()). */
56 extern int adaptive_machine_arch;
60 sysctl_hw_machine(SYSCTL_HANDLER_ARGS)
63 static const char machine32[] = "arm";
67 if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch)
68 error = SYSCTL_OUT(req, machine32, sizeof(machine32));
71 error = SYSCTL_OUT(req, machine, sizeof(machine));
/* hw.machine: string produced by sysctl_hw_machine(). */
75 SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD |
76 CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class");
/* hw.model: human-readable CPU model. NOTE(review): populated outside this chunk — confirm. */
78 static char cpu_model[64];
79 SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD,
80 cpu_model, sizeof(cpu_model), "Machine model");
83 * Per-CPU affinity as provided in MPIDR_EL1
84 * Indexed by CPU number in logical order selected by the system.
85 * Relevant fields can be extracted using CPU_AFFn macros,
86 * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system.
89 * Aff1 - Cluster number
90 * Aff0 - CPU number in Aff1 cluster
92 uint64_t __cpu_affinity[MAXCPU];
/* Highest affinity level in use; controls how many Aff fields print_cpu_features() emits. */
93 static u_int cpu_aff_levels;
/*
 * Fragment of struct cpu_desc: implementer/part name strings plus cached
 * copies of the AArch64 ID registers (accessed via the user_regs[] offsets
 * and printed by print_cpu_features()).
 */
100 const char *cpu_impl_name;
101 const char *cpu_part_name;
104 uint64_t id_aa64afr0;
105 uint64_t id_aa64afr1;
106 uint64_t id_aa64dfr0;
107 uint64_t id_aa64dfr1;
108 uint64_t id_aa64isar0;
109 uint64_t id_aa64isar1;
110 uint64_t id_aa64mmfr0;
111 uint64_t id_aa64mmfr1;
112 uint64_t id_aa64mmfr2;
113 uint64_t id_aa64pfr0;
114 uint64_t id_aa64pfr1;
/* Per-CPU hardware description, indexed by logical CPU id. */
117 static struct cpu_desc cpu_desc[MAXCPU];
/* Synthetic "safe" register values exposed to userland by user_mrs_handler(). */
118 static struct cpu_desc user_cpu_desc;
/*
 * Bitmask selecting which ID registers print_cpu_features() reports for
 * CPUs other than CPU 0.  NOTE(review): set outside this chunk — confirm
 * it marks registers that differ between CPUs.
 */
119 static u_int cpu_print_regs;
120 #define PRINT_ID_AA64_AFR0 0x00000001
121 #define PRINT_ID_AA64_AFR1 0x00000002
122 #define PRINT_ID_AA64_DFR0 0x00000010
123 #define PRINT_ID_AA64_DFR1 0x00000020
124 #define PRINT_ID_AA64_ISAR0 0x00000100
125 #define PRINT_ID_AA64_ISAR1 0x00000200
126 #define PRINT_ID_AA64_MMFR0 0x00001000
127 #define PRINT_ID_AA64_MMFR1 0x00002000
128 #define PRINT_ID_AA64_MMFR2 0x00004000
129 #define PRINT_ID_AA64_PFR0 0x00010000
130 #define PRINT_ID_AA64_PFR1 0x00020000
/* Fragment of struct cpu_parts: human-readable name for a MIDR part number. */
134 const char *part_name;
136 #define CPU_PART_NONE { 0, "Unknown Processor" }
138 struct cpu_implementers {
140 const char *impl_name;
142 * Part number is implementation defined
143 * so each vendor will have its own set of values and names.
145 const struct cpu_parts *cpu_parts;
147 #define CPU_IMPLEMENTER_NONE { 0, "Unknown Implementer", cpu_parts_none }
150 * Per-implementer table of (PartNum, CPU Name) pairs.
/* ARM Ltd. part numbers. */
153 static const struct cpu_parts cpu_parts_arm[] = {
154 { CPU_PART_FOUNDATION, "Foundation-Model" },
155 { CPU_PART_CORTEX_A35, "Cortex-A35" },
156 { CPU_PART_CORTEX_A53, "Cortex-A53" },
157 { CPU_PART_CORTEX_A55, "Cortex-A55" },
158 { CPU_PART_CORTEX_A57, "Cortex-A57" },
159 { CPU_PART_CORTEX_A65, "Cortex-A65" },
160 { CPU_PART_CORTEX_A72, "Cortex-A72" },
161 { CPU_PART_CORTEX_A73, "Cortex-A73" },
162 { CPU_PART_CORTEX_A75, "Cortex-A75" },
163 { CPU_PART_CORTEX_A76, "Cortex-A76" },
164 { CPU_PART_CORTEX_A76AE, "Cortex-A76AE" },
165 { CPU_PART_CORTEX_A77, "Cortex-A77" },
166 { CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
/* Cavium part numbers. */
170 static const struct cpu_parts cpu_parts_cavium[] = {
171 { CPU_PART_THUNDERX, "ThunderX" },
172 { CPU_PART_THUNDERX2, "ThunderX2" },
/* APM / Ampere part numbers. */
177 static const struct cpu_parts cpu_parts_apm[] = {
178 { CPU_PART_EMAG8180, "eMAG 8180" },
/* Fallback table for implementers whose parts we do not decode. */
183 static const struct cpu_parts cpu_parts_none[] = {
188 * Implementers table.
190 const struct cpu_implementers cpu_implementers[] = {
191 { CPU_IMPL_ARM, "ARM", cpu_parts_arm },
192 { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none },
193 { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium },
194 { CPU_IMPL_DEC, "DEC", cpu_parts_none },
195 { CPU_IMPL_INFINEON, "IFX", cpu_parts_none },
196 { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none },
197 { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none },
198 { CPU_IMPL_APM, "APM", cpu_parts_apm },
199 { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_none },
200 { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none },
201 { CPU_IMPL_INTEL, "Intel", cpu_parts_none },
202 CPU_IMPLEMENTER_NONE,
/*
 * Field-type encoding for struct mrs_field.type: low 4 bits hold the type
 * (MRS_INVALID terminates a table); MRS_EXACT entries carry a forced user
 * value in the upper bits, extracted with MRS_EXACT_FIELD().
 */
205 #define MRS_TYPE_MASK 0xf
206 #define MRS_INVALID 0
208 #define MRS_EXACT_VAL(x) (MRS_EXACT | ((x) << 4))
209 #define MRS_EXACT_FIELD(x) ((x) >> 4)
/* One (raw field value, description) pair used when printing ID registers. */
212 struct mrs_field_value {
/* Build a single mrs_field_value initializer. */
217 #define MRS_FIELD_VALUE(_value, _desc) \
/* Shorthand for the common two-state (not implemented / implemented) fields. */
223 #define MRS_FIELD_VALUE_NONE_IMPL(_reg, _field, _none, _impl) \
224 MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _none, ""), \
225 MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _impl, #_field)
/* Counted fields (e.g. breakpoints) encode count-1; expand all 16 values. */
227 #define MRS_FIELD_VALUE_COUNT(_reg, _field, _desc) \
228 MRS_FIELD_VALUE(0ul << _reg ## _ ## _field ## _SHIFT, "1 " _desc), \
229 MRS_FIELD_VALUE(1ul << _reg ## _ ## _field ## _SHIFT, "2 " _desc "s"), \
230 MRS_FIELD_VALUE(2ul << _reg ## _ ## _field ## _SHIFT, "3 " _desc "s"), \
231 MRS_FIELD_VALUE(3ul << _reg ## _ ## _field ## _SHIFT, "4 " _desc "s"), \
232 MRS_FIELD_VALUE(4ul << _reg ## _ ## _field ## _SHIFT, "5 " _desc "s"), \
233 MRS_FIELD_VALUE(5ul << _reg ## _ ## _field ## _SHIFT, "6 " _desc "s"), \
234 MRS_FIELD_VALUE(6ul << _reg ## _ ## _field ## _SHIFT, "7 " _desc "s"), \
235 MRS_FIELD_VALUE(7ul << _reg ## _ ## _field ## _SHIFT, "8 " _desc "s"), \
236 MRS_FIELD_VALUE(8ul << _reg ## _ ## _field ## _SHIFT, "9 " _desc "s"), \
237 MRS_FIELD_VALUE(9ul << _reg ## _ ## _field ## _SHIFT, "10 "_desc "s"), \
238 MRS_FIELD_VALUE(10ul<< _reg ## _ ## _field ## _SHIFT, "11 "_desc "s"), \
239 MRS_FIELD_VALUE(11ul<< _reg ## _ ## _field ## _SHIFT, "12 "_desc "s"), \
240 MRS_FIELD_VALUE(12ul<< _reg ## _ ## _field ## _SHIFT, "13 "_desc "s"), \
241 MRS_FIELD_VALUE(13ul<< _reg ## _ ## _field ## _SHIFT, "14 "_desc "s"), \
242 MRS_FIELD_VALUE(14ul<< _reg ## _ ## _field ## _SHIFT, "15 "_desc "s"), \
243 MRS_FIELD_VALUE(15ul<< _reg ## _ ## _field ## _SHIFT, "16 "_desc "s")
/* Sentinel terminating an mrs_field_value table (checked via .desc == NULL). */
245 #define MRS_FIELD_VALUE_END { .desc = NULL }
/* Fragment of struct mrs_field: the value table for this register field. */
249 struct mrs_field_value *values;
/* Build one mrs_field initializer from the register/field macro names. */
256 #define MRS_FIELD(_register, _name, _sign, _type, _values) \
261 .shift = _register ## _ ## _name ## _SHIFT, \
262 .mask = _register ## _ ## _name ## _MASK, \
263 .values = (_values), \
266 #define MRS_FIELD_END { .type = MRS_INVALID, }
268 /* ID_AA64AFR0_EL1 */
/* Auxiliary feature registers are implementation defined; no fields decoded. */
269 static struct mrs_field id_aa64afr0_fields[] = {
274 /* ID_AA64AFR1_EL1 */
275 static struct mrs_field id_aa64afr1_fields[] = {
280 /* ID_AA64DFR0_EL1 */
/* PMSVer: Statistical Profiling Extension. */
281 static struct mrs_field_value id_aa64dfr0_pmsver[] = {
282 MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_NONE, ""),
283 MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_V1, "SPE"),
/* CTX_CMPs: number of context-aware breakpoints (encoded as count - 1). */
287 static struct mrs_field_value id_aa64dfr0_ctx_cmps[] = {
288 MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, CTX_CMPs, "CTX BKPT"),
/* WRPs: number of watchpoints (encoded as count - 1). */
292 static struct mrs_field_value id_aa64dfr0_wrps[] = {
293 MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, WRPs, "Watchpoint"),
/* BRPs: number of breakpoints (encoded as count - 1). */
297 static struct mrs_field_value id_aa64dfr0_brps[] = {
298 MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, BRPs, "Breakpoint"),
/* PMUVer: Performance Monitors Extension version. */
302 static struct mrs_field_value id_aa64dfr0_pmuver[] = {
303 MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_NONE, ""),
304 MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3, "PMUv3"),
305 MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_1, "PMUv3+16 bit evtCount"),
306 MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_IMPL, "IMPL PMU"),
310 static struct mrs_field_value id_aa64dfr0_tracever[] = {
311 MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_NONE, ""),
312 MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_IMPL, "Trace"),
316 static struct mrs_field_value id_aa64dfr0_debugver[] = {
317 MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8, "Debugv8"),
318 MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_VHE, "Debugv8_VHE"),
319 MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_2, "Debugv8.2"),
/* Userland sees an exact DebugVer of 0x6 (Debugv8); other fields masked. */
323 static struct mrs_field id_aa64dfr0_fields[] = {
324 MRS_FIELD(ID_AA64DFR0, PMSVer, false, MRS_EXACT, id_aa64dfr0_pmsver),
325 MRS_FIELD(ID_AA64DFR0, CTX_CMPs, false, MRS_EXACT,
326 id_aa64dfr0_ctx_cmps),
327 MRS_FIELD(ID_AA64DFR0, WRPs, false, MRS_EXACT, id_aa64dfr0_wrps),
328 MRS_FIELD(ID_AA64DFR0, BRPs, false, MRS_LOWER, id_aa64dfr0_brps),
329 MRS_FIELD(ID_AA64DFR0, PMUVer, false, MRS_EXACT, id_aa64dfr0_pmuver),
330 MRS_FIELD(ID_AA64DFR0, TraceVer, false, MRS_EXACT,
331 id_aa64dfr0_tracever),
332 MRS_FIELD(ID_AA64DFR0, DebugVer, false, MRS_EXACT_VAL(0x6),
333 id_aa64dfr0_debugver),
/* ID_AA64DFR1_EL1: reserved, no decoded fields. */
339 static struct mrs_field id_aa64dfr1_fields[] = {
344 /* ID_AA64ISAR0_EL1 */
/* Instruction-set attribute tables: one value table per register field. */
345 static struct mrs_field_value id_aa64isar0_dp[] = {
346 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, DP, NONE, IMPL),
350 static struct mrs_field_value id_aa64isar0_sm4[] = {
351 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM4, NONE, IMPL),
355 static struct mrs_field_value id_aa64isar0_sm3[] = {
356 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM3, NONE, IMPL),
360 static struct mrs_field_value id_aa64isar0_sha3[] = {
361 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA3, NONE, IMPL),
365 static struct mrs_field_value id_aa64isar0_rdm[] = {
366 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, RDM, NONE, IMPL),
370 static struct mrs_field_value id_aa64isar0_atomic[] = {
371 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, Atomic, NONE, IMPL),
375 static struct mrs_field_value id_aa64isar0_crc32[] = {
376 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, CRC32, NONE, BASE),
380 static struct mrs_field_value id_aa64isar0_sha2[] = {
381 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA2, NONE, BASE),
382 MRS_FIELD_VALUE(ID_AA64ISAR0_SHA2_512, "SHA2+SHA512"),
386 static struct mrs_field_value id_aa64isar0_sha1[] = {
387 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA1, NONE, BASE),
391 static struct mrs_field_value id_aa64isar0_aes[] = {
392 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, AES, NONE, BASE),
393 MRS_FIELD_VALUE(ID_AA64ISAR0_AES_PMULL, "AES+PMULL"),
/* All ISAR0 fields are MRS_LOWER: userland sees the minimum across CPUs. */
397 static struct mrs_field id_aa64isar0_fields[] = {
398 MRS_FIELD(ID_AA64ISAR0, DP, false, MRS_LOWER, id_aa64isar0_dp),
399 MRS_FIELD(ID_AA64ISAR0, SM4, false, MRS_LOWER, id_aa64isar0_sm4),
400 MRS_FIELD(ID_AA64ISAR0, SM3, false, MRS_LOWER, id_aa64isar0_sm3),
401 MRS_FIELD(ID_AA64ISAR0, SHA3, false, MRS_LOWER, id_aa64isar0_sha3),
402 MRS_FIELD(ID_AA64ISAR0, RDM, false, MRS_LOWER, id_aa64isar0_rdm),
403 MRS_FIELD(ID_AA64ISAR0, Atomic, false, MRS_LOWER, id_aa64isar0_atomic),
404 MRS_FIELD(ID_AA64ISAR0, CRC32, false, MRS_LOWER, id_aa64isar0_crc32),
405 MRS_FIELD(ID_AA64ISAR0, SHA2, false, MRS_LOWER, id_aa64isar0_sha2),
406 MRS_FIELD(ID_AA64ISAR0, SHA1, false, MRS_LOWER, id_aa64isar0_sha1),
407 MRS_FIELD(ID_AA64ISAR0, AES, false, MRS_LOWER, id_aa64isar0_aes),
412 /* ID_AA64ISAR1_EL1 */
/* GPI/GPA: implementation-defined / architected generic PAuth code auth. */
413 static struct mrs_field_value id_aa64isar1_gpi[] = {
414 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPI, NONE, IMPL),
418 static struct mrs_field_value id_aa64isar1_gpa[] = {
419 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL),
423 static struct mrs_field_value id_aa64isar1_lrcpc[] = {
424 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, LRCPC, NONE, IMPL),
428 static struct mrs_field_value id_aa64isar1_fcma[] = {
429 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FCMA, NONE, IMPL),
433 static struct mrs_field_value id_aa64isar1_jscvt[] = {
434 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, JSCVT, NONE, IMPL),
438 static struct mrs_field_value id_aa64isar1_api[] = {
439 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, API, NONE, IMPL),
443 static struct mrs_field_value id_aa64isar1_apa[] = {
444 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL),
/* DPB: DC CVAP (persistent-memory cache clean) support. */
448 static struct mrs_field_value id_aa64isar1_dpb[] = {
449 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, DPB, NONE, IMPL),
/* PAuth fields are MRS_EXACT (hidden from userland); the rest are MRS_LOWER. */
453 static struct mrs_field id_aa64isar1_fields[] = {
454 MRS_FIELD(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi),
455 MRS_FIELD(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa),
456 MRS_FIELD(ID_AA64ISAR1, LRCPC, false, MRS_LOWER, id_aa64isar1_lrcpc),
457 MRS_FIELD(ID_AA64ISAR1, FCMA, false, MRS_LOWER, id_aa64isar1_fcma),
458 MRS_FIELD(ID_AA64ISAR1, JSCVT, false, MRS_LOWER, id_aa64isar1_jscvt),
459 MRS_FIELD(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api),
460 MRS_FIELD(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa),
461 MRS_FIELD(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb),
466 /* ID_AA64MMFR0_EL1 */
/* Memory-model attribute tables: translation granules, endianness, PA/ASID sizes. */
467 static struct mrs_field_value id_aa64mmfr0_tgran4[] = {
468 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran4, NONE, IMPL),
472 static struct mrs_field_value id_aa64mmfr0_tgran64[] = {
473 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran64, NONE, IMPL),
477 static struct mrs_field_value id_aa64mmfr0_tgran16[] = {
478 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran16, NONE, IMPL),
482 static struct mrs_field_value id_aa64mmfr0_bigend_el0[] = {
483 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEndEL0, FIXED, MIXED),
487 static struct mrs_field_value id_aa64mmfr0_snsmem[] = {
488 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, SNSMem, NONE, DISTINCT),
492 static struct mrs_field_value id_aa64mmfr0_bigend[] = {
493 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEnd, FIXED, MIXED),
497 static struct mrs_field_value id_aa64mmfr0_asid_bits[] = {
498 MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_8, "8bit ASID"),
499 MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_16, "16bit ASID"),
503 static struct mrs_field_value id_aa64mmfr0_parange[] = {
504 MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4G, "4GB PA"),
505 MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_64G, "64GB PA"),
506 MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_1T, "1TB PA"),
507 MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4T, "4TB PA"),
508 MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_16T, "16TB PA"),
509 MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_256T, "256TB PA"),
510 MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4P, "4PB PA"),
/* All MMFR0 fields are MRS_EXACT: kernel-only detail, hidden from userland. */
514 static struct mrs_field id_aa64mmfr0_fields[] = {
515 MRS_FIELD(ID_AA64MMFR0, TGran4, false, MRS_EXACT, id_aa64mmfr0_tgran4),
516 MRS_FIELD(ID_AA64MMFR0, TGran64, false, MRS_EXACT,
517 id_aa64mmfr0_tgran64),
518 MRS_FIELD(ID_AA64MMFR0, TGran16, false, MRS_EXACT,
519 id_aa64mmfr0_tgran16),
520 MRS_FIELD(ID_AA64MMFR0, BigEndEL0, false, MRS_EXACT,
521 id_aa64mmfr0_bigend_el0),
522 MRS_FIELD(ID_AA64MMFR0, SNSMem, false, MRS_EXACT, id_aa64mmfr0_snsmem),
523 MRS_FIELD(ID_AA64MMFR0, BigEnd, false, MRS_EXACT, id_aa64mmfr0_bigend),
524 MRS_FIELD(ID_AA64MMFR0, ASIDBits, false, MRS_EXACT,
525 id_aa64mmfr0_asid_bits),
526 MRS_FIELD(ID_AA64MMFR0, PARange, false, MRS_EXACT,
527 id_aa64mmfr0_parange),
532 /* ID_AA64MMFR1_EL1 */
/* Memory-model attributes: PAN, VHE, hardware A/D bit management, etc. */
533 static struct mrs_field_value id_aa64mmfr1_xnx[] = {
534 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, XNX, NONE, IMPL),
538 static struct mrs_field_value id_aa64mmfr1_specsei[] = {
539 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, SpecSEI, NONE, IMPL),
543 static struct mrs_field_value id_aa64mmfr1_pan[] = {
544 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, PAN, NONE, IMPL),
545 MRS_FIELD_VALUE(ID_AA64MMFR1_PAN_ATS1E1, "PAN+ATS1E1"),
549 static struct mrs_field_value id_aa64mmfr1_lo[] = {
550 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, LO, NONE, IMPL),
554 static struct mrs_field_value id_aa64mmfr1_hpds[] = {
555 MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_NONE, ""),
556 MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_HPD, "HPD"),
557 MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_TTPBHA, "HPD+TTPBHA"),
561 static struct mrs_field_value id_aa64mmfr1_vh[] = {
562 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, VH, NONE, IMPL),
566 static struct mrs_field_value id_aa64mmfr1_vmidbits[] = {
567 MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_8, "8bit VMID"),
568 MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_16, "16bit VMID"),
572 static struct mrs_field_value id_aa64mmfr1_hafdbs[] = {
573 MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_NONE, ""),
574 MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF, "HAF"),
575 MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF_DBS, "HAF+DS"),
/* All MMFR1 fields are MRS_EXACT: kernel-only detail, hidden from userland. */
579 static struct mrs_field id_aa64mmfr1_fields[] = {
580 MRS_FIELD(ID_AA64MMFR1, XNX, false, MRS_EXACT, id_aa64mmfr1_xnx),
581 MRS_FIELD(ID_AA64MMFR1, SpecSEI, false, MRS_EXACT,
582 id_aa64mmfr1_specsei),
583 MRS_FIELD(ID_AA64MMFR1, PAN, false, MRS_EXACT, id_aa64mmfr1_pan),
584 MRS_FIELD(ID_AA64MMFR1, LO, false, MRS_EXACT, id_aa64mmfr1_lo),
585 MRS_FIELD(ID_AA64MMFR1, HPDS, false, MRS_EXACT, id_aa64mmfr1_hpds),
586 MRS_FIELD(ID_AA64MMFR1, VH, false, MRS_EXACT, id_aa64mmfr1_vh),
587 MRS_FIELD(ID_AA64MMFR1, VMIDBits, false, MRS_EXACT,
588 id_aa64mmfr1_vmidbits),
589 MRS_FIELD(ID_AA64MMFR1, HAFDBS, false, MRS_EXACT, id_aa64mmfr1_hafdbs),
594 /* ID_AA64MMFR2_EL1 */
/* NV: nested virtualization support. */
595 static struct mrs_field_value id_aa64mmfr2_nv[] = {
596 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, NV, NONE, IMPL),
600 static struct mrs_field_value id_aa64mmfr2_ccidx[] = {
601 MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_32, "32bit CCIDX"),
602 MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_64, "32bit CCIDX"),
/* VARange: maximum virtual address width. */
606 static struct mrs_field_value id_aa64mmfr2_varange[] = {
607 MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_48, "48bit VA"),
608 MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_52, "52bit VA"),
612 static struct mrs_field_value id_aa64mmfr2_iesb[] = {
613 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IESB, NONE, IMPL),
617 static struct mrs_field_value id_aa64mmfr2_lsm[] = {
618 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, LSM, NONE, IMPL),
622 static struct mrs_field_value id_aa64mmfr2_uao[] = {
623 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, UAO, NONE, IMPL),
627 static struct mrs_field_value id_aa64mmfr2_cnp[] = {
628 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, CnP, NONE, IMPL),
/* All MMFR2 fields are MRS_EXACT: kernel-only detail, hidden from userland. */
632 static struct mrs_field id_aa64mmfr2_fields[] = {
633 MRS_FIELD(ID_AA64MMFR2, NV, false, MRS_EXACT, id_aa64mmfr2_nv),
634 MRS_FIELD(ID_AA64MMFR2, CCIDX, false, MRS_EXACT, id_aa64mmfr2_ccidx),
635 MRS_FIELD(ID_AA64MMFR2, VARange, false, MRS_EXACT,
636 id_aa64mmfr2_varange),
637 MRS_FIELD(ID_AA64MMFR2, IESB, false, MRS_EXACT, id_aa64mmfr2_iesb),
638 MRS_FIELD(ID_AA64MMFR2, LSM, false, MRS_EXACT, id_aa64mmfr2_lsm),
639 MRS_FIELD(ID_AA64MMFR2, UAO, false, MRS_EXACT, id_aa64mmfr2_uao),
640 MRS_FIELD(ID_AA64MMFR2, CnP, false, MRS_EXACT, id_aa64mmfr2_cnp),
645 /* ID_AA64PFR0_EL1 */
/* Processor feature tables: Spectre mitigations, SVE, RAS, FP/SIMD, EL support. */
646 static struct mrs_field_value id_aa64pfr0_csv3[] = {
647 MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_NONE, ""),
648 MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_ISOLATED, "CSV3"),
652 static struct mrs_field_value id_aa64pfr0_csv2[] = {
653 MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_NONE, ""),
654 MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_ISOLATED, "CSV2"),
655 MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_SCXTNUM, "SCXTNUM"),
659 static struct mrs_field_value id_aa64pfr0_dit[] = {
660 MRS_FIELD_VALUE(ID_AA64PFR0_DIT_NONE, ""),
661 MRS_FIELD_VALUE(ID_AA64PFR0_DIT_PSTATE, "PSTATE.DIT"),
665 static struct mrs_field_value id_aa64pfr0_amu[] = {
666 MRS_FIELD_VALUE(ID_AA64PFR0_AMU_NONE, ""),
667 MRS_FIELD_VALUE(ID_AA64PFR0_AMU_V1, "AMUv1"),
671 static struct mrs_field_value id_aa64pfr0_mpam[] = {
672 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, MPAM, NONE, IMPL),
676 static struct mrs_field_value id_aa64pfr0_sel2[] = {
677 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SEL2, NONE, IMPL),
681 static struct mrs_field_value id_aa64pfr0_sve[] = {
682 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SVE, NONE, IMPL),
686 static struct mrs_field_value id_aa64pfr0_ras[] = {
687 MRS_FIELD_VALUE(ID_AA64PFR0_RAS_NONE, ""),
688 MRS_FIELD_VALUE(ID_AA64PFR0_RAS_V1, "RASv1"),
692 static struct mrs_field_value id_aa64pfr0_gic[] = {
693 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, GIC, CPUIF_NONE, CPUIF_EN),
697 static struct mrs_field_value id_aa64pfr0_advsimd[] = {
698 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, AdvSIMD, NONE, IMPL),
699 MRS_FIELD_VALUE(ID_AA64PFR0_AdvSIMD_HP, "AdvSIMD+HP"),
703 static struct mrs_field_value id_aa64pfr0_fp[] = {
704 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, FP, NONE, IMPL),
705 MRS_FIELD_VALUE(ID_AA64PFR0_FP_HP, "FP+HP"),
709 static struct mrs_field_value id_aa64pfr0_el3[] = {
710 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL3, NONE, 64),
711 MRS_FIELD_VALUE(ID_AA64PFR0_EL3_64_32, "EL3 32"),
715 static struct mrs_field_value id_aa64pfr0_el2[] = {
716 MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL2, NONE, 64),
717 MRS_FIELD_VALUE(ID_AA64PFR0_EL2_64_32, "EL2 32"),
721 static struct mrs_field_value id_aa64pfr0_el1[] = {
722 MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64, "EL1"),
723 MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64_32, "EL1 32"),
727 static struct mrs_field_value id_aa64pfr0_el0[] = {
728 MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64, "EL0"),
729 MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64_32, "EL0 32"),
/* AdvSIMD/FP are signed MRS_LOWER fields (0xf means not implemented). */
733 static struct mrs_field id_aa64pfr0_fields[] = {
734 MRS_FIELD(ID_AA64PFR0, CSV3, false, MRS_EXACT, id_aa64pfr0_csv3),
735 MRS_FIELD(ID_AA64PFR0, CSV2, false, MRS_EXACT, id_aa64pfr0_csv2),
736 MRS_FIELD(ID_AA64PFR0, DIT, false, MRS_EXACT, id_aa64pfr0_dit),
737 MRS_FIELD(ID_AA64PFR0, AMU, false, MRS_EXACT, id_aa64pfr0_amu),
738 MRS_FIELD(ID_AA64PFR0, MPAM, false, MRS_EXACT, id_aa64pfr0_mpam),
739 MRS_FIELD(ID_AA64PFR0, SEL2, false, MRS_EXACT, id_aa64pfr0_sel2),
740 MRS_FIELD(ID_AA64PFR0, SVE, false, MRS_EXACT, id_aa64pfr0_sve),
741 MRS_FIELD(ID_AA64PFR0, RAS, false, MRS_EXACT, id_aa64pfr0_ras),
742 MRS_FIELD(ID_AA64PFR0, GIC, false, MRS_EXACT, id_aa64pfr0_gic),
743 MRS_FIELD(ID_AA64PFR0, AdvSIMD, true, MRS_LOWER, id_aa64pfr0_advsimd),
744 MRS_FIELD(ID_AA64PFR0, FP, true, MRS_LOWER, id_aa64pfr0_fp),
745 MRS_FIELD(ID_AA64PFR0, EL3, false, MRS_EXACT, id_aa64pfr0_el3),
746 MRS_FIELD(ID_AA64PFR0, EL2, false, MRS_EXACT, id_aa64pfr0_el2),
747 MRS_FIELD(ID_AA64PFR0, EL1, false, MRS_LOWER, id_aa64pfr0_el1),
748 MRS_FIELD(ID_AA64PFR0, EL0, false, MRS_LOWER, id_aa64pfr0_el0),
753 /* ID_AA64PFR1_EL1 */
/* BTI, Speculative Store Bypass Safe, and Memory Tagging Extension fields. */
754 static struct mrs_field_value id_aa64pfr1_bt[] = {
755 MRS_FIELD_VALUE(ID_AA64PFR1_BT_NONE, ""),
756 MRS_FIELD_VALUE(ID_AA64PFR1_BT_IMPL, "BTI"),
760 static struct mrs_field_value id_aa64pfr1_ssbs[] = {
761 MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_NONE, ""),
762 MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE, "PSTATE.SSBS"),
763 MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE_MSR, "PSTATE.SSBS MSR"),
767 static struct mrs_field_value id_aa64pfr1_mte[] = {
768 MRS_FIELD_VALUE(ID_AA64PFR1_MTE_NONE, ""),
769 MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL_EL0, "MTE EL0"),
770 MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL, "MTE"),
/* All PFR1 fields are MRS_EXACT: hidden from userland for now. */
774 static struct mrs_field id_aa64pfr1_fields[] = {
775 MRS_FIELD(ID_AA64PFR1, BT, false, MRS_EXACT, id_aa64pfr1_bt),
776 MRS_FIELD(ID_AA64PFR1, SSBS, false, MRS_EXACT, id_aa64pfr1_ssbs),
777 MRS_FIELD(ID_AA64PFR1, MTE, false, MRS_EXACT, id_aa64pfr1_mte),
/* Fragment of struct mrs_user_reg: describes one ID register emulated for userland. */
781 struct mrs_user_reg {
/* Per-field decode/merge rules for this register. */
786 struct mrs_field *fields;
/* Registers emulated by user_mrs_handler(); offsets index into struct cpu_desc. */
789 static struct mrs_user_reg user_regs[] = {
790 { /* id_aa64isar0_el1 */
791 .reg = ID_AA64ISAR0_EL1,
794 .offset = __offsetof(struct cpu_desc, id_aa64isar0),
795 .fields = id_aa64isar0_fields,
797 { /* id_aa64isar1_el1 */
798 .reg = ID_AA64ISAR1_EL1,
801 .offset = __offsetof(struct cpu_desc, id_aa64isar1),
802 .fields = id_aa64isar1_fields,
804 { /* id_aa64pfr0_el1 */
805 .reg = ID_AA64PFR0_EL1,
808 .offset = __offsetof(struct cpu_desc, id_aa64pfr0),
809 .fields = id_aa64pfr0_fields,
811 { /* id_aa64pfr1_el1 */
812 .reg = ID_AA64PFR1_EL1,
815 .offset = __offsetof(struct cpu_desc, id_aa64pfr1),
816 .fields = id_aa64pfr1_fields,
818 { /* id_aa64dfr0_el1 */
819 .reg = ID_AA64DFR0_EL1,
822 .offset = __offsetof(struct cpu_desc, id_aa64dfr0),
823 .fields = id_aa64dfr0_fields,
/* Access the uint64_t member of a cpu_desc selected by user_regs[idx].offset. */
827 #define CPU_DESC_FIELD(desc, idx) \
828 *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)
/*
 * Undefined-instruction handler emulating userland `mrs xN, <id reg>`.
 * Returns the sanitized value from user_cpu_desc for the ID_AA64*
 * registers, and the raw MIDR/MPIDR/REVIDR for CRm == 0.
 */
831 user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
835 int CRm, Op2, i, reg;
837 if ((insn & MRS_MASK) != MRS_VALUE)
841 * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
842 * These are in the EL1 CPU identification space.
843 * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVID_EL1.
844 * CRm == {4-7} holds the ID_AA64 registers.
846 * For full details see the ARMv8 ARM (ARM DDI 0487C.a)
847 * Table D9-2 System instruction encodings for non-Debug System
850 if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
854 if (CRm > 7 || (CRm < 4 && CRm != 0))
/* Match the trapped encoding against the emulated register table. */
860 for (i = 0; i < nitems(user_regs); i++) {
861 if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
862 value = CPU_DESC_FIELD(user_cpu_desc, i);
/* CRm == 0: pass through the identification registers unmodified. */
870 value = READ_SPECIALREG(midr_el1);
873 value = READ_SPECIALREG(mpidr_el1);
876 value = READ_SPECIALREG(revidr_el1);
884 * We will handle this instruction, move to the next so we
885 * don't trap here again.
887 frame->tf_elr += INSN_SIZE;
889 reg = MRS_REGISTER(insn);
890 /* If reg is 31 then write to xzr, i.e. do nothing */
894 if (reg < nitems(frame->tf_x))
895 frame->tf_x[reg] = value;
/* Register 30 is the link register in the trapframe. */
897 frame->tf_lr = value;
/*
 * Look up the userland-visible copy of ID register `reg` and store the
 * 4-bit field at `field_shift` through `val`.
 */
903 extract_user_id_field(u_int reg, u_int field_shift, uint8_t *val)
908 for (i = 0; i < nitems(user_regs); i++) {
909 if (user_regs[i].reg == reg) {
910 value = CPU_DESC_FIELD(user_cpu_desc, i);
/* NOTE(review): relies on *val being uint8_t to mask to the low bits. */
911 *val = value >> field_shift;
/*
 * Merge CPU `cpu`'s ID registers into user_cpu_desc: MRS_EXACT fields are
 * forced to the table-supplied value; MRS_LOWER fields keep the minimum
 * (signed or unsigned per the field) seen across all CPUs.
 */
920 update_user_regs(u_int cpu)
922 struct mrs_field *fields;
924 int i, j, cur_field, new_field;
926 for (i = 0; i < nitems(user_regs); i++) {
927 value = CPU_DESC_FIELD(cpu_desc[cpu], i);
931 cur = CPU_DESC_FIELD(user_cpu_desc, i);
933 fields = user_regs[i].fields;
934 for (j = 0; fields[j].type != 0; j++) {
935 switch (fields[j].type & MRS_TYPE_MASK) {
/* MRS_EXACT: overwrite with the fixed value encoded in the type. */
937 cur &= ~(0xfu << fields[j].shift);
939 (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
/* MRS_LOWER: keep the smaller of the current and new field values. */
943 new_field = (value >> fields[j].shift) & 0xf;
944 cur_field = (cur >> fields[j].shift) & 0xf;
945 if ((fields[j].sign &&
946 (int)new_field < (int)cur_field) ||
948 (u_int)new_field < (u_int)cur_field)) {
949 cur &= ~(0xfu << fields[j].shift);
950 cur |= new_field << fields[j].shift;
954 panic("Invalid field type: %d", fields[j].type);
958 CPU_DESC_FIELD(user_cpu_desc, i) = cur;
/* AT_HWCAP value passed to userland; populated in identify_cpu_sysinit(). */
963 extern u_long elf_hwcap;
/* True once all CPUs are known to support the LSE atomic instructions. */
964 bool __read_frequently lse_supported = false;
/*
 * SYSINIT hook: build the sanitized userland register view, derive the
 * ELF hwcaps from every CPU, record LSE support, and install the mrs
 * emulation handler.
 */
967 identify_cpu_sysinit(void *dummy __unused)
972 /* Create a user visible cpu description with safe values */
973 memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
974 /* Safe values for these registers */
975 user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE |
976 ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64;
977 user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8;
/* Per-CPU: print features, fold its hwcaps in, and lower the user view. */
980 print_cpu_features(cpu);
981 hwcap = parse_cpu_features_hwcap(cpu);
986 update_user_regs(cpu);
/* LSE atomics must be present on every CPU before the kernel uses them. */
989 if ((elf_hwcap & HWCAP_ATOMICS) != 0) {
990 lse_supported = true;
992 printf("Enabling LSE atomics in the kernel\n");
996 panic("CPU does not support LSE atomic instructions");
999 install_undef_handler(true, user_mrs_handler);
1001 SYSINIT(idenrity_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
1004 parse_cpu_features_hwcap(u_int cpu)
1008 if (ID_AA64ISAR0_DP_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_DP_IMPL)
1009 hwcap |= HWCAP_ASIMDDP;
1011 if (ID_AA64ISAR0_SM4_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM4_IMPL)
1014 if (ID_AA64ISAR0_SM3_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM3_IMPL)
1017 if (ID_AA64ISAR0_RDM_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_RDM_IMPL)
1018 hwcap |= HWCAP_ASIMDRDM;
1020 if (ID_AA64ISAR0_Atomic_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_Atomic_IMPL)
1021 hwcap |= HWCAP_ATOMICS;
1023 if (ID_AA64ISAR0_CRC32_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE)
1024 hwcap |= HWCAP_CRC32;
1026 switch (ID_AA64ISAR0_SHA2_VAL(cpu_desc[cpu].id_aa64isar0)) {
1027 case ID_AA64ISAR0_SHA2_BASE:
1028 hwcap |= HWCAP_SHA2;
1030 case ID_AA64ISAR0_SHA2_512:
1031 hwcap |= HWCAP_SHA2 | HWCAP_SHA512;
1037 if (ID_AA64ISAR0_SHA1_VAL(cpu_desc[cpu].id_aa64isar0))
1038 hwcap |= HWCAP_SHA1;
1040 switch (ID_AA64ISAR0_AES_VAL(cpu_desc[cpu].id_aa64isar0)) {
1041 case ID_AA64ISAR0_AES_BASE:
1044 case ID_AA64ISAR0_AES_PMULL:
1045 hwcap |= HWCAP_PMULL | HWCAP_AES;
1051 if (ID_AA64ISAR1_LRCPC_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_LRCPC_IMPL)
1052 hwcap |= HWCAP_LRCPC;
1054 if (ID_AA64ISAR1_FCMA_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_FCMA_IMPL)
1055 hwcap |= HWCAP_FCMA;
1057 if (ID_AA64ISAR1_JSCVT_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_JSCVT_IMPL)
1058 hwcap |= HWCAP_JSCVT;
1060 if (ID_AA64ISAR1_DPB_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_DPB_IMPL)
1061 hwcap |= HWCAP_DCPOP;
1063 if (ID_AA64PFR0_SVE_VAL(cpu_desc[cpu].id_aa64pfr0) == ID_AA64PFR0_SVE_IMPL)
1066 switch (ID_AA64PFR0_AdvSIMD_VAL(cpu_desc[cpu].id_aa64pfr0)) {
1067 case ID_AA64PFR0_AdvSIMD_IMPL:
1068 hwcap |= HWCAP_ASIMD;
1070 case ID_AA64PFR0_AdvSIMD_HP:
1071 hwcap |= HWCAP_ASIMD | HWCAP_ASIMDDP;
1077 switch (ID_AA64PFR0_FP_VAL(cpu_desc[cpu].id_aa64pfr0)) {
1078 case ID_AA64PFR0_FP_IMPL:
1081 case ID_AA64PFR0_FP_HP:
1082 hwcap |= HWCAP_FP | HWCAP_FPHP;
/*
 * Pretty-print one ID register: emit a comma-separated list of decoded
 * field descriptions, then any leftover (undecoded) bits as a raw hex
 * value.  Decoded fields are cleared from `reg` as they are printed.
 */
1092 print_id_register(struct sbuf *sb, const char *reg_name, uint64_t reg,
1093 struct mrs_field *fields)
1095 struct mrs_field_value *fv;
1096 int field, i, j, printed;
1098 sbuf_printf(sb, "%29s = <", reg_name);
1100 #define SEP_STR ((printed++) == 0) ? "" : ","
1102 for (i = 0; fields[i].type != 0; i++) {
1103 fv = fields[i].values;
1105 /* TODO: Handle with an unknown message */
1109 field = (reg & fields[i].mask) >> fields[i].shift;
1110 for (j = 0; fv[j].desc != NULL; j++) {
1111 if ((fv[j].value >> fields[i].shift) != field)
/* Empty descriptions mean "feature absent"; print nothing for them. */
1114 if (fv[j].desc[0] != '\0')
1115 sbuf_printf(sb, "%s%s", SEP_STR, fv[j].desc)_
1118 if (fv[j].desc == NULL)
1119 sbuf_printf(sb, "%sUnknown %s(%x)", SEP_STR,
1120 fields[i].name, field);
/* Clear the handled field so the residue check below sees only unknowns. */
1122 reg &= ~(0xful << fields[i].shift);
1126 sbuf_printf(sb, "%s%#lx", SEP_STR, reg);
1130 printf("%s>\n", sbuf_data(sb));
/*
 * Pretty-print the identification data cached in cpu_desc[cpu]:
 * implementer/part/revision, MPIDR affinity fields, and the AArch64 ID
 * registers.  For the boot CPU (cpu == 0) every ID register is printed;
 * for secondary CPUs a register is printed only when identify_cpu()
 * found it to differ from CPU 0's value and set the matching PRINT_*
 * bit in cpu_print_regs.
 */
1135 print_cpu_features(u_int cpu)
/* Auto-extending string buffer; formatted and flushed via printf below. */
1139 sb = sbuf_new_auto();
1140 sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
1141 cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
1142 cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision)
1144 sbuf_cat(sb, " affinity:");
/*
 * Print one field per populated MPIDR affinity level, highest first.
 * NOTE(review): the cases appear to fall through so that every level at
 * or below cpu_aff_levels is emitted — confirm the fall-through markers
 * in the full file.
 */
1145 switch(cpu_aff_levels) {
1148 sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
1151 sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
1154 sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
1157 case 0: /* On UP this will be zero */
1158 sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
1162 printf("%s\n", sbuf_data(sb));
1166 * There is a hardware errata where, if one CPU is performing a TLB
1167 * invalidation while another is performing a store-exclusive the
1168 * store-exclusive may return the wrong status. A workaround seems
1169 * to be to use an IPI to invalidate on each CPU, however given the
1170 * limited number of affected units (pass 1.1 is the evaluation
1171 * hardware revision), and the lack of information from Cavium
1172 * this has not been implemented.
1174 * At the time of writing this the only information is from:
1175 * https://lkml.org/lkml/2016/8/4/722
1178 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
1179 * triggers on pass 2.0+.
/* CPU_VAR(midr) == 0 narrows the match to pass 1.x; warn once, on CPU 0. */
1181 if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
1182 CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
1183 printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
1184 "hardware bugs that may cause the incorrect operation of "
1185 "atomic operations.\n");
/*
 * Dump the cached ID registers.  Each one is printed unconditionally on
 * the boot CPU, and on other CPUs only when its PRINT_* bit was set
 * (i.e. the value differs from CPU 0's).
 */
1187 /* AArch64 Instruction Set Attribute Register 0 */
1188 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0)
1189 print_id_register(sb, "Instruction Set Attributes 0",
1190 cpu_desc[cpu].id_aa64isar0, id_aa64isar0_fields);
1192 /* AArch64 Instruction Set Attribute Register 1 */
1193 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0)
1194 print_id_register(sb, "Instruction Set Attributes 1",
1195 cpu_desc[cpu].id_aa64isar1, id_aa64isar1_fields);
1197 /* AArch64 Processor Feature Register 0 */
1198 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0)
1199 print_id_register(sb, "Processor Features 0",
1200 cpu_desc[cpu].id_aa64pfr0, id_aa64pfr0_fields);
1202 /* AArch64 Processor Feature Register 1 */
1203 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0)
1204 print_id_register(sb, "Processor Features 1",
1205 cpu_desc[cpu].id_aa64pfr1, id_aa64pfr1_fields);
1207 /* AArch64 Memory Model Feature Register 0 */
1208 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0)
1209 print_id_register(sb, "Memory Model Features 0",
1210 cpu_desc[cpu].id_aa64mmfr0, id_aa64mmfr0_fields);
1212 /* AArch64 Memory Model Feature Register 1 */
1213 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0)
1214 print_id_register(sb, "Memory Model Features 1",
1215 cpu_desc[cpu].id_aa64mmfr1, id_aa64mmfr1_fields);
1217 /* AArch64 Memory Model Feature Register 2 */
1218 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0)
1219 print_id_register(sb, "Memory Model Features 2",
1220 cpu_desc[cpu].id_aa64mmfr2, id_aa64mmfr2_fields);
1222 /* AArch64 Debug Feature Register 0 */
1223 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0)
1224 print_id_register(sb, "Debug Features 0",
1225 cpu_desc[cpu].id_aa64dfr0, id_aa64dfr0_fields);
1227 /* AArch64 Debug Feature Register 1 */
1228 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0)
1229 print_id_register(sb, "Debug Features 1",
1230 cpu_desc[cpu].id_aa64dfr1, id_aa64dfr1_fields);
1232 /* AArch64 Auxiliary Feature Register 0 */
1233 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0)
1234 print_id_register(sb, "Auxiliary Features 0",
1235 cpu_desc[cpu].id_aa64afr0, id_aa64afr0_fields);
1237 /* AArch64 Auxiliary Feature Register 1 */
1238 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0)
1239 print_id_register(sb, "Auxiliary Features 1",
1240 cpu_desc[cpu].id_aa64afr1, id_aa64afr1_fields);
/*
 * Body of identify_cpu() (signature above this excerpt): run on each CPU
 * as it comes up.  Decodes MIDR into implementer/part/revision strings,
 * caches MPIDR affinity and the AArch64 ID registers in cpu_desc[cpu],
 * then — under a wfe/sev spin lock — compares them against CPU 0's
 * values, setting cpu_print_regs bits for any register that differs so
 * print_cpu_features() will report it.
 */
1255 const struct cpu_parts *cpu_partsp = NULL;
1257 cpu = PCPU_GET(cpuid);
1261 * Store midr to pcpu to allow fast reading
1262 * from EL0, EL1 and assembly code.
1264 PCPU_SET(midr, midr);
/*
 * Look up the implementer.  The table's impl_id == 0 entry acts as a
 * catch-all "unknown" fallback, so cpu_partsp is always assigned.
 */
1266 impl_id = CPU_IMPL(midr);
1267 for (i = 0; i < nitems(cpu_implementers); i++) {
1268 if (impl_id == cpu_implementers[i].impl_id ||
1269 cpu_implementers[i].impl_id == 0) {
1270 cpu_desc[cpu].cpu_impl = impl_id;
1271 cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
1272 cpu_partsp = cpu_implementers[i].cpu_parts;
1277 part_id = CPU_PART(midr);
/*
 * NOTE(review): "&cpu_partsp[i] != NULL" is always true (the address of
 * an array element can never be NULL), so this condition never ends the
 * loop.  Termination relies on the table's part_id == 0 sentinel entry
 * matching the "|| part_id == 0" test below.  The condition should be
 * rewritten to check the sentinel explicitly.
 */
1278 for (i = 0; &cpu_partsp[i] != NULL; i++) {
1279 if (part_id == cpu_partsp[i].part_id ||
1280 cpu_partsp[i].part_id == 0) {
1281 cpu_desc[cpu].cpu_part_num = part_id;
1282 cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
1287 cpu_desc[cpu].cpu_revision = CPU_REV(midr);
1288 cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
/* Global human-readable model string, e.g. for the hw.model sysctl. */
1290 snprintf(cpu_model, sizeof(cpu_model), "%s %s r%dp%d",
1291 cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
1292 cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
1294 /* Save affinity for current CPU */
1295 cpu_desc[cpu].mpidr = get_mpidr();
1296 CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
/* Snapshot the AArch64 ID registers for later printing/comparison. */
1298 cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
1299 cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1);
1300 cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
1301 cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1);
1302 cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
1303 cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
1304 cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(id_aa64mmfr2_el1);
1305 cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
1306 cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);
1310 * This code must run on one cpu at a time, but we are
1311 * not scheduling on the current core so implement a
/*
 * Hand-rolled spin lock: wfe parks the CPU until the holder's "sev"
 * (below, at release) wakes it to retry the acquire-semantics CAS.
 */
1314 while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
1315 __asm __volatile("wfe" ::: "memory");
/*
 * Compare each MPIDR affinity field with CPU 0's; presumably the cases
 * fall through so a difference at any level updates cpu_aff_levels —
 * confirm against the elided case labels in the full file.
 */
1317 switch (cpu_aff_levels) {
1319 if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
1320 CPU_AFF0(cpu_desc[0].mpidr))
1324 if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
1325 CPU_AFF1(cpu_desc[0].mpidr))
1329 if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
1330 CPU_AFF2(cpu_desc[0].mpidr))
1334 if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
1335 CPU_AFF3(cpu_desc[0].mpidr))
/*
 * Flag every ID register that differs from the boot CPU's value so
 * print_cpu_features() prints it for this (and later) CPUs.
 */
1340 if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
1341 cpu_print_regs |= PRINT_ID_AA64_AFR0;
1342 if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
1343 cpu_print_regs |= PRINT_ID_AA64_AFR1;
1345 if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
1346 cpu_print_regs |= PRINT_ID_AA64_DFR0;
1347 if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
1348 cpu_print_regs |= PRINT_ID_AA64_DFR1;
1350 if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
1351 cpu_print_regs |= PRINT_ID_AA64_ISAR0;
1352 if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
1353 cpu_print_regs |= PRINT_ID_AA64_ISAR1;
1355 if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
1356 cpu_print_regs |= PRINT_ID_AA64_MMFR0;
1357 if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
1358 cpu_print_regs |= PRINT_ID_AA64_MMFR1;
1359 if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
1360 cpu_print_regs |= PRINT_ID_AA64_MMFR2;
1362 if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
1363 cpu_print_regs |= PRINT_ID_AA64_PFR0;
1364 if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
1365 cpu_print_regs |= PRINT_ID_AA64_PFR1;
1367 /* Wake up the other CPUs */
/* Release-semantics store pairs with the acquire CAS; sev wakes waiters. */
1368 atomic_store_rel_int(&ident_lock, 0);
1369 __asm __volatile("sev" ::: "memory");