/*
 * sys/arm/arm/cpufunc.c
 * (From a commit titled: "Fix the following -Werror warning from clang 10.0.0")
 */
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * arm9 support code Copyright (C) 2001 ARM Ltd
7  * Copyright (c) 1997 Mark Brinicombe.
8  * Copyright (c) 1997 Causality Limited
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *      This product includes software developed by Causality Limited.
22  * 4. The name of Causality Limited may not be used to endorse or promote
23  *    products derived from this software without specific prior written
24  *    permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * RiscBSD kernel project
39  *
40  * cpufuncs.c
41  *
42  * C functions for supporting CPU / MMU / TLB specific operations.
43  *
44  * Created      : 30/01/97
45  */
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/lock.h>
52 #include <sys/mutex.h>
53 #include <sys/bus.h>
54 #include <machine/bus.h>
55 #include <machine/cpu.h>
56 #include <machine/disassem.h>
57
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 #include <vm/uma.h>
61
62 #include <machine/cpufunc.h>
63
/* PRIMARY CACHE VARIABLES */

/* L1 instruction cache geometry (filled by the pre-ARMv7 probe path). */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

/* L1 data cache geometry; also describes a unified L1 cache. */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

/* Cache type field from the CTR, and unified-vs-split flag. */
int	arm_pcache_type;
int	arm_pcache_unified;

/*
 * D-cache line size and its mask; used as the alignment for cache
 * maintenance and handed to UMA in set_cpufuncs().
 */
int	arm_dcache_align;
int	arm_dcache_align_mask;

/*
 * ARMv7 path only: raw CLIDR value, the per-level/per-type cache size
 * registers (indexed by the CSSELR selector value), and the CLIDR LoC
 * (level of coherency) field.
 */
u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;
82
/*
 * Forward declarations of the per-CPU-type setup routines installed as
 * the cpu-setup member of the function vectors below.
 */
#if defined(CPU_ARM9E)
static void arm10_setup(void);
#endif
#ifdef CPU_MV_PJ4B
static void pj4bv7_setup(void);
#endif
#if defined(CPU_ARM1176)
static void arm11x6_setup(void);
#endif
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void cortexa_setup(void);
#endif
95
96 #if defined(CPU_ARM9E)
/*
 * Function vector for ARMv5E-class cores (ARM926EJ-S).  NOTE: this is a
 * positional initializer, so the entries must stay in exactly the same
 * order as the members of struct cpu_functions.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv5_ec_setttb,		/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* No L2 cache on these cores: all L2 ops are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
146
/*
 * Function vector for Marvell Sheeva cores (88FR131/88FR571): like the
 * generic ARMv5E vector but with Sheeva-specific range ops and a real
 * L2 cache.  Positional initializer — keep in struct member order.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	sheeva_setttb,			/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all	*/
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
	sheeva_l2cache_inv_range,	/* l2cache_inv_range	*/
	sheeva_l2cache_wb_range,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
195 #endif /* CPU_ARM9E */
196
197 #ifdef CPU_MV_PJ4B
/*
 * Function vector for Marvell PJ4B (ARMv7) cores.  Designated
 * initializers; members not listed are zero-initialized.
 */
struct cpu_functions pj4bv7_cpufuncs = {

	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */
	.cf_setup = pj4bv7_setup
};
213 #endif /* CPU_MV_PJ4B */
214
215 #if defined(CPU_ARM1176)
/*
 * Function vector for ARM1176 cores.  Designated initializers; members
 * not listed are zero-initialized.
 */
struct cpu_functions arm1176_cpufuncs = {

	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = arm11x6_sleep,

	/* Soft functions */
	.cf_setup = arm11x6_setup
};
231 #endif /*CPU_ARM1176 */
232
233 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Function vector for Cortex-A and Qualcomm Krait cores.  Designated
 * initializers; members not listed are zero-initialized.
 */
struct cpu_functions cortexa_cpufuncs = {

	/* Cache operations */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */
	.cf_setup = cortexa_setup
};
254 #endif /* CPU_CORTEXA || CPU_KRAIT */
255
/*
 * Global constants also used by locore.s
 */

/* Active function vector; set_cpufuncs() copies one of the templates here. */
struct cpu_functions cpufuncs;
/* MIDR masked with CPU_ID_CPU_MASK; set by set_cpufuncs(). */
u_int cputype;
#if __ARM_ARCH <= 5
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore-v4.s */
#endif
265
#if defined (CPU_ARM9E) ||	\
  defined(CPU_ARM1176) ||	\
  defined(CPU_MV_PJ4B) ||	\
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
278
/*
 * Probe the CP15 cache identification registers and populate the global
 * cache geometry variables declared at the top of this file.
 *
 * Two decode paths:
 *  - ARMv7-format CTR: walk CLIDR level by level, selecting each
 *    implemented cache via the cache size selection register and
 *    reading its size register into arm_cache_type[].
 *  - Older CTR formats: decode the I-size/D-size fields directly.
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	ctype = cp15_ctr_get();
	cpuid = cp15_midr_get();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;	/* NB: jumps into the else block below. */

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Read CLIDR (p15, 1, c0, c0, 1). */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		i = 0;
		/* Each CLIDR cache-type field is 3 bits; walk up to 7 levels. */
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				/* Even selector: data/unified cache. */
				sel = i << 1;
				/* Write cache size selection register. */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				/* Read the selected cache's size register. */
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				/* Line length field is log2(words)-2; +4 -> bytes. */
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				/* Odd selector: instruction cache. */
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;
		}
	} else {
		/* Pre-ARMv7 cache type register format. */
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

		/*
		 * NOTE(review): also reached via "goto out" above when the
		 * CTR reads back as the MIDR; in that case arm_dcache_align
		 * was not set here, so the mask derives from its prior
		 * (zero-initialized) value.
		 */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
#endif /* CPU_ARM9E || CPU_ARM1176 || CPU_MV_PJ4B || CPU_CORTEXA || CPU_KRAIT */
381
/*
 * Identify the running CPU from its main ID register and install the
 * matching cpu_functions vector, then tell UMA the cache alignment.
 * Returns 0 on success; panics on an unsupported CPU type.
 *
 * Cannot panic here as we may not have a console yet ...
 * (NOTE(review): despite that historical remark, the fall-through path
 * below does call panic().)
 */

int
set_cpufuncs(void)
{
	cputype = cp15_midr_get();
	cputype &= CPU_ID_CPU_MASK;

#if defined(CPU_ARM9E)
	/* Marvell Sheeva (Feroceon-class) cores. */
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* Match on the part-number scheme to cover all Cortex-A variants. */
	switch(cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
		goto out;
	default:
		break;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	/* NOTREACHED */
	return(ARCHITECTURE_NOT_PRESENT);
out:
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
470
471 /*
472  * CPU Setup code
473  */
474
#if defined(CPU_ARM9E)
/*
 * CPU setup for the ARMv5E cores handled above (ARM926EJ-S, Sheeva):
 * build the desired system control register image, clean the caches,
 * and install it.
 *
 * Fix: the old "cpuctrlmask" local was assigned but never used (the
 * cpu_control() call below passes a literal all-ones mask), which trips
 * -Wunused-but-set-variable under -Werror; it has been removed.
 */
static void
arm10_setup(void)
{
	int cpuctrl;

	/* Bits to enable: MMU, caches, write buffer, branch prediction. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	/* Big-endian kernel: run the CPU big-endian too. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Relocate the vector page if the kernel uses high vectors. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
#endif	/* CPU_ARM9E */
515
#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Enable the performance-monitor cycle counter (CCNT) so it can serve
 * as a cheap timebase.  Shared by the v6/v7 setup routines below.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
#endif
545
546 #if defined(CPU_ARM1176)
547 static void
548 arm11x6_setup(void)
549 {
550         uint32_t auxctrl, auxctrl_wax;
551         uint32_t tmp, tmp2;
552         uint32_t cpuid;
553
554         cpuid = cp15_midr_get();
555
556         auxctrl = 0;
557         auxctrl_wax = ~0;
558
559         /*
560          * Enable an errata workaround
561          */
562         if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
563                 auxctrl = ARM1176_AUXCTL_PHD;
564                 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
565         }
566
567         tmp = cp15_actlr_get();
568         tmp2 = tmp;
569         tmp &= auxctrl_wax;
570         tmp |= auxctrl;
571         if (tmp != tmp2)
572                 cp15_actlr_set(tmp);
573
574         cpu_scc_setup_ccnt();
575 }
576 #endif  /* CPU_ARM1176 */
577
#ifdef CPU_MV_PJ4B
/*
 * CPU setup for Marvell PJ4B: apply the PJ4B-specific configuration,
 * then enable the cycle counter.
 */
static void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */
587
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/* CPU setup for Cortex-A / Krait: only the cycle counter needs enabling. */
static void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif	/* CPU_CORTEXA || CPU_KRAIT */