/*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/disassem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>

#include <machine/cpufunc.h>

#if defined(CPU_XSCALE_81342)
#include <arm/xscale/i8134x/i81342reg.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#endif

/* PRIMARY CACHE VARIABLES */
int     arm_picache_size;
int     arm_picache_line_size;
int     arm_picache_ways;

int     arm_pdcache_size;       /* and unified */
int     arm_pdcache_line_size;
int     arm_pdcache_ways;

int     arm_pcache_type;
int     arm_pcache_unified;

int     arm_dcache_align;
int     arm_dcache_align_mask;

u_int   arm_cache_level;
u_int   arm_cache_type[14];
u_int   arm_cache_loc;

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        arm9_setttb,                    /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        arm9_icache_sync_range,         /* icache_sync_range    */

        arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
        arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
        arm9_dcache_inv_range,          /* dcache_inv_range     */
        arm9_dcache_wb_range,           /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
        arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm9_setup                      /* cpu setup            */
};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E)
struct cpu_functions armv5_ec_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        armv5_ec_setttb,                /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
        armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
        armv5_ec_dcache_wb_range,       /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */

        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm10_setup                     /* cpu setup            */
};

struct cpu_functions sheeva_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        sheeva_setttb,                  /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
        sheeva_dcache_inv_range,        /* dcache_inv_range     */
        sheeva_dcache_wb_range,         /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */
223
224         sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
225         sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
226         sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
227         sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
228         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
229
230         /* Other functions */
231
232         armv4_drain_writebuf,           /* drain_writebuf       */
233
234         sheeva_cpu_sleep,               /* sleep                */
235
236         /* Soft functions */
237
238         arm9_context_switch,            /* context_switch       */
239
240         arm10_setup                     /* cpu setup            */
241 };
242 #endif /* CPU_ARM9E */
243
244 #ifdef CPU_MV_PJ4B
245 struct cpu_functions pj4bv7_cpufuncs = {
246
247         /* Cache operations */
248         .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
249         .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
250         .cf_l2cache_inv_range = (void *)cpufunc_nullop,
251         .cf_l2cache_wb_range = (void *)cpufunc_nullop,
252         .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
253
254         /* Other functions */
255         .cf_sleep = (void *)cpufunc_nullop,
256
257         /* Soft functions */
258         .cf_setup = pj4bv7_setup
259 };
260 #endif /* CPU_MV_PJ4B */
261
262 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
263
264 struct cpu_functions xscale_cpufuncs = {
265         /* CPU functions */
266
267         xscale_cpwait,                  /* cpwait               */
268
269         /* MMU functions */
270
271         xscale_control,                 /* control              */
272         xscale_setttb,                  /* setttb               */
273
274         /* TLB functions */
275
276         armv4_tlb_flushID,              /* tlb_flushID          */
277         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
278         armv4_tlb_flushD,               /* tlb_flushD           */
279         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
280
281         /* Cache operations */
282
283         xscale_cache_syncI_rng,         /* icache_sync_range    */
284
285         xscale_cache_purgeD,            /* dcache_wbinv_all     */
286         xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
287         xscale_cache_flushD_rng,        /* dcache_inv_range     */
288         xscale_cache_cleanD_rng,        /* dcache_wb_range      */
289
290         xscale_cache_flushID,           /* idcache_inv_all      */
291         xscale_cache_purgeID,           /* idcache_wbinv_all    */
292         xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
293         cpufunc_nullop,                 /* l2cache_wbinv_all    */
294         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
295         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
296         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
297         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
298
299         /* Other functions */
300
301         armv4_drain_writebuf,           /* drain_writebuf       */
302
303         xscale_cpu_sleep,               /* sleep                */
304
305         /* Soft functions */
306
307         xscale_context_switch,          /* context_switch       */
308
309         xscale_setup                    /* cpu setup            */
310 };
311 #endif
312 /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
313
314 #ifdef CPU_XSCALE_81342
315 struct cpu_functions xscalec3_cpufuncs = {
316         /* CPU functions */
317
318         xscale_cpwait,                  /* cpwait               */
319
320         /* MMU functions */
321
322         xscale_control,                 /* control              */
323         xscalec3_setttb,                /* setttb               */
324
325         /* TLB functions */
326
327         armv4_tlb_flushID,              /* tlb_flushID          */
328         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
329         armv4_tlb_flushD,               /* tlb_flushD           */
330         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
331
332         /* Cache operations */
333
334         xscalec3_cache_syncI_rng,       /* icache_sync_range    */
335
336         xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
337         xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
338         xscale_cache_flushD_rng,        /* dcache_inv_range     */
339         xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */
340
341         xscale_cache_flushID,           /* idcache_inv_all      */
342         xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
343         xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
344         xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
345         xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
346         xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
347         xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
348         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
349
350         /* Other functions */
351
352         armv4_drain_writebuf,           /* drain_writebuf       */
353
354         xscale_cpu_sleep,               /* sleep                */
355
356         /* Soft functions */
357
358         xscalec3_context_switch,        /* context_switch       */
359
360         xscale_setup                    /* cpu setup            */
361 };
362 #endif /* CPU_XSCALE_81342 */
363
364
365 #if defined(CPU_FA526)
366 struct cpu_functions fa526_cpufuncs = {
367         /* CPU functions */
368
369         cpufunc_nullop,                 /* cpwait               */
370
371         /* MMU functions */
372
373         cpufunc_control,                /* control              */
374         fa526_setttb,                   /* setttb               */
375
376         /* TLB functions */
377
378         armv4_tlb_flushID,              /* tlb_flushID          */
379         fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
380         armv4_tlb_flushD,               /* tlb_flushD           */
381         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
382
383         /* Cache operations */
384
385         fa526_icache_sync_range,        /* icache_sync_range    */
386
387         fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
388         fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
389         fa526_dcache_inv_range,         /* dcache_inv_range     */
390         fa526_dcache_wb_range,          /* dcache_wb_range      */
391
392         armv4_idcache_inv_all,          /* idcache_inv_all      */
393         fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
394         fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
395         cpufunc_nullop,                 /* l2cache_wbinv_all    */
396         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
397         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
398         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
399         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
400
401         /* Other functions */
402
403         armv4_drain_writebuf,           /* drain_writebuf       */
404
405         fa526_cpu_sleep,                /* sleep                */
406
407         /* Soft functions */
408
409
410         fa526_context_switch,           /* context_switch       */
411
412         fa526_setup                     /* cpu setup            */
413 };
414 #endif  /* CPU_FA526 */
415
416 #if defined(CPU_ARM1176)
417 struct cpu_functions arm1176_cpufuncs = {
418
419         /* Cache operations */
420         .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
421         .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
422         .cf_l2cache_inv_range = (void *)cpufunc_nullop,
423         .cf_l2cache_wb_range = (void *)cpufunc_nullop,
424         .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
425
426         /* Other functions */
427         .cf_sleep = arm11x6_sleep, 
428
429         /* Soft functions */
430         .cf_setup = arm11x6_setup
431 };
432 #endif /*CPU_ARM1176 */
433
434 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
435 struct cpu_functions cortexa_cpufuncs = {
436
437         /* Cache operations */
438
439         /*
440          * Note: For CPUs using the PL310 the L2 ops are filled in when the
441          * L2 cache controller is actually enabled.
442          */
443         .cf_l2cache_wbinv_all = cpufunc_nullop,
444         .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
445         .cf_l2cache_inv_range = (void *)cpufunc_nullop,
446         .cf_l2cache_wb_range = (void *)cpufunc_nullop,
447         .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
448
449         /* Other functions */
450         .cf_sleep = armv7_cpu_sleep,
451
452         /* Soft functions */
453         .cf_setup = cortexa_setup
454 };
455 #endif /* CPU_CORTEXA || CPU_KRAIT */
456
457 /*
458  * Global constants also used by locore.s
459  */
460
461 struct cpu_functions cpufuncs;
462 u_int cputype;
463 #if __ARM_ARCH <= 5
464 u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore-v4.s */
465 #endif
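
/*
 * A hedged sketch of how <machine/cpufunc.h> typically turns the tables
 * above into the cpu_*() interface used throughout this file and the
 * rest of the kernel (illustrative only; the authoritative macro names
 * and signatures live in cpufunc.h):
 *
 *      #define cpu_cpwait()            cpufuncs.cf_cpwait()
 *      #define cpu_control(c, e)       cpufuncs.cf_control(c, e)
 *      #define cpu_setttb(t)           cpufuncs.cf_setttb(t)
 *      #define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all()
 *      #define cpu_setup()             cpufuncs.cf_setup()
 *
 * set_cpufuncs() below copies the matching per-CPU table into the
 * global cpufuncs, so each wrapper dispatches to the right routine.
 */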

#if defined(CPU_ARM9) || defined(CPU_ARM9E) ||                  \
  defined(CPU_ARM1176) ||                                       \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||   \
  defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||                 \
  defined(CPU_XSCALE_81342) ||                                  \
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

/* Global cache line sizes, use 32 as default */
int     arm_dcache_min_line_size = 32;
int     arm_icache_min_line_size = 32;
int     arm_idcache_min_line_size = 32;

static void get_cachetype_cp15(void);

/*
 * Additional cache information local to this file.  Log2 of some of the
 * above numbers.
 */
static int      arm_dcache_l2_nsets;
static int      arm_dcache_l2_assoc;
static int      arm_dcache_l2_linesize;

static void
get_cachetype_cp15(void)
{
        u_int ctype, isize, dsize, cpuid;
        u_int clevel, csize, i, sel;
        u_int multiplier;
        u_char type;

        __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
                : "=r" (ctype));

        cpuid = cpu_ident();
        /*
         * ...and thus spake the ARM ARM:
         *
         * If an <opcode2> value corresponding to an unimplemented or
         * reserved ID register is encountered, the System Control
         * processor returns the value of the main ID register.
         */
        if (ctype == cpuid)
                goto out;

        if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
                /* Resolve minimal cache line sizes */
                arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
                arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
                arm_idcache_min_line_size =
                    min(arm_icache_min_line_size, arm_dcache_min_line_size);

                __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
                    : "=r" (clevel));
                arm_cache_level = clevel;
                arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
                i = 0;
                while ((type = (clevel & 0x7)) && i < 7) {
                        if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
                            type == CACHE_SEP_CACHE) {
                                sel = i << 1;
                                __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
                                    : : "r" (sel));
                                __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
                                    : "=r" (csize));
                                arm_cache_type[sel] = csize;
                                arm_dcache_align = 1 <<
                                    (CPUV7_CT_xSIZE_LEN(csize) + 4);
                                arm_dcache_align_mask = arm_dcache_align - 1;
                        }
                        if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
                                sel = (i << 1) | 1;
                                __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
                                    : : "r" (sel));
                                __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
                                    : "=r" (csize));
                                arm_cache_type[sel] = csize;
                        }
                        i++;
                        clevel >>= 3;
                }
        } else {
                if ((ctype & CPU_CT_S) == 0)
                        arm_pcache_unified = 1;

                /*
                 * If you want to know how this code works, go read the ARM ARM.
                 */

                arm_pcache_type = CPU_CT_CTYPE(ctype);

                if (arm_pcache_unified == 0) {
                        isize = CPU_CT_ISIZE(ctype);
                        multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
                        arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
                        if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
                                if (isize & CPU_CT_xSIZE_M)
                                        arm_picache_line_size = 0; /* not present */
                                else
                                        arm_picache_ways = 1;
                        } else {
                                arm_picache_ways = multiplier <<
                                    (CPU_CT_xSIZE_ASSOC(isize) - 1);
                        }
                        arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
                }

                dsize = CPU_CT_DSIZE(ctype);
                multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
                arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
                if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
                        if (dsize & CPU_CT_xSIZE_M)
                                arm_pdcache_line_size = 0; /* not present */
                        else
                                arm_pdcache_ways = 1;
                } else {
                        arm_pdcache_ways = multiplier <<
                            (CPU_CT_xSIZE_ASSOC(dsize) - 1);
                }
                arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

                arm_dcache_align = arm_pdcache_line_size;

                arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
                arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
                arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
                    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

        out:
                arm_dcache_align_mask = arm_dcache_align - 1;
        }
}
#endif /* ARM9 || ARM9E || ARM1176 || XSCALE || FA526 || PJ4B || CORTEXA || KRAIT */
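
/*
 * Worked example of the v7 decode above (illustrative sketch, not part
 * of the build; field layout per the ARMv7 CCSIDR definition, and
 * assuming CPUV7_CT_xSIZE_LEN() extracts the LineSize field in bits
 * [2:0], as the code above relies on).  A 32 KB, 4-way data cache with
 * 64-byte lines reads back as:
 *
 *      LineSize = log2(64) - 4 = 2                     (bits [2:0])
 *      Assoc    = 4 - 1        = 3                     (bits [12:3])
 *      NumSets  = 128 - 1      = 127                   (bits [27:13])
 *      CCSIDR   = (127 << 13) | (3 << 3) | 2 = 0x000fe01a
 *
 * so arm_dcache_align = 1 << (2 + 4) = 64, and the total size is
 * 64 bytes * 4 ways * 128 sets = 32 KB.  For the pre-v7 cache type
 * register format, size = multiplier << (SIZE + 8); e.g. SIZE = 5 with
 * the M bit clear gives 2 << 13 = 16 KB.
 */
#if 0
/* Hypothetical helper, illustrating the CCSIDR arithmetic only. */
static u_int
ccsidr_cache_size(u_int csize)
{
        u_int linesize, ways, sets;

        linesize = 1 << (CPUV7_CT_xSIZE_LEN(csize) + 4);
        ways = ((csize >> 3) & 0x3ff) + 1;
        sets = ((csize >> 13) & 0x7fff) + 1;
        return (linesize * ways * sets);        /* 0x000fe01a -> 32768 */
}
#endif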

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
        cputype = cpu_ident();
        cputype &= CPU_ID_CPU_MASK;

#ifdef CPU_ARM9
        if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
             (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
            (cputype & 0x0000f000) == 0x00009000) {
                cpufuncs = arm9_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
                get_cachetype_cp15();
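                /*
                 * Precompute the CP15 c7 set/way walk parameters used
                 * by the arm9 cache routines: the set increment is one
                 * cache line, and the way index lives in the top
                 * log2(ways) bits of the operand, so the way increment
                 * is 1 << (32 - log2(ways)).
                 */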
                arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
                arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
                    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
                arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
                arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
                pmap_pte_init_generic();
                goto out;
        }
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
        if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
            cputype == CPU_ID_MV88FR571_41) {
                uint32_t sheeva_ctrl;

                sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
                    MV_L2_ENABLE);
                /*
                 * Workaround for Marvell MV78100 CPU: Cache prefetch
                 * mechanism may affect the cache coherency validity,
                 * so it needs to be disabled.
                 *
                 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
                 * L2 Prefetching Mechanism) for details.
                 */
                if (cputype == CPU_ID_MV88FR571_VD ||
                    cputype == CPU_ID_MV88FR571_41)
                        sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

                sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

                cpufuncs = sheeva_cpufuncs;
                get_cachetype_cp15();
                pmap_pte_init_generic();
                goto out;
        } else if (cputype == CPU_ID_ARM926EJS) {
                cpufuncs = armv5_ec_cpufuncs;
                get_cachetype_cp15();
                pmap_pte_init_generic();
                goto out;
        }
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
        if (cputype == CPU_ID_ARM1176JZS) {
                cpufuncs = arm1176_cpufuncs;
                get_cachetype_cp15();
                goto out;
        }
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
        switch (cputype & CPU_ID_SCHEME_MASK) {
        case CPU_ID_CORTEXA5:
        case CPU_ID_CORTEXA7:
        case CPU_ID_CORTEXA8:
        case CPU_ID_CORTEXA9:
        case CPU_ID_CORTEXA12:
        case CPU_ID_CORTEXA15:
        case CPU_ID_CORTEXA53:
        case CPU_ID_CORTEXA57:
        case CPU_ID_CORTEXA72:
        case CPU_ID_KRAIT300:
                cpufuncs = cortexa_cpufuncs;
                get_cachetype_cp15();
                goto out;
        default:
                break;
        }
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
        if (cputype == CPU_ID_MV88SV581X_V7 ||
            cputype == CPU_ID_MV88SV584X_V7 ||
            cputype == CPU_ID_ARM_88SV581X_V7) {
                cpufuncs = pj4bv7_cpufuncs;
                get_cachetype_cp15();
                goto out;
        }
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526)
        if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
                cpufuncs = fa526_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
                get_cachetype_cp15();
                pmap_pte_init_generic();

                goto out;
        }
#endif  /* CPU_FA526 */

#if defined(CPU_XSCALE_81342)
        if (cputype == CPU_ID_81342) {
                cpufuncs = xscalec3_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();
                goto out;
        }
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
        /* ignore core revision to test PXA2xx CPUs */
        if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
            (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
            (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
                cpufuncs = xscale_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                goto out;
        }
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
        if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
                cpufuncs = xscale_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                goto out;
        }
#endif /* CPU_XSCALE_IXP425 */
        /*
         * Bzzzz. And the answer was ...
         */
        panic("No support for this CPU type (%08x) in kernel", cputype);
        return (ARCHITECTURE_NOT_PRESENT);
out:
        uma_set_align(arm_dcache_align_mask);
        return (0);
}
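
/*
 * Hedged sketch of the intended boot-time call sequence (illustrative
 * only; the real call sites live in the platform initarm()/locore
 * paths): set_cpufuncs() fills in the global cpufuncs table, after
 * which the cpu_*() wrappers, including the cf_setup hook invoked via
 * cpu_setup(), are safe to use:
 *
 *      set_cpufuncs();
 *      cpu_setup();
 *      cpu_idcache_wbinv_all();
 */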

/*
 * CPU Setup code
 */

#ifdef CPU_ARM9
void
arm9_setup(void)
{
        int cpuctrl, cpuctrlmask;

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
            | CPU_CONTROL_ROUNDROBIN;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
            | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
            | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
            | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /* Set the control register (SCTLR) */
        cpu_control(cpuctrlmask, cpuctrl);
}
#endif  /* CPU_ARM9 */
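
/*
 * A minimal model of the cpu_control(mask, val) contract that the
 * setup routines here rely on (illustrative sketch, not part of the
 * build; the real implementation is the per-CPU cf_control hook):
 * SCTLR bits named in the mask are replaced by the corresponding bits
 * of the value, and all other bits are preserved.  Passing a mask of
 * 0xffffffff therefore installs the value verbatim.
 */
#if 0
/* Hypothetical helper, modeling the read-modify-write semantics only. */
static u_int
sctlr_update_model(u_int old_sctlr, u_int mask, u_int val)
{

        return ((old_sctlr & ~mask) | (val & mask));
}
#endif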

#if defined(CPU_ARM9E)
void
arm10_setup(void)
{
        int cpuctrl, cpuctrlmask;

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
            | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
            | CPU_CONTROL_BPRD_ENABLE
            | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /* Now really make sure they are clean.  */
        __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;

        /* Set the control register */
        cpu_control(0xffffffff, cpuctrl);

        /* And again. */
        cpu_idcache_wbinv_all();
}
#endif  /* CPU_ARM9E */

#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static __inline void
cpu_scc_setup_ccnt(void)
{
        /*
         * This is how you give userland access to the CCNT and PMCn
         * registers.
         * BEWARE! This gives write access also, which may not be what
         * you want!
         */
#ifdef _PMC_USER_READ_WRITE_
        /* Set PMUSERENR[0] to allow userland access */
        cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
        /* Set PMCR[2,0] to enable counters and reset CCNT */
        cp15_pmcr_set(5);
#else
        /*
         * Set up the PMCCNTR register as a cycle counter:
         * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
         * Set PMCR[2,0] to enable counters and reset CCNT
         * Set PMCNTENSET to 0x80000000 to enable CCNT
         */
        cp15_pminten_clr(0xFFFFFFFF);
        cp15_pmcr_set(5);
        cp15_pmcnten_set(0x80000000);
#endif
}
#endif
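
/*
 * Hedged example of the userland counterpart that _PMC_USER_READ_WRITE_
 * enables (illustrative sketch, not part of the build): on ARMv7 the
 * cycle counter is PMCCNTR, readable through CP15 c9 once PMUSERENR[0]
 * is set.  read_ccnt() is a hypothetical helper, not a kernel API.
 */
#if 0
static __inline uint32_t
read_ccnt(void)
{
        uint32_t ccnt;

        __asm __volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (ccnt));
        return (ccnt);
}
#endif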

#if defined(CPU_ARM1176)
void
arm11x6_setup(void)
{
        uint32_t auxctrl, auxctrl_wax;
        uint32_t tmp, tmp2;
        uint32_t cpuid;

        cpuid = cpu_ident();

        auxctrl = 0;
        auxctrl_wax = ~0;

        /*
         * Enable an errata workaround
         */
        if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
                auxctrl = ARM1176_AUXCTL_PHD;
                auxctrl_wax = ~ARM1176_AUXCTL_PHD;
        }

        tmp = cp15_actlr_get();
        tmp2 = tmp;
        tmp &= auxctrl_wax;
        tmp |= auxctrl;
        if (tmp != tmp2)
                cp15_actlr_set(tmp);

        cpu_scc_setup_ccnt();
}
#endif  /* CPU_ARM1176 */

#ifdef CPU_MV_PJ4B
void
pj4bv7_setup(void)
{

        pj4b_config();
        cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
void
cortexa_setup(void)
{

        cpu_scc_setup_ccnt();
}
#endif  /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_FA526)
void
fa526_setup(void)
{
        int cpuctrl, cpuctrlmask;

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
            | CPU_CONTROL_BPRD_ENABLE;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
            | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
            | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
            | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /* Set the control register */
        cpu_control(0xffffffff, cpuctrl);
}
#endif  /* CPU_FA526 */

#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_81342)
void
xscale_setup(void)
{
        uint32_t auxctl;
        int cpuctrl, cpuctrlmask;

        /*
         * The XScale Write Buffer is always enabled.  Our option
         * is to enable/disable coalescing.  Note that bits 6:3
         * must always be enabled.
         */

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
            | CPU_CONTROL_BPRD_ENABLE;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
            | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
            | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
            | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC
            | CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
        cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /*
         * Set the control register.  Note that bits 6:3 must always
         * be set to 1.
         */
/*      cpu_control(cpuctrlmask, cpuctrl);*/
        cpu_control(0xffffffff, cpuctrl);

        /* Make sure write coalescing is turned on */
        __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
                : "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
        auxctl |= XSCALE_AUXCTL_K;
#else
        auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
        auxctl |= XSCALE_AUXCTL_LLR;
        auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
        __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
                : : "r" (auxctl));
}
#endif  /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_81342 */