]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/arm/arm/cpufunc.c
Remove kernel support for armeb
[FreeBSD/FreeBSD.git] / sys / arm / arm / cpufunc.c
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * arm9 support code Copyright (C) 2001 ARM Ltd
7  * Copyright (c) 1997 Mark Brinicombe.
8  * Copyright (c) 1997 Causality Limited
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *      This product includes software developed by Causality Limited.
22  * 4. The name of Causality Limited may not be used to endorse or promote
23  *    products derived from this software without specific prior written
24  *    permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * RiscBSD kernel project
39  *
40  * cpufuncs.c
41  *
42  * C functions for supporting CPU / MMU / TLB specific operations.
43  *
44  * Created      : 30/01/97
45  */
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/lock.h>
52 #include <sys/mutex.h>
53 #include <sys/bus.h>
54 #include <machine/bus.h>
55 #include <machine/cpu.h>
56 #include <machine/disassem.h>
57
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 #include <vm/uma.h>
61
62 #include <machine/cpufunc.h>
63
64 #if defined(CPU_XSCALE_81342)
65 #include <arm/xscale/i8134x/i81342reg.h>
66 #endif
67
/* PRIMARY CACHE VARIABLES */

/*
 * Geometry of the level-1 (primary) caches, filled in at boot by
 * get_cachetype_cp15() from the CP15 cache type registers.  Sizes are
 * in bytes; *_ways is the associativity.
 */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;	/* CPU_CT_CTYPE() field of the cache type reg */
int	arm_pcache_unified;	/* nonzero if I and D caches are unified */

/* D-cache line size and the matching mask, used for range alignment. */
int	arm_dcache_align;
int	arm_dcache_align_mask;

/* ARMv7 CLIDR/CCSIDR snapshots: one CCSIDR per (level, I/D) selector. */
u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;
86
#ifdef CPU_ARM9
/*
 * Dispatch table for ARM9 cores.  set_cpufuncs() copies this into the
 * global 'cpufuncs' when an ARM9-family part is identified.  The
 * initializers are positional; the trailing comment on each entry names
 * the struct cpu_functions member being filled.  These cores have no L2
 * cache, so every l2cache_* slot is a no-op.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	arm9_setttb,			/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
137
#if defined(CPU_ARM9E)
/*
 * Dispatch table for ARMv5TE cores (selected by set_cpufuncs() for the
 * ARM926EJ-S).  No L2 cache: all l2cache_* slots are no-ops.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv5_ec_setttb,		/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};

/*
 * Dispatch table for Marvell Sheeva cores (MV88FR131/MV88FR571 family,
 * see set_cpufuncs()).  Unlike the generic ARMv5TE table above, these
 * parts have an L2 cache, so the sheeva_l2cache_* handlers are real.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	sheeva_setttb,			/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all	*/
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
	sheeva_l2cache_inv_range,	/* l2cache_inv_range	*/
	sheeva_l2cache_wb_range,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
#endif /* CPU_ARM9E */
238
#ifdef CPU_MV_PJ4B
/*
 * Dispatch table for Marvell PJ4B (ARMv7) cores.  Only the members
 * named by these designated initializers are set; all other slots are
 * zero-initialized and must not be called through this table.
 */
struct cpu_functions pj4bv7_cpufuncs = {

	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */
	.cf_setup = pj4bv7_setup
};
#endif /* CPU_MV_PJ4B */
256
#if defined(CPU_XSCALE_PXA2X0)

/*
 * Dispatch table for Intel XScale PXA2x0 cores (PXA210/PXA250/PXA27x,
 * see set_cpufuncs()).  xscale_cpwait is a real drain/wait, unlike the
 * no-op cpwait of the older tables.  No L2 cache on these parts.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscale_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_PXA2X0 */
308
#ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the Intel 81342 (XScale core 3).  Differs from the
 * PXA2x0 table above mainly in the c3-specific cache handlers and in
 * having a real L2 cache (xscalec3_l2cache_*).
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscalec3_setttb,		/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_81342 */
358
359
#if defined(CPU_FA526)
/*
 * Dispatch table for Faraday FA526/FA626TE cores (see set_cpufuncs()).
 * No L2 cache: l2cache_* slots are no-ops.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	fa526_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup		*/
};
#endif	/* CPU_FA526 */
410
#if defined(CPU_ARM1176)
/*
 * Dispatch table for the ARM1176JZ-S (see set_cpufuncs()).  Only the
 * members named here are set; the remaining slots stay zero and must
 * not be called through this table.
 */
struct cpu_functions arm1176_cpufuncs = {

	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = arm11x6_sleep,

	/* Soft functions */
	.cf_setup = arm11x6_setup
};
#endif /* CPU_ARM1176 */
428
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A and Qualcomm Krait (ARMv7) cores.  Only
 * the members named here are set; the remaining slots stay zero and
 * must not be called through this table.
 */
struct cpu_functions cortexa_cpufuncs = {

	/* Cache operations */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */
	.cf_setup = cortexa_setup
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
451
/*
 * Global constants also used by locore.s
 */

/* The active dispatch table, installed by set_cpufuncs(). */
struct cpu_functions cpufuncs;
/* Masked main ID register (MIDR) value, set by set_cpufuncs(). */
u_int cputype;
#if __ARM_ARCH <= 5
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore-v4.s */
#endif

#if defined(CPU_ARM9) ||	\
  defined (CPU_ARM9E) ||	\
  defined(CPU_ARM1176) ||	\
  defined(CPU_XSCALE_PXA2X0) || \
  defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||			\
  defined(CPU_XSCALE_81342) || \
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

/* Global cache line sizes, use 32 as default */
int	arm_dcache_min_line_size = 32;
int	arm_icache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
482
/*
 * Probe the cache geometry through CP15 and fill in the arm_*cache*
 * globals declared above.
 *
 * Reads the cache type register (CP15 c0, c0, 1).  If the value equals
 * the main ID register, the register is unimplemented (see the ARM ARM
 * quote below) and we bail out with the defaults.  For ARMv7-format
 * values we walk the cache level ID register and record each level's
 * size ID register; for older formats we decode the legacy cache type
 * fields directly.
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	/* Cache type register. */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	cpuid = cpu_ident();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;	/* NB: 'out' is inside the else arm below */

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Resolve minimal cache line sizes */
		arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
		arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
		arm_idcache_min_line_size =
		    min(arm_icache_min_line_size, arm_dcache_min_line_size);

		/* Cache level ID register (CLIDR). */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		i = 0;
		/* Walk the 3-bit per-level type fields, up to 7 levels. */
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				/* Even selector: data/unified cache. */
				sel = i << 1;
				/* Select the cache, then read its size ID. */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				/*
				 * Last D/unified level seen wins; line
				 * length field is log2(words)-2.
				 */
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				/* Odd selector: instruction cache. */
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;
		}
	} else {
		/* Pre-v7 cache type register format. */
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

		/*
		 * Reached directly from the 'goto out' above when the
		 * cache type register is unimplemented; in that case
		 * only this mask update runs, using whatever
		 * arm_dcache_align currently holds.
		 */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
#endif /* CPU_ARM9 || CPU_ARM9E || CPU_ARM1176 || XSCALE || FA526 || PJ4B || CORTEXA || KRAIT */
593
/*
 * NOTE(review): this comment is stale -- set_cpufuncs() below now ends
 * in a panic() for unknown CPUs.  Historically this code ran before the
 * console was up, so a panic message might not be visible; confirm
 * console availability before relying on it.
 */
/*
 * Identify the CPU from its main ID register and install the matching
 * dispatch table into the global 'cpufuncs'.
 *
 * Per-family follow-up work is also done here: the cache geometry is
 * probed (get_cachetype_cp15()), PTE handling is initialized
 * (pmap_pte_init_generic()/pmap_pte_init_xscale()), and on ARM9 the
 * set/index iteration parameters used by the dcache assembly routines
 * are derived from the probed geometry.
 *
 * Returns 0 on success.  An unrecognized CPU panics; the return of
 * ARCHITECTURE_NOT_PRESENT after the panic is unreachable and exists
 * only to satisfy the function's return contract.
 */
int
set_cpufuncs(void)
{
	cputype = cpu_ident();
	cputype &= CPU_ID_CPU_MASK;

#ifdef CPU_ARM9
	/* ARM Ltd or TI implementer with a 0x9xx part number. */
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/*
		 * Precompute the set/index stepping values consumed by
		 * the arm9 dcache assembly loops.
		 */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
	/* Marvell Sheeva cores get their own table with real L2 ops. */
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	switch(cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
		goto out;
	default:
		break;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		goto out;
	}
#endif	/* CPU_FA526 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);	/* unreachable after panic */
out:
	/* UMA allocations must be aligned to the dcache line size. */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
732
733 /*
734  * CPU Setup code
735  */
736
#ifdef CPU_ARM9
/*
 * Configure the system control register (SCTLR) for ARM9 cores.
 *
 * Builds the set of control bits to enable and the mask of bits
 * cpu_control() is allowed to change, write-back-invalidates the
 * caches, and then commits the new value.
 */
void
arm9_setup(void)
{
	int ctrl, mask;

	/* Bits we want asserted. */
	ctrl = CPU_CONTROL_MMU_ENABLE;
	ctrl |= CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE;
	ctrl |= CPU_CONTROL_SYST_ENABLE;
	ctrl |= CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE;
	ctrl |= CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
	ctrl |= CPU_CONTROL_ROUNDROBIN;

	/*
	 * Bits cpu_control() may modify: everything above plus the
	 * optional/conditional bits below.
	 */
	mask = ctrl | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE |
	    CPU_CONTROL_AFLT_ENABLE | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	ctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	ctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
	if (vector_page == ARM_VECTORS_HIGH)
		ctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register (SCTLR)   */
	cpu_control(mask, ctrl);

}
#endif	/* CPU_ARM9 */
774
#if defined(CPU_ARM9E)
/*
 * Configure the system control register (SCTLR) for ARM9E-class cores.
 *
 * Builds the desired control value, write-back-invalidates the caches,
 * and commits the value with an all-ones mask via cpu_control().
 *
 * The former 'cpuctrlmask' local was computed but never used (the
 * cpu_control() call passes 0xffffffff), so it has been removed.
 */
void
arm10_setup(void)
{
	int cpuctrl;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register; every bit is eligible for update. */
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
#endif	/* CPU_ARM9E */
815
#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Program the CP15 performance monitor so the cycle counter (CCNT /
 * PMCCNTR) runs.
 *
 * This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
#endif
845
#if defined(CPU_ARM1176)
/*
 * CPU setup for ARM1176 cores: apply an auxiliary control register
 * (ACTLR) errata workaround when required, then enable the cycle
 * counter.
 */
void
arm11x6_setup(void)
{
	uint32_t set_bits, keep_mask;
	uint32_t actlr_old, actlr_new;

	set_bits = 0;
	keep_mask = ~0;

	/*
	 * Enable an errata workaround on ARM1176JZSr0: force the
	 * ARM1176_AUXCTL_PHD bit on.
	 */
	if ((cpu_ident() & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) {
		set_bits = ARM1176_AUXCTL_PHD;
		keep_mask = ~ARM1176_AUXCTL_PHD;
	}

	/* Read-modify-write ACTLR, skipping the write when unchanged. */
	actlr_old = cp15_actlr_get();
	actlr_new = (actlr_old & keep_mask) | set_bits;
	if (actlr_new != actlr_old)
		cp15_actlr_set(actlr_new);

	cpu_scc_setup_ccnt();
}
#endif	/* CPU_ARM1176 */
877
#ifdef CPU_MV_PJ4B
/*
 * CPU setup for Marvell PJ4B cores: apply the PJ4B-specific
 * configuration (pj4b_config(), defined elsewhere) and enable the
 * performance-monitor cycle counter.
 */
void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */
887
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)

/*
 * CPU setup for Cortex-A and Krait cores: only the performance-monitor
 * cycle counter needs to be enabled here.
 */
void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif	/* CPU_CORTEXA || CPU_KRAIT */
897
#if defined(CPU_FA526)
/*
 * CPU setup for the Faraday FA526 core.
 *
 * Builds the desired system control register value -- MMU, 32-bit
 * program/data space, I/D caches, write buffer, late aborts and branch
 * prediction, plus alignment faults and big-endian operation when
 * configured -- flushes the caches and writes the new value with all
 * bits eligible for update.
 */
void
fa526_setup(void)
{
	int cpuctrl;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the vector page when it lives at the high address. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register; every bit is eligible for update. */
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_FA526 */
935
#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342)
/*
 * CPU setup for Intel XScale cores (PXA2x0, 81342).
 *
 * Builds and installs the system control register value, then programs
 * the auxiliary control register (CP15 c1, op2 1) to select the write
 * coalescing policy and, on core 3, the L2 cache / memory attributes.
 */
void
xscale_setup(void)
{
	uint32_t auxctl;
	int cpuctrl;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the vector page when it lives at the high address. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1; every bit is eligible for update here.
	 */
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
#endif	/* CPU_XSCALE_PXA2X0 || CPU_XSCALE_81342 */