/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/

/*-
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/disassem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>

#include <machine/cpuconf.h>
#include <machine/cpufunc.h>

#if defined(CPU_XSCALE_81342)
#include <arm/xscale/i8134x/i81342reg.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#endif

/* PRIMARY CACHE VARIABLES */
int     arm_picache_size;
int     arm_picache_line_size;
int     arm_picache_ways;

int     arm_pdcache_size;       /* and unified */
int     arm_pdcache_line_size;
int     arm_pdcache_ways;

int     arm_pcache_type;
int     arm_pcache_unified;

int     arm_dcache_align;
int     arm_dcache_align_mask;

u_int   arm_cache_level;
u_int   arm_cache_type[14];
u_int   arm_cache_loc;
#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        arm9_setttb,                    /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        arm9_icache_sync_range,         /* icache_sync_range    */

        arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
        arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
        arm9_dcache_inv_range,          /* dcache_inv_range     */
        arm9_dcache_wb_range,           /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
        arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm9_setup                      /* cpu setup            */
};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E)
struct cpu_functions armv5_ec_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        armv5_ec_setttb,                /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
        armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
        armv5_ec_dcache_wb_range,       /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */

        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm10_setup                     /* cpu setup            */
};

struct cpu_functions sheeva_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        sheeva_setttb,                  /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
        sheeva_dcache_inv_range,        /* dcache_inv_range     */
        sheeva_dcache_wb_range,         /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */

        sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
        sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
        sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
        sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        sheeva_cpu_sleep,               /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm10_setup                     /* cpu setup            */
};
#endif /* CPU_ARM9E */

#ifdef CPU_MV_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
        /* MMU functions */
        .cf_control = cpufunc_control,
        .cf_setttb = armv7_setttb,

        /* Cache operations */
        .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
        .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
        .cf_l2cache_inv_range = (void *)cpufunc_nullop,
        .cf_l2cache_wb_range = (void *)cpufunc_nullop,
        .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

        /* Other functions */
        .cf_drain_writebuf = armv7_drain_writebuf,
        .cf_sleep = (void *)cpufunc_nullop,

        /* Soft functions */
        .cf_setup = pj4bv7_setup
};
#endif /* CPU_MV_PJ4B */
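
/*
 * The ARMv6/v7 tables above and below use C99 designated initializers;
 * members left unnamed are zero-filled.  The older tables use positional
 * initializers, which must match the member order of struct cpu_functions
 * in <machine/cpufunc.h> exactly.
 */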

#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)

struct cpu_functions xscale_cpufuncs = {
        /* CPU functions */

        xscale_cpwait,                  /* cpwait               */

        /* MMU functions */

        xscale_control,                 /* control              */
        xscale_setttb,                  /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        xscale_cache_syncI_rng,         /* icache_sync_range    */

        xscale_cache_purgeD,            /* dcache_wbinv_all     */
        xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
        xscale_cache_flushD_rng,        /* dcache_inv_range     */
        xscale_cache_cleanD_rng,        /* dcache_wb_range      */

        xscale_cache_flushID,           /* idcache_inv_all      */
        xscale_cache_purgeID,           /* idcache_wbinv_all    */
        xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        xscale_cpu_sleep,               /* sleep                */

        /* Soft functions */

        xscale_context_switch,          /* context_switch       */

        xscale_setup                    /* cpu setup            */
};
#endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */

#ifdef CPU_XSCALE_81342
struct cpu_functions xscalec3_cpufuncs = {
        /* CPU functions */

        xscale_cpwait,                  /* cpwait               */

        /* MMU functions */

        xscale_control,                 /* control              */
        xscalec3_setttb,                /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        xscalec3_cache_syncI_rng,       /* icache_sync_range    */

        xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
        xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
        xscale_cache_flushD_rng,        /* dcache_inv_range     */
        xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */

        xscale_cache_flushID,           /* idcache_inv_all      */
        xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
        xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
        xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
        xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
        xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
        xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        xscale_cpu_sleep,               /* sleep                */

        /* Soft functions */

        xscalec3_context_switch,        /* context_switch       */

        xscale_setup                    /* cpu setup            */
};
#endif /* CPU_XSCALE_81342 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        fa526_setttb,                   /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        fa526_icache_sync_range,        /* icache_sync_range    */

        fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
        fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
        fa526_dcache_inv_range,         /* dcache_inv_range     */
        fa526_dcache_wb_range,          /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
        fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        fa526_cpu_sleep,                /* sleep                */

        /* Soft functions */

        fa526_context_switch,           /* context_switch       */

        fa526_setup                     /* cpu setup            */
};
#endif  /* CPU_FA526 */

#if defined(CPU_ARM1176)
struct cpu_functions arm1176_cpufuncs = {
        /* MMU functions */
        .cf_control = cpufunc_control,
        .cf_setttb = arm11x6_setttb,

        /* Cache operations */
        .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
        .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
        .cf_l2cache_inv_range = (void *)cpufunc_nullop,
        .cf_l2cache_wb_range = (void *)cpufunc_nullop,
        .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

        /* Other functions */
        .cf_drain_writebuf = arm11_drain_writebuf,
        .cf_sleep = arm11x6_sleep,

        /* Soft functions */
        .cf_setup = arm11x6_setup
};
#endif /* CPU_ARM1176 */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
struct cpu_functions cortexa_cpufuncs = {
        /* MMU functions */
        .cf_control = cpufunc_control,
        .cf_setttb = armv7_setttb,

        /* Cache operations */

        /*
         * Note: For CPUs using the PL310 the L2 ops are filled in when the
         * L2 cache controller is actually enabled.
         */
        .cf_l2cache_wbinv_all = cpufunc_nullop,
        .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
        .cf_l2cache_inv_range = (void *)cpufunc_nullop,
        .cf_l2cache_wb_range = (void *)cpufunc_nullop,
        .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

        /* Other functions */
        .cf_drain_writebuf = armv7_drain_writebuf,
        .cf_sleep = armv7_cpu_sleep,

        /* Soft functions */
        .cf_setup = cortexa_setup
};
#endif /* CPU_CORTEXA || CPU_KRAIT */

/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;
#if __ARM_ARCH <= 5
u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore-v4.s */
#endif
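
/*
 * The cpu_*() wrappers in <machine/cpufunc.h> dispatch through the
 * cpufuncs table selected by set_cpufuncs().  Roughly (a sketch of the
 * usual macro shape, not a verbatim copy of the header):
 *
 *	#define cpu_setttb(t)		cpufuncs.cf_setttb(t)
 *	#define cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
 *	#define cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
 */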

#if defined(CPU_ARM9) ||        \
  defined(CPU_ARM9E) ||         \
  defined(CPU_ARM1176) ||       \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||   \
  defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||                 \
  defined(CPU_XSCALE_81342) ||  \
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

/* Global cache line sizes, use 32 as default */
int     arm_dcache_min_line_size = 32;
int     arm_icache_min_line_size = 32;
int     arm_idcache_min_line_size = 32;

static void get_cachetype_cp15(void);

/*
 * Additional cache information local to this file.  Log2 of some of the
 * above numbers.
 */
static int      arm_dcache_l2_nsets;
static int      arm_dcache_l2_assoc;
static int      arm_dcache_l2_linesize;

static void
get_cachetype_cp15(void)
{
        u_int ctype, isize, dsize, cpuid;
        u_int clevel, csize, i, sel;
        u_int multiplier;
        u_char type;

        __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
                : "=r" (ctype));

        cpuid = cpu_ident();
        /*
         * ...and thus spake the ARM ARM:
         *
         * If an <opcode2> value corresponding to an unimplemented or
         * reserved ID register is encountered, the System Control
         * processor returns the value of the main ID register.
         */
        if (ctype == cpuid)
                goto out;

        if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
                /* Resolve minimal cache line sizes */
                arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
                arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
                arm_idcache_min_line_size =
                    min(arm_icache_min_line_size, arm_dcache_min_line_size);

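                /*
                 * Walk the cache hierarchy: CLIDR (the mrc below) describes
                 * up to seven levels; for each data/unified or instruction
                 * cache we select the level via CSSELR (the mcr inside the
                 * loop) and read its geometry from CCSIDR (the mrc that
                 * follows).  E.g. a CCSIDR LineSize of 1 means eight 4-byte
                 * words, so 1 << (1 + 4) = 32-byte lines.
                 */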
                __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
                    : "=r" (clevel));
                arm_cache_level = clevel;
                arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
                i = 0;
                while ((type = (clevel & 0x7)) && i < 7) {
                        if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
                            type == CACHE_SEP_CACHE) {
                                sel = i << 1;
                                __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
                                    : : "r" (sel));
                                __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
                                    : "=r" (csize));
                                arm_cache_type[sel] = csize;
                                arm_dcache_align = 1 <<
                                    (CPUV7_CT_xSIZE_LEN(csize) + 4);
                                arm_dcache_align_mask = arm_dcache_align - 1;
                        }
                        if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
                                sel = (i << 1) | 1;
                                __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
                                    : : "r" (sel));
                                __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
                                    : "=r" (csize));
                                arm_cache_type[sel] = csize;
                        }
                        i++;
                        clevel >>= 3;
                }
        } else {
                if ((ctype & CPU_CT_S) == 0)
                        arm_pcache_unified = 1;

                /*
                 * If you want to know how this code works, go read the ARM ARM.
                 */
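
                /*
                 * Worked example (an illustration, not from the original
                 * comments): a DSIZE field with M=0, SIZE=5, ASSOC=2, LEN=2
                 * decodes below to a 2 << (5 + 8) = 16 KiB, 2 << (2 - 1) =
                 * 4-way cache with 1 << (2 + 3) = 32-byte lines.
                 */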

                arm_pcache_type = CPU_CT_CTYPE(ctype);

                if (arm_pcache_unified == 0) {
                        isize = CPU_CT_ISIZE(ctype);
                        multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
                        arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
                        if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
                                if (isize & CPU_CT_xSIZE_M)
                                        arm_picache_line_size = 0; /* not present */
                                else
                                        arm_picache_ways = 1;
                        } else {
                                arm_picache_ways = multiplier <<
                                    (CPU_CT_xSIZE_ASSOC(isize) - 1);
                        }
                        arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
                }

                dsize = CPU_CT_DSIZE(ctype);
                multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
                arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
                if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
                        if (dsize & CPU_CT_xSIZE_M)
                                arm_pdcache_line_size = 0; /* not present */
                        else
                                arm_pdcache_ways = 1;
                } else {
                        arm_pdcache_ways = multiplier <<
                            (CPU_CT_xSIZE_ASSOC(dsize) - 1);
                }
                arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

                arm_dcache_align = arm_pdcache_line_size;

                arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
                arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
                arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
                    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

        out:
                arm_dcache_align_mask = arm_dcache_align - 1;
        }
}
#endif /* ARM9 || XSCALE */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
        cputype = cpu_ident();
        cputype &= CPU_ID_CPU_MASK;

#ifdef CPU_ARM9
        if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
             (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
            (cputype & 0x0000f000) == 0x00009000) {
                cpufuncs = arm9_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
                get_cachetype_cp15();
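                /*
                 * Parameterize the set/way loops in the arm9 cache
                 * routines from the D-cache geometry probed above; the
                 * arm_dcache_l2_* variables hold log2 terms (see the
                 * "Log2 of some of the above numbers" note earlier),
                 * not level-2 cache data.
                 */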
                arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
                arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
                    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
                arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
                arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
                pmap_pte_init_generic();
                goto out;
        }
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
        if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
            cputype == CPU_ID_MV88FR571_41) {
                uint32_t sheeva_ctrl;

                sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
                    MV_L2_ENABLE);
                /*
                 * Workaround for Marvell MV78100 CPU: Cache prefetch
                 * mechanism may affect the cache coherency validity,
                 * so it needs to be disabled.
                 *
                 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
                 * L2 Prefetching Mechanism) for details.
                 */
                if (cputype == CPU_ID_MV88FR571_VD ||
                    cputype == CPU_ID_MV88FR571_41)
                        sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

                sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

                cpufuncs = sheeva_cpufuncs;
                get_cachetype_cp15();
                pmap_pte_init_generic();
                goto out;
        } else if (cputype == CPU_ID_ARM926EJS) {
                cpufuncs = armv5_ec_cpufuncs;
                get_cachetype_cp15();
                pmap_pte_init_generic();
                goto out;
        }
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
        if (cputype == CPU_ID_ARM1176JZS) {
                cpufuncs = arm1176_cpufuncs;
                get_cachetype_cp15();
                goto out;
        }
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
        switch (cputype & CPU_ID_SCHEME_MASK) {
        case CPU_ID_CORTEXA5:
        case CPU_ID_CORTEXA7:
        case CPU_ID_CORTEXA8:
        case CPU_ID_CORTEXA9:
        case CPU_ID_CORTEXA12:
        case CPU_ID_CORTEXA15:
        case CPU_ID_KRAIT300:
                cpufuncs = cortexa_cpufuncs;
                get_cachetype_cp15();
                goto out;
        default:
                break;
        }
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
        if (cputype == CPU_ID_MV88SV581X_V7 ||
            cputype == CPU_ID_MV88SV584X_V7 ||
            cputype == CPU_ID_ARM_88SV581X_V7) {
                cpufuncs = pj4bv7_cpufuncs;
                get_cachetype_cp15();
                goto out;
        }
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526)
        if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
                cpufuncs = fa526_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
                get_cachetype_cp15();
                pmap_pte_init_generic();

                goto out;
        }
#endif  /* CPU_FA526 */

#if defined(CPU_XSCALE_81342)
        if (cputype == CPU_ID_81342) {
                cpufuncs = xscalec3_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();
                goto out;
        }
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
        /* ignore core revision to test PXA2xx CPUs */
        if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
            (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
            (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
                cpufuncs = xscale_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                goto out;
        }
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
        if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
                cpufuncs = xscale_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                goto out;
        }
#endif /* CPU_XSCALE_IXP425 */
        /*
         * Bzzzz. And the answer was ...
         */
        panic("No support for this CPU type (%08x) in kernel", cputype);
        return (ARCHITECTURE_NOT_PRESENT);
out:
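        /* Have UMA align items to the probed data cache line size. */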
        uma_set_align(arm_dcache_align_mask);
        return (0);
}

/*
 * CPU Setup code
 */

#ifdef CPU_ARM9
void
arm9_setup(void)
{
        int cpuctrl, cpuctrlmask;

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
            | CPU_CONTROL_ROUNDROBIN;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
            | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
            | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
            | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /* Set the control register (SCTLR). */
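        /*
         * cpufunc_control() is a read-modify-write helper: cpuctrlmask
         * selects which SCTLR bits may change and cpuctrl supplies their
         * new values (a description of the helper's conventional
         * behavior, added for clarity).
         */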
        cpu_control(cpuctrlmask, cpuctrl);
}
#endif  /* CPU_ARM9 */

#if defined(CPU_ARM9E)
void
arm10_setup(void)
{
        int cpuctrl, cpuctrlmask;

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
            | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
            | CPU_CONTROL_BPRD_ENABLE
            | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /* Now really make sure they are clean.  */
        __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;

        /* Set the control register */
        cpu_control(0xffffffff, cpuctrl);

        /* And again. */
        cpu_idcache_wbinv_all();
}
#endif  /* CPU_ARM9E */

#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static __inline void
cpu_scc_setup_ccnt(void)
{
        /*
         * This is how you give userland access to the CCNT and PMCn
         * registers.
         * BEWARE! This gives write access also, which may not be what
         * you want!
         */
#ifdef _PMC_USER_READ_WRITE_
        /* Set PMUSERENR[0] to allow userland access */
        cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
        /* Set PMCR[2,0] to enable counters and reset CCNT */
        cp15_pmcr_set(5);
#else
        /*
         * Set up the PMCCNTR register as a cycle counter:
         * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
         * Set PMCR[2,0] to enable counters and reset CCNT
         * Set PMCNTENSET to 0x80000000 to enable CCNT
         */
        cp15_pminten_clr(0xFFFFFFFF);
        cp15_pmcr_set(5);
        cp15_pmcnten_set(0x80000000);
#endif
}
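
/*
 * For reference only (nothing in this file reads it back): once enabled,
 * the cycle counter can be fetched on ARMv7 with
 *
 *	uint32_t ccnt;
 *	__asm __volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (ccnt));
 *
 * i.e. a PMCCNTR read.
 */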
#endif

#if defined(CPU_ARM1176)
void
arm11x6_setup(void)
{
        uint32_t auxctrl, auxctrl_wax;
        uint32_t tmp, tmp2;
        uint32_t cpuid;

        cpuid = cpu_ident();

        auxctrl = 0;
        auxctrl_wax = ~0;

        /*
         * Enable an errata workaround
         */
        if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
                auxctrl = ARM1176_AUXCTL_PHD;
                auxctrl_wax = ~ARM1176_AUXCTL_PHD;
        }

        tmp = cp15_actlr_get();
        tmp2 = tmp;
        tmp &= auxctrl_wax;
        tmp |= auxctrl;
        if (tmp != tmp2)
                cp15_actlr_set(tmp);

        cpu_scc_setup_ccnt();
}
#endif  /* CPU_ARM1176 */

#ifdef CPU_MV_PJ4B
void
pj4bv7_setup(void)
{

        pj4b_config();
        cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
void
cortexa_setup(void)
{

        cpu_scc_setup_ccnt();
}
#endif  /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_FA526)
void
fa526_setup(void)
{
        int cpuctrl, cpuctrlmask;

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
            | CPU_CONTROL_BPRD_ENABLE;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
            | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
            | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
            | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /* Set the control register */
        cpu_control(0xffffffff, cpuctrl);
}
#endif  /* CPU_FA526 */

#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_81342)
void
xscale_setup(void)
{
        uint32_t auxctl;
        int cpuctrl, cpuctrlmask;

        /*
         * The XScale Write Buffer is always enabled.  Our option
         * is to enable/disable coalescing.  Note that bits 6:3
         * must always be enabled.
         */

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
            | CPU_CONTROL_BPRD_ENABLE;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
            | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
            | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
            | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC
            | CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
        cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /*
         * Set the control register.  Note that bits 6:3 must always
         * be set to 1.
         */
/*      cpu_control(cpuctrlmask, cpuctrl);*/
        cpu_control(0xffffffff, cpuctrl);

        /* Make sure write coalescing is turned on */
        __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
                : "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
        auxctl |= XSCALE_AUXCTL_K;
#else
        auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
        auxctl |= XSCALE_AUXCTL_LLR;
        auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
        __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
                : : "r" (auxctl));
}
#endif  /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_81342 */