]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/arm/arm/cpufunc.c
MFV r305100: Update amd from am-utils 6.1.5 to 6.2.
[FreeBSD/FreeBSD.git] / sys / arm / arm / cpufunc.c
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm9 support code Copyright (C) 2001 ARM Ltd
5  * Copyright (c) 1997 Mark Brinicombe.
6  * Copyright (c) 1997 Causality Limited
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Causality Limited.
20  * 4. The name of Causality Limited may not be used to endorse or promote
21  *    products derived from this software without specific prior written
22  *    permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * RiscBSD kernel project
37  *
38  * cpufuncs.c
39  *
40  * C functions for supporting CPU / MMU / TLB specific operations.
41  *
42  * Created      : 30/01/97
43  */
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
55
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 #include <vm/uma.h>
59
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
62
63 #if defined(CPU_XSCALE_81342)
64 #include <arm/xscale/i8134x/i81342reg.h>
65 #endif
66
67 #ifdef CPU_XSCALE_IXP425
68 #include <arm/xscale/ixp425/ixp425reg.h>
69 #include <arm/xscale/ixp425/ixp425var.h>
70 #endif
71
/* PRIMARY CACHE VARIABLES */
int     arm_picache_size;       /* primary I-cache size, bytes */
int     arm_picache_line_size;  /* primary I-cache line size, bytes */
int     arm_picache_ways;       /* primary I-cache associativity */

int     arm_pdcache_size;       /* and unified */
int     arm_pdcache_line_size;  /* primary D-/unified-cache line size, bytes */
int     arm_pdcache_ways;       /* primary D-/unified-cache associativity */

int     arm_pcache_type;        /* CPU_CT_CTYPE() field of the cache type reg */
int     arm_pcache_unified;     /* non-zero if I and D caches are unified */

int     arm_dcache_align;       /* D-cache line size used for range alignment */
int     arm_dcache_align_mask;  /* arm_dcache_align - 1 */

u_int   arm_cache_level;        /* raw CLIDR value (ARMv7 only) */
u_int   arm_cache_type[14];     /* CCSIDR per selector: sel = (level << 1) | I */
u_int   arm_cache_loc;          /* Level of Coherence extracted from CLIDR */
90
#ifdef CPU_ARM9
/*
 * Dispatch table for ARM9 (ARM9TDMI-family, ARMv4T) cores.  Entries are
 * positional initializers for struct cpu_functions; the trailing comment
 * on each line names the member being set.  L2 slots are no-ops: these
 * cores have no outer cache.
 */
struct cpu_functions arm9_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        arm9_setttb,                    /* Setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        arm9_icache_sync_range,         /* icache_sync_range    */

        arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
        arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
        arm9_dcache_inv_range,          /* dcache_inv_range     */
        arm9_dcache_wb_range,           /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
        arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm9_setup                      /* cpu setup            */

};
#endif /* CPU_ARM9 */
141
142 #if defined(CPU_ARM9E)
/*
 * Dispatch table for ARMv5E "EC" cores (e.g. ARM926EJ-S).  Positional
 * initializers for struct cpu_functions; trailing comments name the
 * member being set.  No outer (L2) cache, so the L2 slots are no-ops.
 */
struct cpu_functions armv5_ec_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        armv5_ec_setttb,                /* Setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
        armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
        armv5_ec_dcache_wb_range,       /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */

        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm10_setup                     /* cpu setup            */

};
192
/*
 * Dispatch table for Marvell Sheeva (Feroceon, ARMv5TE) cores.  Unlike
 * the generic ARMv5E table these cores have a real L2 cache, so the
 * sheeva_l2cache_* operations are wired in.
 */
struct cpu_functions sheeva_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        sheeva_setttb,                  /* Setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
        sheeva_dcache_inv_range,        /* dcache_inv_range     */
        sheeva_dcache_wb_range,         /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */

        sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
        sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
        sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
        sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        sheeva_cpu_sleep,               /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm10_setup                     /* cpu setup            */
};
241 #endif /* CPU_ARM9E */
242
243 #ifdef CPU_MV_PJ4B
/*
 * Dispatch table for Marvell PJ4B (ARMv7) cores.  Uses designated
 * initializers; members not named here are implicitly zero (NULL).
 */
struct cpu_functions pj4bv7_cpufuncs = {

        /* Cache operations */
        .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
        .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
        .cf_l2cache_inv_range = (void *)cpufunc_nullop,
        .cf_l2cache_wb_range = (void *)cpufunc_nullop,
        .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

        /* Other functions */
        .cf_sleep = (void *)cpufunc_nullop,

        /* Soft functions */
        .cf_setup = pj4bv7_setup
};
259 #endif /* CPU_MV_PJ4B */
260
261 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
262
/*
 * Dispatch table for Intel XScale (PXA2x0 / IXP425) cores.  Positional
 * initializers; trailing comments name the struct cpu_functions member.
 * These parts have no separate L2 cache, so the L2 slots are no-ops.
 */
struct cpu_functions xscale_cpufuncs = {
        /* CPU functions */

        xscale_cpwait,                  /* cpwait               */

        /* MMU functions */

        xscale_control,                 /* control              */
        xscale_setttb,                  /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        xscale_cache_syncI_rng,         /* icache_sync_range    */

        xscale_cache_purgeD,            /* dcache_wbinv_all     */
        xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
        xscale_cache_flushD_rng,        /* dcache_inv_range     */
        xscale_cache_cleanD_rng,        /* dcache_wb_range      */

        xscale_cache_flushID,           /* idcache_inv_all      */
        xscale_cache_purgeID,           /* idcache_wbinv_all    */
        xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        xscale_cpu_sleep,               /* sleep                */

        /* Soft functions */

        xscale_context_switch,          /* context_switch       */

        xscale_setup                    /* cpu setup            */
};
310 #endif
311 /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
312
313 #ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the XScale 3rd-generation core (i81342).  Same
 * layout as xscale_cpufuncs but with the core-3 cache routines and a
 * real L2 cache (xscalec3_l2cache_*).
 */
struct cpu_functions xscalec3_cpufuncs = {
        /* CPU functions */

        xscale_cpwait,                  /* cpwait               */

        /* MMU functions */

        xscale_control,                 /* control              */
        xscalec3_setttb,                /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        xscalec3_cache_syncI_rng,       /* icache_sync_range    */

        xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
        xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
        xscale_cache_flushD_rng,        /* dcache_inv_range     */
        xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */

        xscale_cache_flushID,           /* idcache_inv_all      */
        xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
        xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
        xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
        xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
        xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
        xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        xscale_cpu_sleep,               /* sleep                */

        /* Soft functions */

        xscalec3_context_switch,        /* context_switch       */

        xscale_setup                    /* cpu setup            */
};
361 #endif /* CPU_XSCALE_81342 */
362
363
364 #if defined(CPU_FA526)
/*
 * Dispatch table for Faraday FA526/FA626TE (ARMv4) cores.  Positional
 * initializers; trailing comments name the struct cpu_functions member.
 * No L2 cache, so the L2 slots are no-ops.
 */
struct cpu_functions fa526_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        fa526_setttb,                   /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        fa526_icache_sync_range,        /* icache_sync_range    */

        fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
        fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
        fa526_dcache_inv_range,         /* dcache_inv_range     */
        fa526_dcache_wb_range,          /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
        fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        fa526_cpu_sleep,                /* sleep                */

        /* Soft functions */


        fa526_context_switch,           /* context_switch       */

        fa526_setup                     /* cpu setup            */
};
413 #endif  /* CPU_FA526 */
414
415 #if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 (ARMv6) cores.  Designated initializers;
 * members not named here are implicitly zero (NULL).
 */
struct cpu_functions arm1176_cpufuncs = {

        /* Cache operations */
        .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
        .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
        .cf_l2cache_inv_range = (void *)cpufunc_nullop,
        .cf_l2cache_wb_range = (void *)cpufunc_nullop,
        .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

        /* Other functions */
        .cf_sleep = arm11x6_sleep,

        /* Soft functions */
        .cf_setup = arm11x6_setup
};
431 #endif /*CPU_ARM1176 */
432
433 #if defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A family and Krait (ARMv7) cores.
 * Designated initializers; members not named here are implicitly
 * zero (NULL).
 */
struct cpu_functions cortexa_cpufuncs = {

        /* Cache operations */

        /*
         * Note: For CPUs using the PL310 the L2 ops are filled in when the
         * L2 cache controller is actually enabled.
         */
        .cf_l2cache_wbinv_all = cpufunc_nullop,
        .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
        .cf_l2cache_inv_range = (void *)cpufunc_nullop,
        .cf_l2cache_wb_range = (void *)cpufunc_nullop,
        .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

        /* Other functions */
        .cf_sleep = armv7_cpu_sleep,

        /* Soft functions */
        .cf_setup = cortexa_setup
};
454 #endif /* CPU_CORTEXA8 || CPU_CORTEXA_MP || CPU_KRAIT */
455
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;  /* active dispatch table; filled in by set_cpufuncs() */
u_int cputype;                  /* masked CPU id; set by set_cpufuncs() */
#if __ARM_ARCH <= 5
u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore-v4.s */
#endif
465
466 #if defined(CPU_ARM9) ||        \
467   defined (CPU_ARM9E) ||        \
468   defined(CPU_ARM1176) ||       \
469   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
470   defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||                 \
471   defined(CPU_XSCALE_81342) || \
472   defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
473
/* Global cache line sizes, use 32 as default */
int     arm_dcache_min_line_size = 32;
int     arm_icache_min_line_size = 32;
int     arm_idcache_min_line_size = 32; /* min of the I and D line sizes */

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int      arm_dcache_l2_nsets;    /* log2 of D-cache set count */
static int      arm_dcache_l2_assoc;    /* log2 of D-cache associativity */
static int      arm_dcache_l2_linesize; /* log2 of D-cache line size */
486
/*
 * Read the CP15 cache identification registers and populate the global
 * cache geometry variables (arm_picache_*, arm_pdcache_*, arm_dcache_*,
 * arm_cache_*).  Handles both the ARMv7 CLIDR/CCSIDR scheme and the
 * older pre-v7 Cache Type Register layout.
 */
static void
get_cachetype_cp15()
{
        u_int ctype, isize, dsize, cpuid;
        u_int clevel, csize, i, sel;
        u_int multiplier;
        u_char type;

        /* CP15 c0/c0/1: Cache Type Register. */
        __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
                : "=r" (ctype));

        cpuid = cpu_ident();
        /*
         * ...and thus spake the ARM ARM:
         *
         * If an <opcode2> value corresponding to an unimplemented or
         * reserved ID register is encountered, the System Control
         * processor returns the value of the main ID register.
         */
        if (ctype == cpuid)
                goto out;

        if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
                /* Resolve minimal cache line sizes */
                arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
                arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
                arm_idcache_min_line_size =
                    min(arm_icache_min_line_size, arm_dcache_min_line_size);

                /* CP15 c0/c0/1 (op1=1): CLIDR, Cache Level ID Register. */
                __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
                    : "=r" (clevel));
                arm_cache_level = clevel;
                arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
                i = 0;
                /* Walk the CLIDR: 3 bits of cache type per level, up to 7. */
                while ((type = (clevel & 0x7)) && i < 7) {
                        if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
                            type == CACHE_SEP_CACHE) {
                                sel = i << 1;   /* even selector: data/unified */
                                /* CSSELR: select cache level/type ... */
                                __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
                                    : : "r" (sel));
                                /* ... then read its geometry from CCSIDR. */
                                __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
                                    : "=r" (csize));
                                arm_cache_type[sel] = csize;
                                arm_dcache_align = 1 <<
                                    (CPUV7_CT_xSIZE_LEN(csize) + 4);
                                arm_dcache_align_mask = arm_dcache_align - 1;
                        }
                        if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
                                sel = (i << 1) | 1;     /* odd selector: instruction */
                                __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
                                    : : "r" (sel));
                                __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
                                    : "=r" (csize));
                                arm_cache_type[sel] = csize;
                        }
                        i++;
                        clevel >>= 3;
                }
        } else {
                if ((ctype & CPU_CT_S) == 0)
                        arm_pcache_unified = 1;

                /*
                 * If you want to know how this code works, go read the ARM ARM.
                 */

                arm_pcache_type = CPU_CT_CTYPE(ctype);

                if (arm_pcache_unified == 0) {
                        isize = CPU_CT_ISIZE(ctype);
                        multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
                        arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
                        if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
                                if (isize & CPU_CT_xSIZE_M)
                                        arm_picache_line_size = 0; /* not present */
                                else
                                        arm_picache_ways = 1;
                        } else {
                                arm_picache_ways = multiplier <<
                                    (CPU_CT_xSIZE_ASSOC(isize) - 1);
                        }
                        arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
                }

                dsize = CPU_CT_DSIZE(ctype);
                multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
                arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
                if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
                        if (dsize & CPU_CT_xSIZE_M)
                                arm_pdcache_line_size = 0; /* not present */
                        else
                                arm_pdcache_ways = 1;
                } else {
                        arm_pdcache_ways = multiplier <<
                            (CPU_CT_xSIZE_ASSOC(dsize) - 1);
                }
                arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

                arm_dcache_align = arm_pdcache_line_size;

                arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
                arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
                arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
                    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

                /*
                 * NOTE(review): also reached via the goto above when the CT
                 * register reads back as the MIDR (i.e. it is unimplemented);
                 * in that case arm_dcache_align still holds its prior value.
                 */
        out:
                arm_dcache_align_mask = arm_dcache_align - 1;
        }
}
596 #endif /* ARM9 || XSCALE */
597
598 /*
599  * Cannot panic here as we may not have a console yet ...
600  */
601
/*
 * Identify the running CPU from its masked id register and install the
 * matching cpu_functions dispatch table, probe the cache geometry, and
 * do per-family pmap/PTE initialization.
 *
 * Returns 0 on success.  If no compiled-in support matches, panics
 * (the trailing return of ARCHITECTURE_NOT_PRESENT is never reached).
 */
int
set_cpufuncs()
{
        cputype = cpu_ident();
        cputype &= CPU_ID_CPU_MASK;

#ifdef CPU_ARM9
        if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
             (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
            (cputype & 0x0000f000) == 0x00009000) {
                cpufuncs = arm9_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
                get_cachetype_cp15();
                /*
                 * Precompute the set/index stepping constants used by the
                 * arm9 D-cache loop from the log2 geometry values.
                 */
                arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
                arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
                    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
                arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
                arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
                pmap_pte_init_generic();
                goto out;
        }
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
        if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
            cputype == CPU_ID_MV88FR571_41) {
                uint32_t sheeva_ctrl;

                sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
                    MV_L2_ENABLE);
                /*
                 * Workaround for Marvell MV78100 CPU: Cache prefetch
                 * mechanism may affect the cache coherency validity,
                 * so it needs to be disabled.
                 *
                 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
                 * L2 Prefetching Mechanism) for details.
                 */
                if (cputype == CPU_ID_MV88FR571_VD ||
                    cputype == CPU_ID_MV88FR571_41)
                        sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

                sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

                cpufuncs = sheeva_cpufuncs;
                get_cachetype_cp15();
                pmap_pte_init_generic();
                goto out;
        } else if (cputype == CPU_ID_ARM926EJS) {
                cpufuncs = armv5_ec_cpufuncs;
                get_cachetype_cp15();
                pmap_pte_init_generic();
                goto out;
        }
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
        if (cputype == CPU_ID_ARM1176JZS) {
                cpufuncs = arm1176_cpufuncs;
                get_cachetype_cp15();
                goto out;
        }
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
        switch(cputype & CPU_ID_SCHEME_MASK) {
        case CPU_ID_CORTEXA5:
        case CPU_ID_CORTEXA7:
        case CPU_ID_CORTEXA8:
        case CPU_ID_CORTEXA9:
        case CPU_ID_CORTEXA12:
        case CPU_ID_CORTEXA15:
        case CPU_ID_CORTEXA53:
        case CPU_ID_CORTEXA57:
        case CPU_ID_CORTEXA72:
        case CPU_ID_KRAIT300:
                cpufuncs = cortexa_cpufuncs;
                get_cachetype_cp15();
                goto out;
        default:
                break;
        }
#endif /* CPU_CORTEXA8 || CPU_CORTEXA_MP || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
        if (cputype == CPU_ID_MV88SV581X_V7 ||
            cputype == CPU_ID_MV88SV584X_V7 ||
            cputype == CPU_ID_ARM_88SV581X_V7) {
                cpufuncs = pj4bv7_cpufuncs;
                get_cachetype_cp15();
                goto out;
        }
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526)
        if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
                cpufuncs = fa526_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
                get_cachetype_cp15();
                pmap_pte_init_generic();

                goto out;
        }
#endif  /* CPU_FA526 */

#if defined(CPU_XSCALE_81342)
        if (cputype == CPU_ID_81342) {
                cpufuncs = xscalec3_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();
                goto out;
        }
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
        /* ignore core revision to test PXA2xx CPUs */
        if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
            (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
            (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

                cpufuncs = xscale_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                goto out;
        }
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
        if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

                cpufuncs = xscale_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                goto out;
        }
#endif /* CPU_XSCALE_IXP425 */
        /*
         * Bzzzz. And the answer was ...
         */
        panic("No support for this CPU type (%08x) in kernel", cputype);
        /* NOTREACHED: panic() does not return. */
        return(ARCHITECTURE_NOT_PRESENT);
out:
        /* UMA alignment must match the D-cache line just probed. */
        uma_set_align(arm_dcache_align_mask);
        return (0);
}
748
749 /*
750  * CPU Setup code
751  */
752
#ifdef CPU_ARM9
/*
 * One-time SCTLR configuration for ARM9 cores: enable the MMU, both
 * caches, the write buffer and late aborts, select round-robin cache
 * replacement, and (when configured) alignment faults, big-endian
 * operation and the relocated (high) vector page.
 */
void
arm9_setup(void)
{
        int ctrl_bits, ctrl_mask;

        /* Bits we will turn on unconditionally. */
        ctrl_bits = CPU_CONTROL_MMU_ENABLE;
        ctrl_bits |= CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE;
        ctrl_bits |= CPU_CONTROL_SYST_ENABLE;
        ctrl_bits |= CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE;
        ctrl_bits |= CPU_CONTROL_WBUF_ENABLE;
        ctrl_bits |= CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_ROUNDROBIN;

        /* Every bit this function is allowed to modify in the SCTLR. */
        ctrl_mask = ctrl_bits;
        ctrl_mask |= CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE;
        ctrl_mask |= CPU_CONTROL_AFLT_ENABLE | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        ctrl_bits |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        /* Big-endian kernel: the core must match. */
        ctrl_bits |= CPU_CONTROL_BEND_ENABLE;
#endif
        if (vector_page == ARM_VECTORS_HIGH)
                ctrl_bits |= CPU_CONTROL_VECRELOC;

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /* Set the control register (SCTLR)   */
        cpu_control(ctrl_mask, ctrl_bits);

}
#endif  /* CPU_ARM9 */
790
#if defined(CPU_ARM9E)
/*
 * One-time SCTLR configuration for ARMv5E-class cores (shared by the
 * armv5_ec and sheeva tables): enable the MMU, caches, write buffer
 * and branch prediction, plus optional alignment-fault, big-endian and
 * high-vector settings, with cache clean/invalidate around the switch.
 *
 * Fix vs. previous revision: the local `cpuctrlmask` was computed but
 * never used (cpu_control() is deliberately called with an all-ones
 * mask here); the dead computation has been removed.  The stale
 * "CPU_ARM10" endif comment was also corrected to match the guard.
 */
void
arm10_setup(void)
{
        int cpuctrl;

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        /* Big-endian kernel: the core must match. */
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /* Now really make sure they are clean.  */
        __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;

        /* Set the control register; all-ones mask rewrites every bit. */
        cpu_control(0xffffffff, cpuctrl);

        /* And again. */
        cpu_idcache_wbinv_all();
}
#endif  /* CPU_ARM9E */
831
#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
/*
 * Configure the performance monitor unit so PMCCNTR free-runs as a
 * cycle counter.
 *
 * This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
#endif
861
#if defined(CPU_ARM1176)
/*
 * arm11x6_setup:
 *
 *	Apply ARM1176-specific auxiliary control register (ACTLR) fixups
 *	and start the cycle counter.
 */
void
arm11x6_setup(void)
{
	uint32_t want_set, keep_mask;
	uint32_t oldval, newval;
	uint32_t cpuid;

	cpuid = cpu_ident();

	want_set = 0;
	keep_mask = ~0;

	/*
	 * Enable an errata workaround: on ARM1176JZS parts, force the
	 * PHD bit in the auxiliary control register.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		want_set = ARM1176_AUXCTL_PHD;
		keep_mask = ~ARM1176_AUXCTL_PHD;
	}

	/* Read-modify-write ACTLR, skipping the write when unchanged. */
	oldval = cp15_actlr_get();
	newval = (oldval & keep_mask) | want_set;
	if (newval != oldval)
		cp15_actlr_set(newval);

	cpu_scc_setup_ccnt();
}
#endif	/* CPU_ARM1176 */
893
#ifdef CPU_MV_PJ4B
/*
 * CPU setup for Marvell PJ4B (ARMv7) cores: apply the PJ4B-specific
 * configuration and then enable the cycle counter.
 */
void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */
903
#if defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)

/*
 * CPU setup for Cortex-A8, Cortex-A MP-class, and Krait cores: the only
 * board-independent work needed here is enabling the cycle counter.
 */
void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif	/* CPU_CORTEXA8 || CPU_CORTEXA_MP || CPU_KRAIT */
913
#if defined(CPU_FA526)
/*
 * fa526_setup:
 *
 *	Configure the system control register (SCTLR) for Faraday FA526
 *	cores.  cpu_control() is invoked with an all-ones mask, so every
 *	SCTLR bit is rewritten; the previously computed (and unused)
 *	cpuctrlmask variable has been removed.
 */
void
fa526_setup(void)
{
	int cpuctrl;

	/* MMU, caches, write buffer, late aborts, branch prediction. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		| CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register; the all-ones mask rewrites every bit. */
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_FA526 */
951
#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_81342)
/*
 * xscale_setup:
 *
 *	Configure the system control register (SCTLR) and the auxiliary
 *	control register for Intel XScale cores, then set up write-buffer
 *	coalescing.  cpu_control() is invoked with an all-ones mask, so
 *	every SCTLR bit is rewritten; the previously computed (and
 *	unused) cpuctrlmask variable and the commented-out masked
 *	cpu_control() call have been removed.
 */
void
xscale_setup(void)
{
	uint32_t auxctl;
	int cpuctrl;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
#endif	/* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_81342 */