/*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */

/*-
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/disassem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>

#include <machine/cpufunc.h>

#if defined(CPU_XSCALE_81342)
#include <arm/xscale/i8134x/i81342reg.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#endif
/* PRIMARY CACHE VARIABLES */
int     arm_picache_size;
int     arm_picache_line_size;
int     arm_picache_ways;

int     arm_pdcache_size;       /* and unified */
int     arm_pdcache_line_size;
int     arm_pdcache_ways;

int     arm_pcache_type;
int     arm_pcache_unified;

int     arm_dcache_align;
int     arm_dcache_align_mask;

u_int   arm_cache_level;
u_int   arm_cache_type[14];
u_int   arm_cache_loc;
#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        arm9_setttb,                    /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        arm9_icache_sync_range,         /* icache_sync_range    */

        arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
        arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
        arm9_dcache_inv_range,          /* dcache_inv_range     */
        arm9_dcache_wb_range,           /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
        arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm9_setup                      /* cpu setup            */
};
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
struct cpu_functions armv5_ec_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        armv5_ec_setttb,                /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
        armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
        armv5_ec_dcache_wb_range,       /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */

        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm10_setup                     /* cpu setup            */
};

struct cpu_functions sheeva_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        sheeva_setttb,                  /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
        sheeva_dcache_inv_range,        /* dcache_inv_range     */
        sheeva_dcache_wb_range,         /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */

        sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
        sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
        sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
        sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        sheeva_cpu_sleep,               /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm10_setup                     /* cpu setup            */
};
#endif /* CPU_ARM9E */

#ifdef CPU_MV_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {

        /* Cache operations */
        .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
        .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
        .cf_l2cache_inv_range = (void *)cpufunc_nullop,
        .cf_l2cache_wb_range = (void *)cpufunc_nullop,
        .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

        /* Other functions */
        .cf_sleep = (void *)cpufunc_nullop,

        /* Soft functions */
        .cf_setup = pj4bv7_setup
};
#endif /* CPU_MV_PJ4B */

#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)

struct cpu_functions xscale_cpufuncs = {
        /* CPU functions */

        xscale_cpwait,                  /* cpwait               */

        /* MMU functions */

        xscale_control,                 /* control              */
        xscale_setttb,                  /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        xscale_cache_syncI_rng,         /* icache_sync_range    */

        xscale_cache_purgeD,            /* dcache_wbinv_all     */
        xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
        xscale_cache_flushD_rng,        /* dcache_inv_range     */
        xscale_cache_cleanD_rng,        /* dcache_wb_range      */

        xscale_cache_flushID,           /* idcache_inv_all      */
        xscale_cache_purgeID,           /* idcache_wbinv_all    */
        xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        xscale_cpu_sleep,               /* sleep                */

        /* Soft functions */

        xscale_context_switch,          /* context_switch       */

        xscale_setup                    /* cpu setup            */
};
#endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */

#ifdef CPU_XSCALE_81342
struct cpu_functions xscalec3_cpufuncs = {
        /* CPU functions */

        xscale_cpwait,                  /* cpwait               */

        /* MMU functions */

        xscale_control,                 /* control              */
        xscalec3_setttb,                /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        xscalec3_cache_syncI_rng,       /* icache_sync_range    */

        xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
        xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
        xscale_cache_flushD_rng,        /* dcache_inv_range     */
        xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */

        xscale_cache_flushID,           /* idcache_inv_all      */
        xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
        xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
        xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
        xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
        xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
        xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        xscale_cpu_sleep,               /* sleep                */

        /* Soft functions */

        xscalec3_context_switch,        /* context_switch       */

        xscale_setup                    /* cpu setup            */
};
#endif /* CPU_XSCALE_81342 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        fa526_setttb,                   /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        fa526_icache_sync_range,        /* icache_sync_range    */

        fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
        fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
        fa526_dcache_inv_range,         /* dcache_inv_range     */
        fa526_dcache_wb_range,          /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
        fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        fa526_cpu_sleep,                /* sleep                */

        /* Soft functions */

        fa526_context_switch,           /* context_switch       */

        fa526_setup                     /* cpu setup            */
};
#endif /* CPU_FA526 */

#if defined(CPU_ARM1176)
struct cpu_functions arm1176_cpufuncs = {

        /* Cache operations */
        .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
        .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
        .cf_l2cache_inv_range = (void *)cpufunc_nullop,
        .cf_l2cache_wb_range = (void *)cpufunc_nullop,
        .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

        /* Other functions */
        .cf_sleep = arm11x6_sleep,

        /* Soft functions */
        .cf_setup = arm11x6_setup
};
#endif /* CPU_ARM1176 */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
struct cpu_functions cortexa_cpufuncs = {

        /* Cache operations */

        /*
         * Note: For CPUs using the PL310 the L2 ops are filled in when the
         * L2 cache controller is actually enabled.
         */
        .cf_l2cache_wbinv_all = cpufunc_nullop,
        .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
        .cf_l2cache_inv_range = (void *)cpufunc_nullop,
        .cf_l2cache_wb_range = (void *)cpufunc_nullop,
        .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

        /* Other functions */
        .cf_sleep = armv7_cpu_sleep,

        /* Soft functions */
        .cf_setup = cortexa_setup
};
#endif /* CPU_CORTEXA || CPU_KRAIT */

/*
 * Global variables also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;
#if __ARM_ARCH <= 5
u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore-v4.s */
#endif

#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM1176) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
  defined(CPU_XSCALE_81342) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)

/* Global cache line sizes, use 32 as default */
int     arm_dcache_min_line_size = 32;
int     arm_icache_min_line_size = 32;
int     arm_idcache_min_line_size = 32;

static void get_cachetype_cp15(void);

/*
 * Additional cache information local to this file.  Log2 of some of the
 * above numbers.
 */
static int      arm_dcache_l2_nsets;
static int      arm_dcache_l2_assoc;
static int      arm_dcache_l2_linesize;

static void
get_cachetype_cp15(void)
{
        u_int ctype, isize, dsize, cpuid;
        u_int clevel, csize, i, sel;
        u_int multiplier;
        u_char type;

        /* Read the CP15 Cache Type Register (CTR). */
        __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
                : "=r" (ctype));

        cpuid = cpu_ident();
        /*
         * ...and thus spake the ARM ARM:
         *
         * If an <opcode2> value corresponding to an unimplemented or
         * reserved ID register is encountered, the System Control
         * processor returns the value of the main ID register.
         */
        if (ctype == cpuid)
                goto out;

        if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
                /* Resolve minimal cache line sizes */
                arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
                arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
                arm_idcache_min_line_size =
                    min(arm_icache_min_line_size, arm_dcache_min_line_size);
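
                /*
                 * Worked example (hypothetical CTR values): the xMINLINE
                 * fields encode log2(line size in 4-byte words), so
                 * DMINLINE == 4 gives 1 << (4 + 2) = 64-byte D-cache lines
                 * and IMINLINE == 3 gives 32-byte I-cache lines; the
                 * unified minimum would then be 32.
                 */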

                /* Read CLIDR, the Cache Level ID Register. */
                __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
                    : "=r" (clevel));
                arm_cache_level = clevel;
                arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
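                /*
                 * CLIDR describes up to seven cache levels, three bits per
                 * level (Ctype1..Ctype7); LoC is the Level of Coherency.
                 * The loop below walks those fields, selecting each cache
                 * with CSSELR and reading its geometry from CCSIDR.
                 */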
                i = 0;
                while ((type = (clevel & 0x7)) && i < 7) {
                        if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
                            type == CACHE_SEP_CACHE) {
                                sel = i << 1;
                                /* Select the cache (CSSELR), read CCSIDR. */
                                __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
                                    : : "r" (sel));
                                __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
                                    : "=r" (csize));
                                arm_cache_type[sel] = csize;
                                arm_dcache_align = 1 <<
                                    (CPUV7_CT_xSIZE_LEN(csize) + 4);
                                arm_dcache_align_mask = arm_dcache_align - 1;
                        }
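                        /*
                         * Worked example (hypothetical CCSIDR value): the
                         * LineSize field encodes log2(words per line) - 2,
                         * so a LineSize of 1 makes the D-cache line and
                         * alignment above 1 << (1 + 4) = 32 bytes.
                         */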
                        if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
                                sel = (i << 1) | 1;
                                __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
                                    : : "r" (sel));
                                __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
                                    : "=r" (csize));
                                arm_cache_type[sel] = csize;
                        }
                        i++;
                        clevel >>= 3;
                }
        } else {
                if ((ctype & CPU_CT_S) == 0)
                        arm_pcache_unified = 1;

                /*
                 * If you want to know how this code works, go read the ARM ARM.
                 */

                arm_pcache_type = CPU_CT_CTYPE(ctype);

                if (arm_pcache_unified == 0) {
                        isize = CPU_CT_ISIZE(ctype);
                        multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
                        arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
                        if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
                                if (isize & CPU_CT_xSIZE_M)
                                        arm_picache_line_size = 0; /* not present */
                                else
                                        arm_picache_ways = 1;
                        } else {
                                arm_picache_ways = multiplier <<
                                    (CPU_CT_xSIZE_ASSOC(isize) - 1);
                        }
                        arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
                }

                dsize = CPU_CT_DSIZE(ctype);
                multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
                arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
                if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
                        if (dsize & CPU_CT_xSIZE_M)
                                arm_pdcache_line_size = 0; /* not present */
                        else
                                arm_pdcache_ways = 1;
                } else {
                        arm_pdcache_ways = multiplier <<
                            (CPU_CT_xSIZE_ASSOC(dsize) - 1);
                }
                arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
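
                /*
                 * Worked example (hypothetical CTR fields): with M == 0 the
                 * multiplier is 2, so SIZE == 5 gives 2 << (5 + 8) = 16 KB,
                 * ASSOC == 2 gives 2 << 1 = 4 ways, and LEN == 2 gives
                 * 1U << (2 + 3) = 32-byte lines.
                 */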

                arm_dcache_align = arm_pdcache_line_size;

                arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
                arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
                arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
                    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

        out:
                arm_dcache_align_mask = arm_dcache_align - 1;
        }
}
#endif /* ARM9 || ARM9E || ARM1176 || XSCALE || FA526 || PJ4B || CORTEXA || KRAIT */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
        cputype = cpu_ident();
        cputype &= CPU_ID_CPU_MASK;

#ifdef CPU_ARM9
        if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
             (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
            (cputype & 0x0000f000) == 0x00009000) {
                cpufuncs = arm9_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
                get_cachetype_cp15();
                arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
                arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
                    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
                arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
                arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
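                /*
                 * These four values parametrize the set/way loops in the
                 * ARM9 cache assembly: the "sets" pair steps the set index
                 * in the low bits of the c7 operand, while the "index"
                 * pair steps the way number in the uppermost bits (hence
                 * the 1U << (32 - log2(ways)) increment).  This summarizes
                 * how the values appear to be consumed, as a reading of
                 * arm9_dcache_wbinv_all in cpufunc_asm_arm9.S.
                 */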
                pmap_pte_init_generic();
                goto out;
        }
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
        if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
            cputype == CPU_ID_MV88FR571_41) {
                uint32_t sheeva_ctrl;

                sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
                    MV_L2_ENABLE);
                /*
                 * Workaround for Marvell MV78100 CPU: the cache prefetch
                 * mechanism may break cache coherency, so it needs to be
                 * disabled.
                 *
                 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
                 * L2 Prefetching Mechanism) for details.
                 */
                if (cputype == CPU_ID_MV88FR571_VD ||
                    cputype == CPU_ID_MV88FR571_41)
                        sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

                sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

                cpufuncs = sheeva_cpufuncs;
                get_cachetype_cp15();
                pmap_pte_init_generic();
                goto out;
        } else if (cputype == CPU_ID_ARM926EJS) {
                cpufuncs = armv5_ec_cpufuncs;
                get_cachetype_cp15();
                pmap_pte_init_generic();
                goto out;
        }
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
        if (cputype == CPU_ID_ARM1176JZS) {
                cpufuncs = arm1176_cpufuncs;
                get_cachetype_cp15();
                goto out;
        }
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
        switch (cputype & CPU_ID_SCHEME_MASK) {
        case CPU_ID_CORTEXA5:
        case CPU_ID_CORTEXA7:
        case CPU_ID_CORTEXA8:
        case CPU_ID_CORTEXA9:
        case CPU_ID_CORTEXA12:
        case CPU_ID_CORTEXA15:
        case CPU_ID_CORTEXA53:
        case CPU_ID_CORTEXA57:
        case CPU_ID_CORTEXA72:
        case CPU_ID_KRAIT300:
                cpufuncs = cortexa_cpufuncs;
                get_cachetype_cp15();
                goto out;
        default:
                break;
        }
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
        if (cputype == CPU_ID_MV88SV581X_V7 ||
            cputype == CPU_ID_MV88SV584X_V7 ||
            cputype == CPU_ID_ARM_88SV581X_V7) {
                cpufuncs = pj4bv7_cpufuncs;
                get_cachetype_cp15();
                goto out;
        }
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526)
        if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
                cpufuncs = fa526_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* FA526 needs it */
                get_cachetype_cp15();
                pmap_pte_init_generic();

                goto out;
        }
#endif /* CPU_FA526 */

#if defined(CPU_XSCALE_81342)
        if (cputype == CPU_ID_81342) {
                cpufuncs = xscalec3_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();
                goto out;
        }
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
        /* ignore core revision to test PXA2xx CPUs */
        if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
            (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
            (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
                cpufuncs = xscale_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                goto out;
        }
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
        if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
                cpufuncs = xscale_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                goto out;
        }
#endif /* CPU_XSCALE_IXP425 */
        /*
         * Bzzzz. And the answer was ...
         */
        panic("No support for this CPU type (%08x) in kernel", cputype);
        return (ARCHITECTURE_NOT_PRESENT);
out:
        uma_set_align(arm_dcache_align_mask);
        return (0);
}
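
/*
 * After set_cpufuncs() returns, the rest of the kernel reaches these
 * operations through the cpu_*() wrappers in <machine/cpufunc.h>, which
 * dispatch through the global cpufuncs table filled in above (e.g.
 * cpu_idcache_wbinv_all() ends up calling cpufuncs.cf_idcache_wbinv_all()).
 * This is a summary of the dispatch scheme for the reader; see cpufunc.h
 * for the exact macro definitions.
 */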

/*
 * CPU Setup code
 */

#ifdef CPU_ARM9
void
arm9_setup(void)
{
        int cpuctrl, cpuctrlmask;

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
            | CPU_CONTROL_ROUNDROBIN;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
            | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
            | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
            | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
            | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

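        /*
         * As used throughout this file, cpu_control(mask, value) performs a
         * read-modify-write of the SCTLR: the bits selected by mask take
         * their new state from value and all other bits are preserved, so a
         * mask of 0xffffffff rewrites the whole register.  This restates
         * the calling convention as inferred from the call sites here; see
         * cpufunc_control() for the authoritative implementation.
         */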
        /* Set the control register (SCTLR) */
        cpu_control(cpuctrlmask, cpuctrl);
}
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E)
void
arm10_setup(void)
{
        int cpuctrl, cpuctrlmask;

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
            | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
            | CPU_CONTROL_BPRD_ENABLE
            | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /* Now really make sure they are clean (c7, c7, 0 invalidates I+D). */
        __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;

        /* Set the control register */
        cpu_control(0xffffffff, cpuctrl);

        /* And again. */
        cpu_idcache_wbinv_all();
}
#endif /* CPU_ARM9E */

#if defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || \
    defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static __inline void
cpu_scc_setup_ccnt(void)
{
/*
 * This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
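/*
 * For illustration (not exercised in this file): once PMUSERENR[0] is set,
 * userland on an ARMv7 core could read the cycle counter with something
 * like:
 *
 *      u_int ccnt;
 *      __asm __volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (ccnt));
 *
 * PMCCNTR is the ARMv7 encoding; ARM1176 keeps its CCNT in a different
 * register (c15, c12, 1).
 */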
#ifdef _PMC_USER_READ_WRITE_
        /* Set PMUSERENR[0] to allow userland access */
        cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
        /* Set PMCR[2,0] to enable counters and reset CCNT */
        cp15_pmcr_set(5);
#else
        /*
         * Set up the PMCCNTR register as a cycle counter:
         *  - set PMINTENCLR to 0xFFFFFFFF to block interrupts
         *  - set PMCR[2,0] to enable counters and reset CCNT
         *  - set PMCNTENSET to 0x80000000 to enable CCNT
         */
        cp15_pminten_clr(0xFFFFFFFF);
        cp15_pmcr_set(5);
        cp15_pmcnten_set(0x80000000);
#endif
}
#endif

#if defined(CPU_ARM1176)
void
arm11x6_setup(void)
{
        uint32_t auxctrl, auxctrl_wax;
        uint32_t tmp, tmp2;
        uint32_t cpuid;

        cpuid = cpu_ident();

        auxctrl = 0;
        auxctrl_wax = ~0;

        /*
         * Enable an errata workaround
         */
        if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
                auxctrl = ARM1176_AUXCTL_PHD;
                auxctrl_wax = ~ARM1176_AUXCTL_PHD;
        }

        tmp = cp15_actlr_get();
        tmp2 = tmp;
        tmp &= auxctrl_wax;
        tmp |= auxctrl;
        if (tmp != tmp2)
                cp15_actlr_set(tmp);

        cpu_scc_setup_ccnt();
}
#endif /* CPU_ARM1176 */

#ifdef CPU_MV_PJ4B
void
pj4bv7_setup(void)
{

        pj4b_config();
        cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
void
cortexa_setup(void)
{

        cpu_scc_setup_ccnt();
}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_FA526)
void
fa526_setup(void)
{
        int cpuctrl, cpuctrlmask;

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
                 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
                 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
                 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
                 | CPU_CONTROL_BPRD_ENABLE;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
                 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
                 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
                 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
                 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
                 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
                 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /* Set the control register */
        cpu_control(0xffffffff, cpuctrl);
}
#endif /* CPU_FA526 */

#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_81342)
void
xscale_setup(void)
{
        uint32_t auxctl;
        int cpuctrl, cpuctrlmask;

        /*
         * The XScale Write Buffer is always enabled.  Our option
         * is to enable/disable coalescing.  Note that bits 6:3
         * must always be enabled.
         */
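        /*
         * For reference (ARMv4/v5 SCTLR layout): bits 3..6 are the legacy
         * W (write buffer), P (32-bit exception), D (32-bit data) and
         * L (late abort) bits, i.e. CPU_CONTROL_WBUF_ENABLE,
         * CPU_CONTROL_32BP_ENABLE, CPU_CONTROL_32BD_ENABLE and
         * CPU_CONTROL_LABT_ENABLE below; they are "should be one" on these
         * cores, which is why both cpuctrl and cpuctrlmask include them.
         */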

        cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
                 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
                 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
                 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
                 | CPU_CONTROL_BPRD_ENABLE;
        cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
                 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
                 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
                 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
                 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
                 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
                 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC
                 | CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
        cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
        cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /*
         * Set the control register.  Note that bits 6:3 must always
         * be set to 1.
         */
/*      cpu_control(cpuctrlmask, cpuctrl);*/
        cpu_control(0xffffffff, cpuctrl);

        /* Configure write coalescing in the auxiliary control register */
        __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
                : "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
        auxctl |= XSCALE_AUXCTL_K;
#else
        auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
        auxctl |= XSCALE_AUXCTL_LLR;
        auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
        __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
                : : "r" (auxctl));
}
#endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_81342 */