]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/arm/arm/cpufunc.c
MFH
[FreeBSD/FreeBSD.git] / sys / arm / arm / cpufunc.c
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm9 support code Copyright (C) 2001 ARM Ltd
5  * Copyright (c) 1997 Mark Brinicombe.
6  * Copyright (c) 1997 Causality Limited
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Causality Limited.
20  * 4. The name of Causality Limited may not be used to endorse or promote
21  *    products derived from this software without specific prior written
22  *    permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * RiscBSD kernel project
37  *
38  * cpufuncs.c
39  *
40  * C functions for supporting CPU / MMU / TLB specific operations.
41  *
42  * Created      : 30/01/97
43  */
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
55
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 #include <vm/uma.h>
59
60 #include <machine/acle-compat.h>
61 #include <machine/cpuconf.h>
62 #include <machine/cpufunc.h>
63
64 #if defined(CPU_XSCALE_81342)
65 #include <arm/xscale/i8134x/i81342reg.h>
66 #endif
67
68 #ifdef CPU_XSCALE_IXP425
69 #include <arm/xscale/ixp425/ixp425reg.h>
70 #include <arm/xscale/ixp425/ixp425var.h>
71 #endif
72
/* PRIMARY CACHE VARIABLES */

/* L1 instruction cache geometry (separate I-cache only). */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

/* L1 data cache geometry; also used for a unified cache. */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;	/* CPU_CT_CTYPE() field of the cache type reg */
int	arm_pcache_unified;	/* non-zero if I and D caches are unified */

/* Data cache line size used for DMA/flush alignment, and its mask. */
int	arm_dcache_align;
int	arm_dcache_align_mask;

u_int	arm_cache_level;	/* raw CLIDR value (ARMv7) */
u_int	arm_cache_type[14];	/* CCSIDR per level: even=D/unified, odd=I */
u_int	arm_cache_loc;		/* level of coherence from CLIDR */
91
92 #ifdef CPU_ARM9
/*
 * Dispatch table of CPU/MMU/TLB/cache primitives for ARM9 (ARMv4T)
 * cores.  Entries are positional; order must match struct cpu_functions.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	arm9_setttb,			/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	/* No L2 cache on ARM9: all L2 ops are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
141 #endif /* CPU_ARM9 */
142
143 #if defined(CPU_ARM9E)
/*
 * Dispatch table for ARMv5E "EC" cores (e.g. ARM926EJ-S).  Entries are
 * positional; order must match struct cpu_functions.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv5_ec_setttb,		/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* No controllable L2 cache here: all L2 ops are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
193
/*
 * Dispatch table for Marvell Sheeva (Feroceon) cores, which add an
 * L2 cache and CPU sleep support on top of the ARMv5E primitives.
 * Entries are positional; order must match struct cpu_functions.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	sheeva_setttb,			/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all	*/
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
	sheeva_l2cache_inv_range,	/* l2cache_inv_range	*/
	sheeva_l2cache_wb_range,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
242 #endif /* CPU_ARM9E */
243
244 #ifdef CPU_MV_PJ4B
/*
 * Dispatch table for Marvell PJ4B (ARMv7) cores, built on the generic
 * ARMv7 primitives.  Entries are positional; order must match
 * struct cpu_functions.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	armv7_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv7_setttb,			/* Setttb		*/

	/* TLB functions */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv7_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */
	armv7_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
291 #endif /* CPU_MV_PJ4B */
292
293 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
294
/*
 * Dispatch table for Intel XScale PXA2x0 / IXP425 cores.  Entries are
 * positional; order must match struct cpu_functions.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscale_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	/* XScale naming: "purge" = writeback+invalidate, "flush" =
	 * invalidate, "clean" = writeback. */
	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
342 #endif
343 /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
344
345 #ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the XScale core 3 (i81342), which adds real L2
 * cache maintenance.  Entries are positional; order must match
 * struct cpu_functions.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscalec3_setttb,		/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	/* XScale naming: "purge" = writeback+invalidate, "flush" =
	 * invalidate, "clean" = writeback. */
	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
393 #endif /* CPU_XSCALE_81342 */
394
395
396 #if defined(CPU_FA526)
/*
 * Dispatch table for Faraday FA526/FA626TE (ARMv4) cores.  Entries are
 * positional; order must match struct cpu_functions.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	fa526_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	/* No L2 cache: all L2 ops are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */


	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup		*/
};
445 #endif  /* CPU_FA526 */
446
447 #if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 (ARMv6) cores.  Entries are positional;
 * order must match struct cpu_functions.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	arm11x6_setttb,			/* Setttb		*/

	/* TLB functions */

	arm11_tlb_flushID,		/* tlb_flushID		*/
	arm11_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm11_tlb_flushD,		/* tlb_flushD		*/
	arm11_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm11x6_icache_sync_range,	/* icache_sync_range	*/

	arm11x6_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv6_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv6_dcache_inv_range,		/* dcache_inv_range	*/
	armv6_dcache_wb_range,		/* dcache_wb_range	*/

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm11x6_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* No controllable L2 cache here: all L2 ops are no-ops. */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	arm11_drain_writebuf,		/* drain_writebuf	*/

	arm11x6_sleep,			/* sleep		*/

	/* Soft functions */

	arm11_context_switch,		/* context_switch	*/

	arm11x6_setup			/* cpu setup		*/
};
496 #endif /*CPU_ARM1176 */
497
498 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A and Qualcomm Krait (ARMv7) cores.
 * Entries are positional; order must match struct cpu_functions.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv7_setttb,			/* Setttb		*/

	/*
	 * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
	 * whether the hardware implements separate I+D or not, so we use the
	 * same 'ID' functions for all 3 variations.
	 */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv7_drain_writebuf,		/* drain_writebuf	*/

	armv7_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	armv7_context_switch,		/* context_switch	*/

	cortexa_setup			/* cpu setup		*/
};
555 #endif /* CPU_CORTEXA */
556
/*
 * Global constants also used by locore.s
 */

/* Active dispatch table; selected from the tables above by set_cpufuncs(). */
struct cpu_functions cpufuncs;
/* CPU id (masked with CPU_ID_CPU_MASK) as read by set_cpufuncs(). */
u_int cputype;
#if __ARM_ARCH <= 5
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore-v4.s */
#endif
566
567 #if defined(CPU_ARM9) ||        \
568   defined (CPU_ARM9E) ||        \
569   defined(CPU_ARM1176) ||       \
570   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
571   defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||                 \
572   defined(CPU_XSCALE_81342) || \
573   defined(CPU_CORTEXA) || defined(CPU_KRAIT)
574
/* Global cache line sizes, use 32 as default */
int	arm_dcache_min_line_size = 32;
int	arm_icache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;	/* min of the I and D line sizes */

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers; consumed by the ARM9 set-up path in set_cpufuncs(). */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
587
588 static void
589 get_cachetype_cp15()
590 {
591         u_int ctype, isize, dsize, cpuid;
592         u_int clevel, csize, i, sel;
593         u_int multiplier;
594         u_char type;
595
596         __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
597                 : "=r" (ctype));
598
599         cpuid = cpu_ident();
600         /*
601          * ...and thus spake the ARM ARM:
602          *
603          * If an <opcode2> value corresponding to an unimplemented or
604          * reserved ID register is encountered, the System Control
605          * processor returns the value of the main ID register.
606          */
607         if (ctype == cpuid)
608                 goto out;
609
610         if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
611                 /* Resolve minimal cache line sizes */
612                 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
613                 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
614                 arm_idcache_min_line_size =
615                     min(arm_icache_min_line_size, arm_dcache_min_line_size);
616
617                 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
618                     : "=r" (clevel));
619                 arm_cache_level = clevel;
620                 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
621                 i = 0;
622                 while ((type = (clevel & 0x7)) && i < 7) {
623                         if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
624                             type == CACHE_SEP_CACHE) {
625                                 sel = i << 1;
626                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
627                                     : : "r" (sel));
628                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
629                                     : "=r" (csize));
630                                 arm_cache_type[sel] = csize;
631                                 arm_dcache_align = 1 <<
632                                     (CPUV7_CT_xSIZE_LEN(csize) + 4);
633                                 arm_dcache_align_mask = arm_dcache_align - 1;
634                         }
635                         if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
636                                 sel = (i << 1) | 1;
637                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
638                                     : : "r" (sel));
639                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
640                                     : "=r" (csize));
641                                 arm_cache_type[sel] = csize;
642                         }
643                         i++;
644                         clevel >>= 3;
645                 }
646         } else {
647                 if ((ctype & CPU_CT_S) == 0)
648                         arm_pcache_unified = 1;
649
650                 /*
651                  * If you want to know how this code works, go read the ARM ARM.
652                  */
653
654                 arm_pcache_type = CPU_CT_CTYPE(ctype);
655
656                 if (arm_pcache_unified == 0) {
657                         isize = CPU_CT_ISIZE(ctype);
658                         multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
659                         arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
660                         if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
661                                 if (isize & CPU_CT_xSIZE_M)
662                                         arm_picache_line_size = 0; /* not present */
663                                 else
664                                         arm_picache_ways = 1;
665                         } else {
666                                 arm_picache_ways = multiplier <<
667                                     (CPU_CT_xSIZE_ASSOC(isize) - 1);
668                         }
669                         arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
670                 }
671
672                 dsize = CPU_CT_DSIZE(ctype);
673                 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
674                 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
675                 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
676                         if (dsize & CPU_CT_xSIZE_M)
677                                 arm_pdcache_line_size = 0; /* not present */
678                         else
679                                 arm_pdcache_ways = 1;
680                 } else {
681                         arm_pdcache_ways = multiplier <<
682                             (CPU_CT_xSIZE_ASSOC(dsize) - 1);
683                 }
684                 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
685
686                 arm_dcache_align = arm_pdcache_line_size;
687
688                 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
689                 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
690                 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
691                     CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
692
693         out:
694                 arm_dcache_align_mask = arm_dcache_align - 1;
695         }
696 }
697 #endif /* ARM9 || XSCALE */
698
699 /*
700  * Cannot panic here as we may not have a console yet ...
701  */
702
703 int
704 set_cpufuncs()
705 {
706         cputype = cpu_ident();
707         cputype &= CPU_ID_CPU_MASK;
708
709 #ifdef CPU_ARM9
710         if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
711              (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
712             (cputype & 0x0000f000) == 0x00009000) {
713                 cpufuncs = arm9_cpufuncs;
714                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
715                 get_cachetype_cp15();
716                 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
717                 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
718                     arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
719                 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
720                 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
721                 pmap_pte_init_generic();
722                 goto out;
723         }
724 #endif /* CPU_ARM9 */
725 #if defined(CPU_ARM9E)
726         if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
727             cputype == CPU_ID_MV88FR571_41) {
728                 uint32_t sheeva_ctrl;
729
730                 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
731                     MV_L2_ENABLE);
732                 /*
733                  * Workaround for Marvell MV78100 CPU: Cache prefetch
734                  * mechanism may affect the cache coherency validity,
735                  * so it needs to be disabled.
736                  *
737                  * Refer to errata document MV-S501058-00C.pdf (p. 3.1
738                  * L2 Prefetching Mechanism) for details.
739                  */
740                 if (cputype == CPU_ID_MV88FR571_VD ||
741                     cputype == CPU_ID_MV88FR571_41)
742                         sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
743
744                 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
745
746                 cpufuncs = sheeva_cpufuncs;
747                 get_cachetype_cp15();
748                 pmap_pte_init_generic();
749                 goto out;
750         } else if (cputype == CPU_ID_ARM926EJS) {
751                 cpufuncs = armv5_ec_cpufuncs;
752                 get_cachetype_cp15();
753                 pmap_pte_init_generic();
754                 goto out;
755         }
756 #endif /* CPU_ARM9E */
757 #if defined(CPU_ARM1176)
758         if (cputype == CPU_ID_ARM1176JZS) {
759                 cpufuncs = arm1176_cpufuncs;
760                 get_cachetype_cp15();
761                 goto out;
762         }
763 #endif /* CPU_ARM1176 */
764 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
765         if (cputype == CPU_ID_CORTEXA5 ||
766             cputype == CPU_ID_CORTEXA7 ||
767             cputype == CPU_ID_CORTEXA8R1 ||
768             cputype == CPU_ID_CORTEXA8R2 ||
769             cputype == CPU_ID_CORTEXA8R3 ||
770             cputype == CPU_ID_CORTEXA9R1 ||
771             cputype == CPU_ID_CORTEXA9R2 ||
772             cputype == CPU_ID_CORTEXA9R3 ||
773             cputype == CPU_ID_CORTEXA9R4 ||
774             cputype == CPU_ID_CORTEXA12R0 ||
775             cputype == CPU_ID_CORTEXA15R0 ||
776             cputype == CPU_ID_CORTEXA15R1 ||
777             cputype == CPU_ID_CORTEXA15R2 ||
778             cputype == CPU_ID_CORTEXA15R3 ||
779             cputype == CPU_ID_KRAIT300R0 ||
780             cputype == CPU_ID_KRAIT300R1 ) {
781                 cpufuncs = cortexa_cpufuncs;
782                 get_cachetype_cp15();
783                 goto out;
784         }
#endif /* CPU_CORTEXA || CPU_KRAIT */
786
787 #if defined(CPU_MV_PJ4B)
788         if (cputype == CPU_ID_MV88SV581X_V7 ||
789             cputype == CPU_ID_MV88SV584X_V7 ||
790             cputype == CPU_ID_ARM_88SV581X_V7) {
791                 cpufuncs = pj4bv7_cpufuncs;
792                 get_cachetype_cp15();
793                 goto out;
794         }
795 #endif /* CPU_MV_PJ4B */
796
797 #if defined(CPU_FA526)
798         if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
799                 cpufuncs = fa526_cpufuncs;
800                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
801                 get_cachetype_cp15();
802                 pmap_pte_init_generic();
803
804                 goto out;
805         }
806 #endif  /* CPU_FA526 */
807
808 #if defined(CPU_XSCALE_81342)
809         if (cputype == CPU_ID_81342) {
810                 cpufuncs = xscalec3_cpufuncs;
811                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
812                 get_cachetype_cp15();
813                 pmap_pte_init_xscale();
814                 goto out;
815         }
816 #endif /* CPU_XSCALE_81342 */
817 #ifdef CPU_XSCALE_PXA2X0
818         /* ignore core revision to test PXA2xx CPUs */
819         if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
820             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
821             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
822
823                 cpufuncs = xscale_cpufuncs;
824                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
825                 get_cachetype_cp15();
826                 pmap_pte_init_xscale();
827
828                 goto out;
829         }
830 #endif /* CPU_XSCALE_PXA2X0 */
831 #ifdef CPU_XSCALE_IXP425
832         if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
833             cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
834
835                 cpufuncs = xscale_cpufuncs;
836                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
837                 get_cachetype_cp15();
838                 pmap_pte_init_xscale();
839
840                 goto out;
841         }
842 #endif /* CPU_XSCALE_IXP425 */
843         /*
844          * Bzzzz. And the answer was ...
845          */
846         panic("No support for this CPU type (%08x) in kernel", cputype);
847         return(ARCHITECTURE_NOT_PRESENT);
848 out:
849         uma_set_align(arm_dcache_align_mask);
850         return (0);
851 }
852
853 /*
854  * CPU Setup code
855  */
856
857 #ifdef CPU_ARM9
858 void
859 arm9_setup(void)
860 {
861         int cpuctrl, cpuctrlmask;
862
863         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
864             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
865             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
866             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
867             CPU_CONTROL_ROUNDROBIN;
868         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
869                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
870                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
871                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
872                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
873                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
874                  | CPU_CONTROL_ROUNDROBIN;
875
876 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
877         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
878 #endif
879
880 #ifdef __ARMEB__
881         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
882 #endif
883         if (vector_page == ARM_VECTORS_HIGH)
884                 cpuctrl |= CPU_CONTROL_VECRELOC;
885
886         /* Clear out the cache */
887         cpu_idcache_wbinv_all();
888
889         /* Set the control register */
890         cpu_control(cpuctrlmask, cpuctrl);
891
892 }
893 #endif  /* CPU_ARM9 */
894
895 #if defined(CPU_ARM9E)
896 void
897 arm10_setup(void)
898 {
899         int cpuctrl, cpuctrlmask;
900
901         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
902             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
903             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
904         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
905             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
906             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
907             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
908             | CPU_CONTROL_BPRD_ENABLE
909             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
910
911 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
912         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
913 #endif
914
915 #ifdef __ARMEB__
916         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
917 #endif
918
919         /* Clear out the cache */
920         cpu_idcache_wbinv_all();
921
922         /* Now really make sure they are clean.  */
923         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
924
925         if (vector_page == ARM_VECTORS_HIGH)
926                 cpuctrl |= CPU_CONTROL_VECRELOC;
927
928         /* Set the control register */
929         cpu_control(0xffffffff, cpuctrl);
930
931         /* And again. */
932         cpu_idcache_wbinv_all();
933 }
#endif  /* CPU_ARM9E */
935
936 #if defined(CPU_ARM1176) \
937  || defined(CPU_MV_PJ4B) \
938  || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Configure the performance monitor cycle counter (CCNT) via CP15.
 *
 * BEWARE: defining _PMC_USER_READ_WRITE_ gives userland access to the
 * CCNT and PMCn registers — including WRITE access, which may not be
 * what you want.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
964 #endif
965
966 #if defined(CPU_ARM1176)
967 void
968 arm11x6_setup(void)
969 {
970         uint32_t auxctrl, auxctrl_wax;
971         uint32_t tmp, tmp2;
972         uint32_t cpuid;
973
974         cpuid = cpu_ident();
975
976         auxctrl = 0;
977         auxctrl_wax = ~0;
978
979         /*
980          * Enable an errata workaround
981          */
982         if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
983                 auxctrl = ARM1176_AUXCTL_PHD;
984                 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
985         }
986
987         tmp = cp15_actlr_get();
988         tmp2 = tmp;
989         tmp &= auxctrl_wax;
990         tmp |= auxctrl;
991         if (tmp != tmp2)
992                 cp15_actlr_set(tmp);
993
994         cpu_scc_setup_ccnt();
995 }
996 #endif  /* CPU_ARM1176 */
997
998 #ifdef CPU_MV_PJ4B
/*
 * Marvell PJ4B (ARMv7) setup: apply the PJ4B-specific configuration
 * and enable the performance monitor cycle counter.
 */
void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
1006 #endif /* CPU_MV_PJ4B */
1007
1008 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1009
/*
 * Cortex-A / Krait setup: only the performance monitor cycle counter
 * needs configuring here.
 */
void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_CORTEXA || CPU_KRAIT */
1017
1018 #if defined(CPU_FA526)
1019 void
1020 fa526_setup(void)
1021 {
1022         int cpuctrl, cpuctrlmask;
1023
1024         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1025                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1026                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1027                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1028                 | CPU_CONTROL_BPRD_ENABLE;
1029         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1030                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1031                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1032                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1033                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1034                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1035                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1036
1037 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1038         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1039 #endif
1040
1041 #ifdef __ARMEB__
1042         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1043 #endif
1044
1045         if (vector_page == ARM_VECTORS_HIGH)
1046                 cpuctrl |= CPU_CONTROL_VECRELOC;
1047
1048         /* Clear out the cache */
1049         cpu_idcache_wbinv_all();
1050
1051         /* Set the control register */
1052         cpu_control(0xffffffff, cpuctrl);
1053 }
1054 #endif  /* CPU_FA526 */
1055
1056 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1057   defined(CPU_XSCALE_81342)
/*
 * Configure an XScale (PXA2x0 / IXP425 / 81342) core: enable the MMU,
 * caches, write buffer and branch prediction via the system control
 * register, then tune write coalescing (and, on core3, the L2-related
 * bits) in the auxiliary control register.
 */
void
xscale_setup(void)
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is kept for reference only; the
	 * cpu_control() call below deliberately passes 0xffffffff
	 * (see the commented-out call further down).
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
		 CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	/* Trap unaligned accesses unless the kernel config disables this. */
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	/* Big-endian kernels need byte-lane swapping enabled. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	/* Third-generation XScale cores also get the L2 cache enabled. */
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	/* K bit set: write coalescing disabled by kernel option. */
	auxctl |= XSCALE_AUXCTL_K;
#else
	/* K bit clear: allow write-buffer coalescing. */
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	/* Core3-specific auxiliary control bits (LLR, MD mask). */
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	/* Write the updated auxiliary control register back. */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
#endif  /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_81342 */