]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/arm/arm/cpufunc.c
MFV r302423:
[FreeBSD/FreeBSD.git] / sys / arm / arm / cpufunc.c
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm9 support code Copyright (C) 2001 ARM Ltd
5  * Copyright (c) 1997 Mark Brinicombe.
6  * Copyright (c) 1997 Causality Limited
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Causality Limited.
20  * 4. The name of Causality Limited may not be used to endorse or promote
21  *    products derived from this software without specific prior written
22  *    permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * RiscBSD kernel project
37  *
38  * cpufuncs.c
39  *
40  * C functions for supporting CPU / MMU / TLB specific operations.
41  *
42  * Created      : 30/01/97
43  */
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
55
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 #include <vm/uma.h>
59
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
62
63 #if defined(CPU_XSCALE_81342)
64 #include <arm/xscale/i8134x/i81342reg.h>
65 #endif
66
67 #ifdef CPU_XSCALE_IXP425
68 #include <arm/xscale/ixp425/ixp425reg.h>
69 #include <arm/xscale/ixp425/ixp425var.h>
70 #endif
71
/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;	/* primary I-cache size, in bytes */
int	arm_picache_line_size;	/* primary I-cache line size, in bytes */
int	arm_picache_ways;	/* primary I-cache associativity */

int	arm_pdcache_size;	/* primary D-cache size, in bytes (and unified) */
int	arm_pdcache_line_size;	/* primary D-cache line size, in bytes */
int	arm_pdcache_ways;	/* primary D-cache associativity */

int	arm_pcache_type;	/* cache type field from the CP15 cache type reg */
int	arm_pcache_unified;	/* nonzero when I- and D-caches are unified */

int	arm_dcache_align;	/* D-cache line size used for range alignment */
int	arm_dcache_align_mask;	/* arm_dcache_align - 1 */

/* ARMv7-style cache identification (filled in by get_cachetype_cp15()). */
u_int	arm_cache_level;	/* raw cache level ID register value */
u_int	arm_cache_type[14];	/* per-level/per-type cache size ID values */
u_int	arm_cache_loc;		/* level of coherency extracted from the above */
90
#ifdef CPU_ARM9
/*
 * Dispatch table for ARM9 cores: ARMv4 TLB/write-buffer helpers combined
 * with ARM9-specific cache maintenance.  Initializers are positional and
 * must stay in the exact member order of struct cpu_functions.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	arm9_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* No controllable L2 cache: all L2 operations are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
141
#if defined(CPU_ARM9E)
/*
 * Dispatch table for ARM9E/ARMv5 "EC" cores (e.g. ARM926EJ-S): ARMv4
 * TLB/write-buffer helpers with ARMv5-EC cache maintenance.  Initializers
 * are positional and must stay in struct cpu_functions member order.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv5_ec_setttb,		/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* No controllable L2 cache: all L2 operations are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
192
/*
 * Dispatch table for Marvell Sheeva cores (88FR131/88FR571): ARMv5-EC
 * helpers plus Sheeva-specific ranged D-cache and L2 maintenance.
 * Initializers are positional and must stay in struct cpu_functions
 * member order.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	sheeva_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all	*/
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
	sheeva_l2cache_inv_range,	/* l2cache_inv_range	*/
	sheeva_l2cache_wb_range,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
#endif /* CPU_ARM9E */
242
#ifdef CPU_MV_PJ4B
/*
 * Dispatch table for Marvell PJ4B (ARMv7) cores, built entirely from the
 * generic ARMv7 helpers.  Initializers are positional and must stay in
 * struct cpu_functions member order.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	armv7_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv7_setttb,			/* setttb		*/

	/* TLB functions (ARMv7 uses unified-TLB ops for the D variants) */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* No separate L2 controller hooked up here: L2 ops are no-ops. */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv7_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */
	armv7_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
#endif /* CPU_MV_PJ4B */
291
#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)

/*
 * Dispatch table for Intel XScale (PXA2x0 / IXP425) cores: ARMv4
 * TLB/write-buffer helpers with XScale cache and MMU maintenance.
 * Initializers are positional and must stay in struct cpu_functions
 * member order.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscale_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/

	/* No controllable L2 cache: all L2 operations are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
343
#ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the XScale core 3 (i81342): like the plain XScale
 * table, but with core-3 cache maintenance and real L2 cache operations.
 * Initializers are positional and must stay in struct cpu_functions
 * member order.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscalec3_setttb,		/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/

	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_81342 */
393
394
#if defined(CPU_FA526)
/*
 * Dispatch table for Faraday FA526/FA626TE cores: ARMv4 TLB/write-buffer
 * helpers with FA526 cache maintenance.  Initializers are positional and
 * must stay in struct cpu_functions member order.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	fa526_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* No controllable L2 cache: all L2 operations are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup		*/
};
#endif	/* CPU_FA526 */
445
#if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 (ARMv6) cores: ARM11 TLB/write-buffer
 * helpers with ARM11x6/ARMv6 cache maintenance.  Initializers are
 * positional and must stay in struct cpu_functions member order.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	arm11x6_setttb,			/* setttb		*/

	/* TLB functions */

	arm11_tlb_flushID,		/* tlb_flushID		*/
	arm11_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm11_tlb_flushD,		/* tlb_flushD		*/
	arm11_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm11x6_icache_sync_range,	/* icache_sync_range	*/

	arm11x6_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv6_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv6_dcache_inv_range,		/* dcache_inv_range	*/
	armv6_dcache_wb_range,		/* dcache_wb_range	*/

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm11x6_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* No controllable L2 cache: all L2 operations are no-ops. */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	arm11_drain_writebuf,		/* drain_writebuf	*/

	arm11x6_sleep,			/* sleep		*/

	/* Soft functions */

	arm11_context_switch,		/* context_switch	*/

	arm11x6_setup			/* cpu setup		*/
};
#endif /* CPU_ARM1176 */
496
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A and Qualcomm Krait (ARMv7) cores, built
 * from the generic ARMv7 helpers.  Initializers are positional and must
 * stay in struct cpu_functions member order.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv7_setttb,			/* setttb		*/

	/*
	 * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
	 * whether the hardware implements separate I+D or not, so we use the
	 * same 'ID' functions for all 3 variations.
	 */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv7_drain_writebuf,		/* drain_writebuf	*/

	armv7_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	armv7_context_switch,		/* context_switch	*/

	cortexa_setup			/* cpu setup		*/
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
555
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;	/* active dispatch table; set by set_cpufuncs() */
u_int cputype;			/* masked CPU id; set by set_cpufuncs() */
#if __ARM_ARCH <= 5
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore-v4.s */
#endif
565
#if defined(CPU_ARM9) ||	\
  defined (CPU_ARM9E) ||	\
  defined(CPU_ARM1176) ||	\
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
  defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||			\
  defined(CPU_XSCALE_81342) || \
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

/* Global cache line sizes, use 32 as default */
int	arm_dcache_min_line_size = 32;
int	arm_icache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;	/* min of the I and D line sizes */

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
586
587 static void
588 get_cachetype_cp15()
589 {
590         u_int ctype, isize, dsize, cpuid;
591         u_int clevel, csize, i, sel;
592         u_int multiplier;
593         u_char type;
594
595         __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
596                 : "=r" (ctype));
597
598         cpuid = cpu_ident();
599         /*
600          * ...and thus spake the ARM ARM:
601          *
602          * If an <opcode2> value corresponding to an unimplemented or
603          * reserved ID register is encountered, the System Control
604          * processor returns the value of the main ID register.
605          */
606         if (ctype == cpuid)
607                 goto out;
608
609         if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
610                 /* Resolve minimal cache line sizes */
611                 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
612                 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
613                 arm_idcache_min_line_size =
614                     min(arm_icache_min_line_size, arm_dcache_min_line_size);
615
616                 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
617                     : "=r" (clevel));
618                 arm_cache_level = clevel;
619                 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
620                 i = 0;
621                 while ((type = (clevel & 0x7)) && i < 7) {
622                         if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
623                             type == CACHE_SEP_CACHE) {
624                                 sel = i << 1;
625                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
626                                     : : "r" (sel));
627                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
628                                     : "=r" (csize));
629                                 arm_cache_type[sel] = csize;
630                                 arm_dcache_align = 1 <<
631                                     (CPUV7_CT_xSIZE_LEN(csize) + 4);
632                                 arm_dcache_align_mask = arm_dcache_align - 1;
633                         }
634                         if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
635                                 sel = (i << 1) | 1;
636                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
637                                     : : "r" (sel));
638                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
639                                     : "=r" (csize));
640                                 arm_cache_type[sel] = csize;
641                         }
642                         i++;
643                         clevel >>= 3;
644                 }
645         } else {
646                 if ((ctype & CPU_CT_S) == 0)
647                         arm_pcache_unified = 1;
648
649                 /*
650                  * If you want to know how this code works, go read the ARM ARM.
651                  */
652
653                 arm_pcache_type = CPU_CT_CTYPE(ctype);
654
655                 if (arm_pcache_unified == 0) {
656                         isize = CPU_CT_ISIZE(ctype);
657                         multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
658                         arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
659                         if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
660                                 if (isize & CPU_CT_xSIZE_M)
661                                         arm_picache_line_size = 0; /* not present */
662                                 else
663                                         arm_picache_ways = 1;
664                         } else {
665                                 arm_picache_ways = multiplier <<
666                                     (CPU_CT_xSIZE_ASSOC(isize) - 1);
667                         }
668                         arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
669                 }
670
671                 dsize = CPU_CT_DSIZE(ctype);
672                 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
673                 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
674                 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
675                         if (dsize & CPU_CT_xSIZE_M)
676                                 arm_pdcache_line_size = 0; /* not present */
677                         else
678                                 arm_pdcache_ways = 1;
679                 } else {
680                         arm_pdcache_ways = multiplier <<
681                             (CPU_CT_xSIZE_ASSOC(dsize) - 1);
682                 }
683                 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
684
685                 arm_dcache_align = arm_pdcache_line_size;
686
687                 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
688                 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
689                 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
690                     CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
691
692         out:
693                 arm_dcache_align_mask = arm_dcache_align - 1;
694         }
695 }
696 #endif /* ARM9 || XSCALE */
697
698 /*
699  * Cannot panic here as we may not have a console yet ...
700  */
701
702 int
703 set_cpufuncs()
704 {
705         cputype = cpu_ident();
706         cputype &= CPU_ID_CPU_MASK;
707
708 #ifdef CPU_ARM9
709         if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
710              (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
711             (cputype & 0x0000f000) == 0x00009000) {
712                 cpufuncs = arm9_cpufuncs;
713                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
714                 get_cachetype_cp15();
715                 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
716                 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
717                     arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
718                 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
719                 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
720                 pmap_pte_init_generic();
721                 goto out;
722         }
723 #endif /* CPU_ARM9 */
724 #if defined(CPU_ARM9E)
725         if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
726             cputype == CPU_ID_MV88FR571_41) {
727                 uint32_t sheeva_ctrl;
728
729                 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
730                     MV_L2_ENABLE);
731                 /*
732                  * Workaround for Marvell MV78100 CPU: Cache prefetch
733                  * mechanism may affect the cache coherency validity,
734                  * so it needs to be disabled.
735                  *
736                  * Refer to errata document MV-S501058-00C.pdf (p. 3.1
737                  * L2 Prefetching Mechanism) for details.
738                  */
739                 if (cputype == CPU_ID_MV88FR571_VD ||
740                     cputype == CPU_ID_MV88FR571_41)
741                         sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
742
743                 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
744
745                 cpufuncs = sheeva_cpufuncs;
746                 get_cachetype_cp15();
747                 pmap_pte_init_generic();
748                 goto out;
749         } else if (cputype == CPU_ID_ARM926EJS) {
750                 cpufuncs = armv5_ec_cpufuncs;
751                 get_cachetype_cp15();
752                 pmap_pte_init_generic();
753                 goto out;
754         }
755 #endif /* CPU_ARM9E */
756 #if defined(CPU_ARM1176)
757         if (cputype == CPU_ID_ARM1176JZS) {
758                 cpufuncs = arm1176_cpufuncs;
759                 get_cachetype_cp15();
760                 goto out;
761         }
762 #endif /* CPU_ARM1176 */
763 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
764         switch(cputype & CPU_ID_SCHEME_MASK) {
765         case CPU_ID_CORTEXA5:
766         case CPU_ID_CORTEXA7:
767         case CPU_ID_CORTEXA8:
768         case CPU_ID_CORTEXA9:
769         case CPU_ID_CORTEXA12:
770         case CPU_ID_CORTEXA15:
771         case CPU_ID_KRAIT300:
772                 cpufuncs = cortexa_cpufuncs;
773                 get_cachetype_cp15();
774                 goto out;
775         default:
776                 break;
777         }
778 #endif /* CPU_CORTEXA */
779
780 #if defined(CPU_MV_PJ4B)
781         if (cputype == CPU_ID_MV88SV581X_V7 ||
782             cputype == CPU_ID_MV88SV584X_V7 ||
783             cputype == CPU_ID_ARM_88SV581X_V7) {
784                 cpufuncs = pj4bv7_cpufuncs;
785                 get_cachetype_cp15();
786                 goto out;
787         }
788 #endif /* CPU_MV_PJ4B */
789
790 #if defined(CPU_FA526)
791         if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
792                 cpufuncs = fa526_cpufuncs;
793                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
794                 get_cachetype_cp15();
795                 pmap_pte_init_generic();
796
797                 goto out;
798         }
799 #endif  /* CPU_FA526 */
800
801 #if defined(CPU_XSCALE_81342)
802         if (cputype == CPU_ID_81342) {
803                 cpufuncs = xscalec3_cpufuncs;
804                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
805                 get_cachetype_cp15();
806                 pmap_pte_init_xscale();
807                 goto out;
808         }
809 #endif /* CPU_XSCALE_81342 */
810 #ifdef CPU_XSCALE_PXA2X0
811         /* ignore core revision to test PXA2xx CPUs */
812         if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
813             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
814             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
815
816                 cpufuncs = xscale_cpufuncs;
817                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
818                 get_cachetype_cp15();
819                 pmap_pte_init_xscale();
820
821                 goto out;
822         }
823 #endif /* CPU_XSCALE_PXA2X0 */
824 #ifdef CPU_XSCALE_IXP425
825         if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
826             cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
827
828                 cpufuncs = xscale_cpufuncs;
829                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
830                 get_cachetype_cp15();
831                 pmap_pte_init_xscale();
832
833                 goto out;
834         }
835 #endif /* CPU_XSCALE_IXP425 */
836         /*
837          * Bzzzz. And the answer was ...
838          */
839         panic("No support for this CPU type (%08x) in kernel", cputype);
840         return(ARCHITECTURE_NOT_PRESENT);
841 out:
842         uma_set_align(arm_dcache_align_mask);
843         return (0);
844 }
845
846 /*
847  * CPU Setup code
848  */
849
#ifdef CPU_ARM9
/*
 * Configure the system control register (SCTLR) for an ARM9 core:
 * enable the MMU, I/D caches, write buffer and late aborts, optionally
 * alignment faults, big-endian operation and high vectors, then write
 * the result out through cpu_control() under an explicit change mask.
 */
void
arm9_setup(void)
{
	int ctrl_set, ctrl_mask;

	/* Bits we want asserted in the control register. */
	ctrl_set = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE |
	    CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE |
	    CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE |
	    CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
	    CPU_CONTROL_ROUNDROBIN;

	/* Bits we are allowed to modify; all others are left untouched. */
	ctrl_mask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE |
	    CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE |
	    CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE |
	    CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE |
	    CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE |
	    CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC |
	    CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	ctrl_set |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	/* Big-endian kernel build: run the core big-endian as well. */
	ctrl_set |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate exception vectors when the vector page sits high. */
	if (vector_page == ARM_VECTORS_HIGH)
		ctrl_set |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache before reconfiguring. */
	cpu_idcache_wbinv_all();

	/* Install the new control register (SCTLR) settings. */
	cpu_control(ctrl_mask, ctrl_set);

}
#endif	/* CPU_ARM9 */
887
#if defined(CPU_ARM9E)
/*
 * Configure the system control register for an ARM9E-family core:
 * enable the MMU, caches, write buffer and branch prediction, plus
 * optional alignment faults, big-endian mode and high vectors.
 *
 * Unlike arm9_setup(), the register is written with an all-ones mask,
 * so every writable bit takes the value computed here.
 */
void
arm10_setup(void)
{
	int cpuctrl;

	/*
	 * NOTE: the set-but-never-used cpuctrlmask that used to live here
	 * was removed; cpu_control() below is called with an all-ones mask.
	 */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Now really make sure they are clean (CP15 c7, c7, 0 cache
	 * invalidate; the source register value is ignored by this op).
	 */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register; all-ones mask forces every bit. */
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
#endif	/* CPU_ARM9E */
928
#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Program the performance monitor so the cycle counter (CCNT) runs.
 *
 * When _PMC_USER_READ_WRITE_ is defined, userland is additionally
 * granted access to the CCNT and PMCn registers.  BEWARE!  That grant
 * includes write access, which may not be what you want!
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
#ifdef _PMC_USER_READ_WRITE_
	/* PMUSERENR[0] = 1: allow userland access to the counters. */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* PMCR[2] resets CCNT, PMCR[0] enables the counters. */
	cp15_pmcr_set((1 << 2) | (1 << 0));
#else
	/*
	 * Set up PMCCNTR as a cycle counter:
	 *  - mask every counter interrupt via PMINTENCLR,
	 *  - reset CCNT and enable counters (PMCR[2] and PMCR[0]),
	 *  - enable the cycle counter itself (PMCNTENSET[31]).
	 */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set((1 << 2) | (1 << 0));
	cp15_pmcnten_set(1U << 31);
#endif
}
#endif
958
#if defined(CPU_ARM1176)
/*
 * ARM1176 CPU setup: apply the PHD auxiliary-control errata
 * workaround when running on an ARM1176JZS part, then enable the
 * cycle counter via cpu_scc_setup_ccnt().
 */
void
arm11x6_setup(void)
{
	uint32_t set_bits, keep_mask;
	uint32_t actlr_old, actlr_new;
	uint32_t cpuid;

	cpuid = cpu_ident();

	set_bits = 0;
	keep_mask = ~0;

	/*
	 * Enable an errata workaround: force the PHD bit on.
	 * NOTE(review): the comparison masks with CPU_ID_CPU_MASK, so it
	 * matches every revision of the ARM1176JZS, not only r0 as the
	 * old comment implied -- confirm that is intended.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) {
		set_bits = ARM1176_AUXCTL_PHD;
		keep_mask = ~ARM1176_AUXCTL_PHD;
	}

	actlr_old = cp15_actlr_get();
	actlr_new = (actlr_old & keep_mask) | set_bits;
	/* Only write ACTLR back when something actually changes. */
	if (actlr_new != actlr_old)
		cp15_actlr_set(actlr_new);

	cpu_scc_setup_ccnt();
}
#endif	/* CPU_ARM1176 */
990
#ifdef CPU_MV_PJ4B
/*
 * Marvell PJ4B (ARMv7) CPU setup: run the PJ4B-specific configuration
 * and then enable the cycle counter.
 */
void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */
1000
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Cortex-A / Krait CPU setup: no core-specific configuration is
 * needed here beyond enabling the cycle counter.
 */
void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif	/* CPU_CORTEXA || CPU_KRAIT */
1010
#if defined(CPU_FA526)
/*
 * Faraday FA526 CPU setup: enable the MMU, caches, write buffer,
 * branch prediction and late aborts, honour the alignment-fault and
 * big-endian build options, and relocate the vectors when requested.
 * The control register is written with an all-ones mask, so every
 * writable bit takes the value computed here.
 */
void
fa526_setup(void)
{
	int cpuctrl;

	/*
	 * NOTE: the set-but-never-used cpuctrlmask that used to live here
	 * was removed; cpu_control() below uses an all-ones mask.
	 */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_FA526 */
1048
#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_81342)
/*
 * Intel XScale CPU setup (PXA2x0, IXP425 and 81342 cores).
 *
 * Programs the system control register and the XScale auxiliary
 * control register.  The XScale write buffer is always enabled; the
 * only choice is whether write coalescing is permitted (auxctl K bit).
 * Note that control-register bits 6:3 must always be set to 1 on
 * these cores.
 */
void
xscale_setup(void)
{
	uint32_t auxctl;
	int cpuctrl;

	/*
	 * NOTE: the cpuctrlmask that used to be computed here was only
	 * referenced from commented-out code and has been removed;
	 * cpu_control() below is called with an all-ones mask so that
	 * bits 6:3 (which must always be 1) stay set.
	 */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is configured as requested. */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	/* Third-generation cores additionally get the LLR and MD bits. */
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
#endif	/* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_81342 */