/*
 * sys/arm/arm/cpufunc.c — resolve cache line size from CP15.
 * (Web-viewer breadcrumb lines removed during import.)
 */
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm9 support code Copyright (C) 2001 ARM Ltd
5  * Copyright (c) 1997 Mark Brinicombe.
6  * Copyright (c) 1997 Causality Limited
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Causality Limited.
20  * 4. The name of Causality Limited may not be used to endorse or promote
21  *    products derived from this software without specific prior written
22  *    permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * RiscBSD kernel project
37  *
38  * cpufuncs.c
39  *
40  * C functions for supporting CPU / MMU / TLB specific operations.
41  *
42  * Created      : 30/01/97
43  */
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
55
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 #include <vm/uma.h>
59
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
62 #include <machine/bootconfig.h>
63
64 #ifdef CPU_XSCALE_80200
65 #include <arm/xscale/i80200/i80200reg.h>
66 #include <arm/xscale/i80200/i80200var.h>
67 #endif
68
69 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
70 #include <arm/xscale/i80321/i80321reg.h>
71 #include <arm/xscale/i80321/i80321var.h>
72 #endif
73
74 /*
75  * Some definitions in i81342reg.h clash with i80321reg.h.
76  * This only happens for the LINT kernel. As it happens,
77  * we don't need anything from i81342reg.h that we already
78  * got from somewhere else during a LINT compile.
79  */
80 #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
81 #include <arm/xscale/i8134x/i81342reg.h>
82 #endif
83
84 #ifdef CPU_XSCALE_IXP425
85 #include <arm/xscale/ixp425/ixp425reg.h>
86 #include <arm/xscale/ixp425/ixp425var.h>
87 #endif
88
/* PRIMARY CACHE VARIABLES */

/* Primary (L1) instruction-cache geometry, filled in by CPU setup code. */
int     arm_picache_size;
int     arm_picache_line_size;
int     arm_picache_ways;

/* Primary (L1) data-cache geometry; also describes a unified cache. */
int     arm_pdcache_size;       /* and unified */
int     arm_pdcache_line_size;
int     arm_pdcache_ways;

int     arm_pcache_type;
int     arm_pcache_unified;

/* D-cache line size used for cache-op alignment. */
/* NOTE(review): mask is presumably arm_dcache_align - 1 — confirm in setup code. */
int     arm_dcache_align;
int     arm_dcache_align_mask;

/* Per-level cache description resolved from CP15 (see file purpose above). */
/* NOTE(review): layout of arm_cache_type[14] is defined by the CP15 probe code
 * outside this chunk — confirm there before relying on indices. */
u_int   arm_cache_level;
u_int   arm_cache_type[14];
u_int   arm_cache_loc;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
/* NOTE(review): appears to hold a CPU control-register value — confirm at callers. */
int ctrl;
#ifdef CPU_ARM9
/*
 * Dispatch table of CPU/MMU/TLB/cache primitives for ARM9 cores.
 * ARM9-specific routines are mixed with generic ARMv4 helpers; slots
 * with no per-CPU implementation point at cpufunc_nullop.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm9_setttb,                    /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	(void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm9_icache_sync_all,           /* icache_sync_all      */
	arm9_icache_sync_range,         /* icache_sync_range    */

	arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
	arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
	arm9_dcache_inv_range,          /* dcache_inv_range     */
	arm9_dcache_wb_range,           /* dcache_wb_range      */

	armv4_idcache_inv_all,          /* idcache_inv_all      */
	arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
	arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
	/* No L2 cache on these cores: all L2 slots are no-ops. */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	(void *)cpufunc_nullop,         /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm9_context_switch,            /* context_switch       */

	arm9_setup                      /* cpu setup            */

};
#endif /* CPU_ARM9 */
175
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Dispatch table for ARMv5 "EC" cores (ARM9E/ARM10 family): armv5_ec
 * cache routines combined with armv4 TLB/write-buffer helpers and
 * arm10 single-entry TLB flushes.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	armv5_ec_setttb,                /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	armv5_ec_icache_sync_all,       /* icache_sync_all      */
	armv5_ec_icache_sync_range,     /* icache_sync_range    */

	armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
	armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
	armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
	armv5_ec_dcache_wb_range,       /* dcache_wb_range      */

	armv4_idcache_inv_all,          /* idcache_inv_all      */
	armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
	armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */

	/* No controllable L2 cache here: all L2 slots are no-ops. */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	(void *)cpufunc_nullop,         /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm10_context_switch,           /* context_switch       */

	arm10_setup                     /* cpu setup            */

};
239
/*
 * Dispatch table for Marvell Sheeva cores: shares the armv5_ec
 * whole-cache and arm10 TLB routines, but uses sheeva-specific
 * ranged cache operations, a real L2 cache, and a CPU sleep hook.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	sheeva_setttb,                  /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	armv5_ec_icache_sync_all,       /* icache_sync_all      */
	armv5_ec_icache_sync_range,     /* icache_sync_range    */

	armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
	sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
	sheeva_dcache_inv_range,        /* dcache_inv_range     */
	sheeva_dcache_wb_range,         /* dcache_wb_range      */

	armv4_idcache_inv_all,          /* idcache_inv_all      */
	armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
	sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */

	sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
	sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
	sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
	sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	sheeva_cpu_sleep,               /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm10_context_switch,           /* context_switch       */

	arm10_setup                     /* cpu setup            */
};
#endif /* CPU_ARM9E || CPU_ARM10 */
302
#ifdef CPU_ARM10
/*
 * Dispatch table for ARM10 cores: arm10-specific cache/TLB/context
 * routines on top of generic ARMv4 helpers; no L2 cache control.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm10_setttb,                   /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm10_icache_sync_all,          /* icache_sync_all      */
	arm10_icache_sync_range,        /* icache_sync_range    */

	arm10_dcache_wbinv_all,         /* dcache_wbinv_all     */
	arm10_dcache_wbinv_range,       /* dcache_wbinv_range   */
	arm10_dcache_inv_range,         /* dcache_inv_range     */
	arm10_dcache_wb_range,          /* dcache_wb_range      */

	armv4_idcache_inv_all,          /* idcache_inv_all      */
	arm10_idcache_wbinv_all,        /* idcache_wbinv_all    */
	arm10_idcache_wbinv_range,      /* idcache_wbinv_range  */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	(void *)cpufunc_nullop,         /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm10_context_switch,           /* context_switch       */

	arm10_setup                     /* cpu setup            */

};
#endif /* CPU_ARM10 */
366
#ifdef CPU_MV_PJ4B
/*
 * Dispatch table for Marvell PJ4B (ARMv7-compatible) cores.  ARMv7
 * has unified TLB flush ops, so the I/D-specific slots reuse the
 * combined armv7_tlb_flushID variants; cpwait drains the write buffer.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	armv7_drain_writebuf,           /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	armv7_setttb,                   /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	armv7_tlb_flushID,              /* tlb_flushID          */
	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
	armv7_tlb_flushID,              /* tlb_flushI           */
	armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
	armv7_tlb_flushID,              /* tlb_flushD           */
	armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */

	/* Cache operations */
	armv7_idcache_wbinv_all,        /* icache_sync_all      */
	armv7_icache_sync_range,        /* icache_sync_range    */

	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv7_dcache_inv_range,         /* dcache_inv_range     */
	armv7_dcache_wb_range,          /* dcache_wb_range      */

	armv7_idcache_inv_all,          /* idcache_inv_all      */
	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv7_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	(void *)cpufunc_nullop,         /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	armv7_context_switch,           /* context_switch       */

	pj4bv7_setup                    /* cpu setup            */
};
#endif /* CPU_MV_PJ4B */
429
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_80219)

/*
 * Dispatch table for Intel XScale cores (80200/80321/80219, PXA2x0,
 * IXP425).  XScale cache ops use clean ("cleanD"), invalidate
 * ("flushD") and write-back-invalidate ("purgeD/ID") naming; TLB and
 * write-buffer slots fall back to generic ARMv4 helpers.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	xscale_cpwait,                  /* cpwait               */

	/* MMU functions */

	xscale_control,                 /* control              */
	cpufunc_domains,                /* domain               */
	xscale_setttb,                  /* setttb               */
	cpufunc_faultstatus,            /* faultstatus          */
	cpufunc_faultaddress,           /* faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	(void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	xscale_cache_syncI,             /* icache_sync_all      */
	xscale_cache_syncI_rng,         /* icache_sync_range    */

	xscale_cache_purgeD,            /* dcache_wbinv_all     */
	xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
	xscale_cache_flushD_rng,        /* dcache_inv_range     */
	xscale_cache_cleanD_rng,        /* dcache_wb_range      */

	xscale_cache_flushID,           /* idcache_inv_all      */
	xscale_cache_purgeID,           /* idcache_wbinv_all    */
	xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	xscale_cpu_sleep,               /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	xscale_context_switch,          /* context_switch       */

	xscale_setup                    /* cpu setup            */
};
#endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
   CPU_XSCALE_80219 */
497
#ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the XScale third-generation core (81342):
 * xscalec3-specific cache/setttb/context-switch routines, plus real
 * L2 cache operations, layered over generic ARMv4/XScale helpers.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	xscale_cpwait,                  /* cpwait               */

	/* MMU functions */

	xscale_control,                 /* control              */
	cpufunc_domains,                /* domain               */
	xscalec3_setttb,                /* setttb               */
	cpufunc_faultstatus,            /* faultstatus          */
	cpufunc_faultaddress,           /* faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	(void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	xscalec3_cache_syncI,           /* icache_sync_all      */
	xscalec3_cache_syncI_rng,       /* icache_sync_range    */

	xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
	xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
	xscale_cache_flushD_rng,        /* dcache_inv_range     */
	xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */

	xscale_cache_flushID,           /* idcache_inv_all      */
	xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
	xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
	xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
	xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
	xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
	xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	xscale_cpu_sleep,               /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	xscalec3_context_switch,        /* context_switch       */

	xscale_setup                    /* cpu setup            */
};
#endif /* CPU_XSCALE_81342 */
560
561
#if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * Dispatch table for Faraday FA526/FA626TE cores.  Notable relative
 * to the other ARMv4-era tables: these provide a real prefetch-buffer
 * flush, a branch-target flush (flush_brnchtgt_E) and a sleep hook.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* domain               */
	fa526_setttb,                   /* setttb               */
	cpufunc_faultstatus,            /* faultstatus          */
	cpufunc_faultaddress,           /* faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	fa526_tlb_flushI_SE,            /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	fa526_icache_sync_all,          /* icache_sync_all      */
	fa526_icache_sync_range,        /* icache_sync_range    */

	fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
	fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
	fa526_dcache_inv_range,         /* dcache_inv_range     */
	fa526_dcache_wb_range,          /* dcache_wb_range      */

	armv4_idcache_inv_all,          /* idcache_inv_all      */
	fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
	fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	fa526_flush_prefetchbuf,        /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	fa526_flush_brnchtgt_E,         /* flush_brnchtgt_E     */

	fa526_cpu_sleep,                /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	fa526_context_switch,           /* context_switch       */

	fa526_setup                     /* cpu setup            */
};
#endif  /* CPU_FA526 || CPU_FA626TE */
624
#if defined(CPU_ARM1136)
/*
 * Dispatch table for ARM1136 (ARMv6) cores: arm11/arm11x6/armv6
 * helpers; uses the generic arm11_sleep (ARM1176 below uses the
 * arm11x6-specific variant).  No L2 cache control.
 */
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,          /* idcache_inv_all      */
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11_sleep,                    /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
#endif /* CPU_ARM1136 */
#if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 (ARMv6) cores.  Identical to the
 * ARM1136 table above except for the sleep hook (arm11x6_sleep
 * instead of the generic arm11_sleep).
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,          /* idcache_inv_all      */
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11x6_sleep,                  /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
#endif /*CPU_ARM1176 */
751
752 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Cortex-A / Krait function vector: all maintenance goes through the
 * generic ARMv7 routines.  L2 slots start as no-ops and are replaced
 * at run time when a PL310 controller is attached (see below).
 */
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv7_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/*
	 * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
	 * whether the hardware implements separate I+D or not, so we use the
	 * same 'ID' functions for all 3 variations.
	 */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv7_icache_sync_all,		/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv7_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	armv7_sleep,			/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	armv7_context_switch,		/* context_switch	*/

	cortexa_setup			/* cpu setup		*/
};
822 #endif /* CPU_CORTEXA */
823
824 /*
825  * Global constants also used by locore.s
826  */
827
struct cpu_functions cpufuncs;		/* active table, chosen by set_cpufuncs() */
u_int cputype;				/* CPU id masked with CPU_ID_CPU_MASK */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
831
832 #if defined(CPU_ARM9) ||        \
833   defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) ||        \
834   defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
835   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
836   defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||                 \
837   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
838   defined(CPU_CORTEXA) || defined(CPU_KRAIT)
839
/*
 * Global cache line sizes, use 32 as default.  On ARMv7 parts these are
 * refined from the CP15 cache type register by get_cachetype_cp15().
 */
int	arm_dcache_min_line_size = 32;
int	arm_icache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  Consumed by the ARM9/ARM10 set-index cache loops. */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
852
/*
 * Probe cache geometry from CP15 and fill in the global cache
 * description variables (line sizes, ways, set counts, alignment).
 *
 * Two decodings are supported:
 *  - ARMv7 format: walk CLIDR level by level, selecting each cache
 *    with CSSELR and reading its CCSIDR.
 *  - Pre-v7 format: decode the legacy cache type register fields
 *    directly (see the ARM ARM for the bit layout).
 */
static void
get_cachetype_cp15()
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	/* Read the CP15 cache type register (c0, opc2=1). */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	cpuid = cpufunc_id();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Resolve minimal cache line sizes */
		/* DMINLINE/IMINLINE encode log2(words); +2 converts to bytes. */
		arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
		arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
		arm_idcache_min_line_size =
		    min(arm_icache_min_line_size, arm_dcache_min_line_size);

		/* Read CLIDR (cache level ID) and walk up to 7 levels. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		i = 0;
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				/* Select the data/unified cache at this level
				 * via CSSELR, then read its size id (CCSIDR). */
				sel = i << 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				/* NOTE: overwritten each level; ends up
				 * describing the last D/unified level seen. */
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				/* Odd CSSELR index selects the I-cache. */
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			/* Next level's 3-bit type field. */
			clevel >>= 3;
		}
	} else {
		/* Legacy (pre-v7) cache type register decoding. */
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

		/*
		 * NOTE(review): the early "ctype == cpuid" goto lands here,
		 * inside the else branch, relying on the pre-set default of
		 * arm_dcache_align — legal C, but easy to misread.
		 */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
962 #endif /* ARM9 || XSCALE */
963
964 /*
965  * Cannot panic here as we may not have a console yet ...
966  */
967
/*
 * Identify the running CPU from its CP15 ID register and install the
 * matching cpu_functions table, probe the cache geometry, and select
 * the appropriate pmap PTE initialization.  Returns 0 on success;
 * panics (the trailing return is for form only) if the CPU type is
 * not supported by this kernel configuration.
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM9
	/* Match any ARM or TI part whose primary part number is 9xx. */
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Precompute set/index stepping for the ARM9 cache loops. */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Precompute set/index stepping for the ARM10 cache loops. */
		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm10_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm10_dcache_sets_inc;
		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM10 */
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1136JS
	    || cputype == CPU_ID_ARM1136JSR1
	    || cputype == CPU_ID_ARM1176JZS) {
#ifdef CPU_ARM1136
		if (cputype == CPU_ID_ARM1136JS
		    || cputype == CPU_ID_ARM1136JSR1)
			cpufuncs = arm1136_cpufuncs;
#endif
#ifdef CPU_ARM1176
		if (cputype == CPU_ID_ARM1176JZS)
			cpufuncs = arm1176_cpufuncs;
#endif
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();

		goto out;
	}
#endif /* CPU_ARM1136 || CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	if (cputype == CPU_ID_CORTEXA5 ||
	    cputype == CPU_ID_CORTEXA7 ||
	    cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2 ||
	    cputype == CPU_ID_CORTEXA8R3 ||
	    cputype == CPU_ID_CORTEXA9R1 ||
	    cputype == CPU_ID_CORTEXA9R2 ||
	    cputype == CPU_ID_CORTEXA9R3 ||
	    cputype == CPU_ID_CORTEXA12R0 ||
	    cputype == CPU_ID_CORTEXA15R0 ||
	    cputype == CPU_ID_CORTEXA15R1 ||
	    cputype == CPU_ID_CORTEXA15R2 ||
	    cputype == CPU_ID_CORTEXA15R3 ||
	    cputype == CPU_ID_KRAIT ) {
		cpufuncs = cortexa_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;
		goto out;
	}
#endif /* CPU_CORTEXA */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_mmu_v6();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526) || defined(CPU_FA626TE)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_FA526 || CPU_FA626TE */

#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);	/* not reached after panic */
out:
	/* Let UMA align allocations to the probed cache line size. */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
1223
1224 /*
1225  * Fixup routines for data and prefetch aborts.
1226  *
1227  * Several compile time symbols are used
1228  *
1229  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1230  * correction of registers after a fault.
1231  */
1232
1233
1234 /*
1235  * Null abort fixup routine.
1236  * For use when no fixup is required.
1237  */
1238 int
1239 cpufunc_null_fixup(arg)
1240         void *arg;
1241 {
1242         return(ABORT_FIXUP_OK);
1243 }
1244
1245 /*
1246  * CPU Setup code
1247  */
1248
1249 #if defined (CPU_ARM9) || \
1250   defined(CPU_ARM9E) || \
1251   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
1252   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
1253   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1254   defined(CPU_ARM10) ||  defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
1255   defined(CPU_FA526) || defined(CPU_FA626TE)
1256
/* Operations applied by parse_cpu_options() to the control word. */
#define IGN	0	/* ignore this option value */
#define OR	1	/* OR co_value into the control word */
#define BIC	2	/* clear co_value from the control word */

struct cpu_option {
	char	*co_name;	/* boot argument name, e.g. "cpu.cache" */
	int	co_falseop;	/* operation when the option is false */
	int	co_trueop;	/* operation when the option is true */
	int	co_value;	/* CPU control register bits affected */
};

static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1269
1270 static u_int
1271 parse_cpu_options(args, optlist, cpuctrl)
1272         char *args;
1273         struct cpu_option *optlist;
1274         u_int cpuctrl;
1275 {
1276         int integer;
1277
1278         if (args == NULL)
1279                 return(cpuctrl);
1280
1281         while (optlist->co_name) {
1282                 if (get_bootconf_option(args, optlist->co_name,
1283                     BOOTOPT_TYPE_BOOLEAN, &integer)) {
1284                         if (integer) {
1285                                 if (optlist->co_trueop == OR)
1286                                         cpuctrl |= optlist->co_value;
1287                                 else if (optlist->co_trueop == BIC)
1288                                         cpuctrl &= ~optlist->co_value;
1289                         } else {
1290                                 if (optlist->co_falseop == OR)
1291                                         cpuctrl |= optlist->co_value;
1292                                 else if (optlist->co_falseop == BIC)
1293                                         cpuctrl &= ~optlist->co_value;
1294                         }
1295                 }
1296                 ++optlist;
1297         }
1298         return(cpuctrl);
1299 }
1300 #endif /* CPU_ARM9 || XSCALE*/
1301
1302 #ifdef CPU_ARM9
/* Boot-time cache/writebuf overrides recognized by arm9_setup(). */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1314
1315 void
1316 arm9_setup(args)
1317         char *args;
1318 {
1319         int cpuctrl, cpuctrlmask;
1320
1321         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1322             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1323             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1324             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1325             CPU_CONTROL_ROUNDROBIN;
1326         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1327                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1328                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1329                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1330                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1331                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1332                  | CPU_CONTROL_ROUNDROBIN;
1333
1334 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1335         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1336 #endif
1337
1338         cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1339
1340 #ifdef __ARMEB__
1341         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1342 #endif
1343         if (vector_page == ARM_VECTORS_HIGH)
1344                 cpuctrl |= CPU_CONTROL_VECRELOC;
1345
1346         /* Clear out the cache */
1347         cpu_idcache_wbinv_all();
1348
1349         /* Set the control register */
1350         cpu_control(cpuctrlmask, cpuctrl);
1351         ctrl = cpuctrl;
1352
1353 }
1354 #endif  /* CPU_ARM9 */
1355
1356 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* Boot-time cache/writebuf overrides recognized by arm10_setup(). */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1368
1369 void
1370 arm10_setup(args)
1371         char *args;
1372 {
1373         int cpuctrl, cpuctrlmask;
1374
1375         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1376             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1377             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1378         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1379             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1380             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1381             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1382             | CPU_CONTROL_BPRD_ENABLE
1383             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1384
1385 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1386         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1387 #endif
1388
1389         cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1390
1391 #ifdef __ARMEB__
1392         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1393 #endif
1394
1395         /* Clear out the cache */
1396         cpu_idcache_wbinv_all();
1397
1398         /* Now really make sure they are clean.  */
1399         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1400
1401         if (vector_page == ARM_VECTORS_HIGH)
1402                 cpuctrl |= CPU_CONTROL_VECRELOC;
1403
1404         /* Set the control register */
1405         ctrl = cpuctrl;
1406         cpu_control(0xffffffff, cpuctrl);
1407
1408         /* And again. */
1409         cpu_idcache_wbinv_all();
1410 }
1411 #endif  /* CPU_ARM9E || CPU_ARM10 */
1412
1413 #if defined(CPU_ARM1136) || defined(CPU_ARM1176) \
1414  || defined(CPU_MV_PJ4B) \
1415  || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Enable the performance cycle counter (CCNT) for the cores handled by
 * this file.  ARM1136/ARM1176 use the ARM11 performance monitor CP15
 * encodings (c15); the ARMv7 parts use the PMUv1/v2 encodings (c9).
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	/* Use the Secure User and Non-secure Access Validation Control Register
	 * to allow userland access
	 */
	__asm volatile ("mcr	p15, 0, %0, c15, c9, 0\n\t"
			:
			: "r"(0x00000001));
#else
	/* Set PMUSERENR[0] to allow userland access */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 0\n\t"
			:
			: "r"(0x00000001));
#endif
#endif
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	__asm volatile ("mcr	p15, 0, %0, c15, c12, 0\n\t"
			:
			: "r"(0x00000005));
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 2\n\t"
			"mcr	p15, 0, %1, c9, c12, 0\n\t"
			"mcr	p15, 0, %2, c9, c12, 1\n\t"
			:
			: "r"(0xFFFFFFFF),
			  "r"(0x00000005),
			  "r"(0x80000000));
#endif
}
1458 #endif
1459
1460 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
/*
 * Boot-time option table for ARM11 cores, handed to parse_cpu_options()
 * by arm11x6_setup().  Each entry maps a "cpu.*"/"arm11.*" option name
 * to the CP15 control-register cache-enable bits to set or clear.
 */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1469
/*
 * arm11x6_setup:
 *
 *	Configure the CP15 control and auxiliary-control registers on
 *	ARM1136/ARM1176 cores: build the desired control-register value,
 *	apply boot-argument overrides, install the documented errata
 *	workarounds, flush the caches around the register writes, and
 *	finally enable the PMU cycle counter.
 */
void
arm11x6_setup(char *args)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t sbz=0;		/* "should be zero" operand for MCRs */
	uint32_t cpuid;

	cpuid = cpufunc_id();

	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
		(3 << 30) | /* SBZ */
		(1 << 29) | /* FA */
		(1 << 28) | /* TR */
		(3 << 26) | /* SBZ */ 
		(3 << 19) | /* SBZ */
		(1 << 17);  /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

	/* Apply any "cpu.*"/"arm11.*" boot-argument overrides. */
	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	auxctrl = 0;
	auxctrl_wax = ~0;
	/*
	 * This options enables the workaround for the 364296 ARM1136
	 * r0pX errata (possible cache data corruption with
	 * hit-under-miss enabled). It sets the undocumented bit 31 in
	 * the auxiliary control register and the FI bit in the control
	 * register, thus disabling hit-under-miss without putting the
	 * processor into full low interrupt latency mode. ARM11MPCore
	 * is not affected.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
		cpuctrl |= CPU_CONTROL_FI_ENABLE;
		auxctrl = ARM1136_AUXCTL_PFI;
		auxctrl_wax = ~ARM1136_AUXCTL_PFI;
	}

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean (c7, c7, 0: invalidate
	 * both I and D caches).  */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted: open up
	 * the coprocessor access control register (c1, c0, 2) so the
	 * probe can touch cp10/cp11.  */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register; only bits outside cpuctrl_wax are
	 * actually written (the wax bits keep their existing values). */
	ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/* Read-modify-write the auxiliary control register: compute
	 * (old & auxctrl_wax) | auxctrl and write it back only if the
	 * value actually changed (mcrne). */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"and	%1, %0, %2\n\t"
			"orr	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(auxctrl_wax), "r"(auxctrl));

	/* And again. */
	cpu_idcache_wbinv_all();

	cpu_scc_setup_ccnt();
}
1566 #endif  /* CPU_ARM1136 || CPU_ARM1176 */
1567
1568 #ifdef CPU_MV_PJ4B
1569 void
1570 pj4bv7_setup(args)
1571         char *args;
1572 {
1573         int cpuctrl;
1574
1575         pj4b_config();
1576
1577         cpuctrl = CPU_CONTROL_MMU_ENABLE;
1578 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1579         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1580 #endif
1581         cpuctrl |= CPU_CONTROL_DC_ENABLE;
1582         cpuctrl |= (0xf << 3);
1583         cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1584         cpuctrl |= CPU_CONTROL_IC_ENABLE;
1585         if (vector_page == ARM_VECTORS_HIGH)
1586                 cpuctrl |= CPU_CONTROL_VECRELOC;
1587         cpuctrl |= (0x5 << 16) | (1 < 22);
1588         cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1589
1590         /* Clear out the cache */
1591         cpu_idcache_wbinv_all();
1592
1593         /* Set the control register */
1594         ctrl = cpuctrl;
1595         cpu_control(0xFFFFFFFF, cpuctrl);
1596
1597         /* And again. */
1598         cpu_idcache_wbinv_all();
1599
1600         cpu_scc_setup_ccnt();
1601 }
1602 #endif /* CPU_MV_PJ4B */
1603
1604 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1605
/*
 * cortexa_setup:
 *
 *	Initialize Cortex-A (and Krait) cores: build the CP15 control
 *	register value (MMU, I/D caches, branch prediction, optional
 *	alignment faults and high vectors), write it with cache flushes
 *	on either side, enable SMP/TLB-broadcast auxiliary-control bits
 *	on SMP kernels, and enable the PMU cycle counter.
 *
 *	`args' is accepted for symmetry with the other *_setup()
 *	routines but is not used here.
 */
void
cortexa_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	/* Mask of the control-register bits this routine manages. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */

	cpuctrl = CPU_CONTROL_MMU_ENABLE |
	    CPU_CONTROL_IC_ENABLE |
	    CPU_CONTROL_DC_ENABLE |
	    CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Switch to big endian */
#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Check if the vector page is at the high address (0xffff0000) */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(cpuctrlmask, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
#ifdef SMP
	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
#endif

	cpu_scc_setup_ccnt();
}
1651 #endif  /* CPU_CORTEXA */
1652
1653 #if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * Boot-time option table for Faraday FA526/FA626TE cores, handed to
 * parse_cpu_options() by fa526_setup().  Each entry maps an option
 * name to the CP15 control-register cache/write-buffer bits to set or
 * clear.  The unprefixed COMPAT_12 names are legacy spellings.
 */
struct cpu_option fa526_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1668
/*
 * fa526_setup:
 *
 *	Initialize Faraday FA526/FA626TE cores: build the CP15 control
 *	register value, apply boot-argument overrides from
 *	fa526_options, flush the caches, and write the control
 *	register.
 */
void
fa526_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		| CPU_CONTROL_BPRD_ENABLE;
	/* Full set of bits this routine may touch (informational: the
	 * cpu_control() call below writes with an all-ones mask). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Apply any "cpu.*" boot-argument overrides. */
	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
1707 #endif  /* CPU_FA526 || CPU_FA626TE */
1708
1709 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1710   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1711   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * Boot-time option table for Intel XScale cores, handed to
 * parse_cpu_options() by xscale_setup().  Each entry maps a
 * "cpu.*"/"xscale.*" option name to the CP15 control-register
 * cache/branch-prediction bits to set or clear.  The unprefixed
 * COMPAT_12 names are legacy spellings.
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1726
1727 void
1728 xscale_setup(args)
1729         char *args;
1730 {
1731         uint32_t auxctl;
1732         int cpuctrl, cpuctrlmask;
1733
1734         /*
1735          * The XScale Write Buffer is always enabled.  Our option
1736          * is to enable/disable coalescing.  Note that bits 6:3
1737          * must always be enabled.
1738          */
1739
1740         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1741                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1742                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1743                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1744                  | CPU_CONTROL_BPRD_ENABLE;
1745         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1746                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1747                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1748                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1749                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1750                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1751                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1752                  CPU_CONTROL_L2_ENABLE;
1753
1754 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1755         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1756 #endif
1757
1758         cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1759
1760 #ifdef __ARMEB__
1761         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1762 #endif
1763
1764         if (vector_page == ARM_VECTORS_HIGH)
1765                 cpuctrl |= CPU_CONTROL_VECRELOC;
1766 #ifdef CPU_XSCALE_CORE3
1767         cpuctrl |= CPU_CONTROL_L2_ENABLE;
1768 #endif
1769
1770         /* Clear out the cache */
1771         cpu_idcache_wbinv_all();
1772
1773         /*
1774          * Set the control register.  Note that bits 6:3 must always
1775          * be set to 1.
1776          */
1777         ctrl = cpuctrl;
1778 /*      cpu_control(cpuctrlmask, cpuctrl);*/
1779         cpu_control(0xffffffff, cpuctrl);
1780
1781         /* Make sure write coalescing is turned on */
1782         __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1783                 : "=r" (auxctl));
1784 #ifdef XSCALE_NO_COALESCE_WRITES
1785         auxctl |= XSCALE_AUXCTL_K;
1786 #else
1787         auxctl &= ~XSCALE_AUXCTL_K;
1788 #endif
1789 #ifdef CPU_XSCALE_CORE3
1790         auxctl |= XSCALE_AUXCTL_LLR;
1791         auxctl |= XSCALE_AUXCTL_MD_MASK;
1792 #endif
1793         __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1794                 : : "r" (auxctl));
1795 }
1796 #endif  /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
1797            CPU_XSCALE_80219 */