]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/arm/arm/cpufunc.c
Use ARMv7 style unaligned access on ARMv6. We set this bit in locore, but
[FreeBSD/FreeBSD.git] / sys / arm / arm / cpufunc.c
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm9 support code Copyright (C) 2001 ARM Ltd
5  * Copyright (c) 1997 Mark Brinicombe.
6  * Copyright (c) 1997 Causality Limited
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Causality Limited.
20  * 4. The name of Causality Limited may not be used to endorse or promote
21  *    products derived from this software without specific prior written
22  *    permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * RiscBSD kernel project
37  *
38  * cpufuncs.c
39  *
40  * C functions for supporting CPU / MMU / TLB specific operations.
41  *
42  * Created      : 30/01/97
43  */
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
55
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 #include <vm/uma.h>
59
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
62
63 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
64 #include <arm/xscale/i80321/i80321reg.h>
65 #include <arm/xscale/i80321/i80321var.h>
66 #endif
67
68 /*
69  * Some definitions in i81342reg.h clash with i80321reg.h.
70  * This only happens for the LINT kernel. As it happens,
71  * we don't need anything from i81342reg.h that we already
72  * got from somewhere else during a LINT compile.
73  */
74 #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
75 #include <arm/xscale/i8134x/i81342reg.h>
76 #endif
77
78 #ifdef CPU_XSCALE_IXP425
79 #include <arm/xscale/ixp425/ixp425reg.h>
80 #include <arm/xscale/ixp425/ixp425var.h>
81 #endif
82
83 /* PRIMARY CACHE VARIABLES */
84 int     arm_picache_size;
85 int     arm_picache_line_size;
86 int     arm_picache_ways;
87
88 int     arm_pdcache_size;       /* and unified */
89 int     arm_pdcache_line_size;
90 int     arm_pdcache_ways;
91
92 int     arm_pcache_type;
93 int     arm_pcache_unified;
94
95 int     arm_dcache_align;
96 int     arm_dcache_align_mask;
97
98 u_int   arm_cache_level;        /* raw CLIDR value (see get_cachetype_cp15) */
99 u_int   arm_cache_type[14];
100 u_int   arm_cache_loc;          /* Level of Coherency extracted from CLIDR */
101
102 /* 1 == use cpu_sleep(), 0 == don't */
103 int cpu_do_powersave;
104 int ctrl;       /* NOTE(review): presumably caches the CP15 control register value set during cpu setup -- confirm against the *_setup routines */
105
106 #ifdef CPU_ARM9
/* CPU function table selected when CPU_ARM9 is configured: arm9-specific cache/TTB ops, armv4 TLB and write-buffer helpers, and no-op L2 entries. */
107 struct cpu_functions arm9_cpufuncs = {
108         /* CPU functions */
109
110         cpufunc_id,                     /* id                   */
111         cpufunc_nullop,                 /* cpwait               */
112
113         /* MMU functions */
114
115         cpufunc_control,                /* control              */
116         cpufunc_domains,                /* Domain               */
117         arm9_setttb,                    /* Setttb               */
118         cpufunc_faultstatus,            /* Faultstatus          */
119         cpufunc_faultaddress,           /* Faultaddress         */
120
121         /* TLB functions */
122
123         armv4_tlb_flushID,              /* tlb_flushID          */
124         arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
125         armv4_tlb_flushI,               /* tlb_flushI           */
126         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
127         armv4_tlb_flushD,               /* tlb_flushD           */
128         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
129
130         /* Cache operations */
131
132         arm9_icache_sync_all,           /* icache_sync_all      */
133         arm9_icache_sync_range,         /* icache_sync_range    */
134
135         arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
136         arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
137         arm9_dcache_inv_range,          /* dcache_inv_range     */
138         arm9_dcache_wb_range,           /* dcache_wb_range      */
139
140         armv4_idcache_inv_all,          /* idcache_inv_all      */
141         arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
142         arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
143         cpufunc_nullop,                 /* l2cache_wbinv_all    */
144         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
145         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
146         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
147         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
148
149         /* Other functions */
150
151         cpufunc_nullop,                 /* flush_prefetchbuf    */
152         armv4_drain_writebuf,           /* drain_writebuf       */
153         cpufunc_nullop,                 /* flush_brnchtgt_C     */
154         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
155
156         (void *)cpufunc_nullop,         /* sleep                */
157
158         /* Soft functions */
159
160         cpufunc_null_fixup,             /* dataabt_fixup        */
161         cpufunc_null_fixup,             /* prefetchabt_fixup    */
162
163         arm9_context_switch,            /* context_switch       */
164
165         arm9_setup                      /* cpu setup            */
166
167 };
168 #endif /* CPU_ARM9 */
169
170 #if defined(CPU_ARM9E)
/* CPU function table for ARMv5 "EC" cores (CPU_ARM9E): armv5_ec cache ops, armv4/arm10 TLB helpers, no L2 ops. */
171 struct cpu_functions armv5_ec_cpufuncs = {
172         /* CPU functions */
173
174         cpufunc_id,                     /* id                   */
175         cpufunc_nullop,                 /* cpwait               */
176
177         /* MMU functions */
178
179         cpufunc_control,                /* control              */
180         cpufunc_domains,                /* Domain               */
181         armv5_ec_setttb,                /* Setttb               */
182         cpufunc_faultstatus,            /* Faultstatus          */
183         cpufunc_faultaddress,           /* Faultaddress         */
184
185         /* TLB functions */
186
187         armv4_tlb_flushID,              /* tlb_flushID          */
188         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
189         armv4_tlb_flushI,               /* tlb_flushI           */
190         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
191         armv4_tlb_flushD,               /* tlb_flushD           */
192         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
193
194         /* Cache operations */
195
196         armv5_ec_icache_sync_all,       /* icache_sync_all      */
197         armv5_ec_icache_sync_range,     /* icache_sync_range    */
198
199         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
200         armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
201         armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
202         armv5_ec_dcache_wb_range,       /* dcache_wb_range      */
203
204         armv4_idcache_inv_all,          /* idcache_inv_all      */
205         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
206         armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */
207
208         cpufunc_nullop,                 /* l2cache_wbinv_all    */
209         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
210         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
211         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
212         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
213
214         /* Other functions */
215
216         cpufunc_nullop,                 /* flush_prefetchbuf    */
217         armv4_drain_writebuf,           /* drain_writebuf       */
218         cpufunc_nullop,                 /* flush_brnchtgt_C     */
219         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
220
221         (void *)cpufunc_nullop,         /* sleep                */
222
223         /* Soft functions */
224
225         cpufunc_null_fixup,             /* dataabt_fixup        */
226         cpufunc_null_fixup,             /* prefetchabt_fixup    */
227
228         arm10_context_switch,           /* context_switch       */
229
230         arm10_setup                     /* cpu setup            */
231
232 };
233
/* CPU function table for Marvell Sheeva cores (built under CPU_ARM9E): armv5_ec cache ops plus sheeva-specific range ops and real L2 cache ops. */
234 struct cpu_functions sheeva_cpufuncs = {
235         /* CPU functions */
236
237         cpufunc_id,                     /* id                   */
238         cpufunc_nullop,                 /* cpwait               */
239
240         /* MMU functions */
241
242         cpufunc_control,                /* control              */
243         cpufunc_domains,                /* Domain               */
244         sheeva_setttb,                  /* Setttb               */
245         cpufunc_faultstatus,            /* Faultstatus          */
246         cpufunc_faultaddress,           /* Faultaddress         */
247
248         /* TLB functions */
249
250         armv4_tlb_flushID,              /* tlb_flushID          */
251         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
252         armv4_tlb_flushI,               /* tlb_flushI           */
253         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
254         armv4_tlb_flushD,               /* tlb_flushD           */
255         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
256
257         /* Cache operations */
258
259         armv5_ec_icache_sync_all,       /* icache_sync_all      */
260         armv5_ec_icache_sync_range,     /* icache_sync_range    */
261
262         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
263         sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
264         sheeva_dcache_inv_range,        /* dcache_inv_range     */
265         sheeva_dcache_wb_range,         /* dcache_wb_range      */
266
267         armv4_idcache_inv_all,          /* idcache_inv_all      */
268         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
269         sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */
270
271         sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
272         sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
273         sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
274         sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
275         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
276
277         /* Other functions */
278
279         cpufunc_nullop,                 /* flush_prefetchbuf    */
280         armv4_drain_writebuf,           /* drain_writebuf       */
281         cpufunc_nullop,                 /* flush_brnchtgt_C     */
282         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
283
284         sheeva_cpu_sleep,               /* sleep                */
285
286         /* Soft functions */
287
288         cpufunc_null_fixup,             /* dataabt_fixup        */
289         cpufunc_null_fixup,             /* prefetchabt_fixup    */
290
291         arm10_context_switch,           /* context_switch       */
292
293         arm10_setup                     /* cpu setup            */
294 };
295 #endif /* CPU_ARM9E */
296
297 #ifdef CPU_MV_PJ4B
/* CPU function table for Marvell PJ4B (ARMv7-class, CPU_MV_PJ4B): armv7 unified-TLB and cache ops; L2 entries are no-ops here. */
298 struct cpu_functions pj4bv7_cpufuncs = {
299         /* CPU functions */
300
301         cpufunc_id,                     /* id                   */
302         armv7_drain_writebuf,           /* cpwait               */
303
304         /* MMU functions */
305
306         cpufunc_control,                /* control              */
307         cpufunc_domains,                /* Domain               */
308         armv7_setttb,                   /* Setttb               */
309         cpufunc_faultstatus,            /* Faultstatus          */
310         cpufunc_faultaddress,           /* Faultaddress         */
311
312         /* TLB functions */
313
314         armv7_tlb_flushID,              /* tlb_flushID          */
315         armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
316         armv7_tlb_flushID,              /* tlb_flushI           */
317         armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
318         armv7_tlb_flushID,              /* tlb_flushD           */
319         armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */
320
321         /* Cache operations */
322         /* NOTE(review): icache_sync_all is wired to the full I+D write-back-invalidate
323          * (armv7_idcache_wbinv_all) rather than an icache-only sync, unlike the
324          * cortexa table below which uses armv7_icache_sync_all -- confirm intentional. */
325         armv7_idcache_wbinv_all,        /* icache_sync_all      */
326         armv7_icache_sync_range,        /* icache_sync_range    */
327
328         armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
329         armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
330         armv7_dcache_inv_range,         /* dcache_inv_range     */
331         armv7_dcache_wb_range,          /* dcache_wb_range      */
332
333         armv7_idcache_inv_all,          /* idcache_inv_all      */
334         armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
335         armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
336
337         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
338         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
339         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
340         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
341         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
342
343         /* Other functions */
344
345         cpufunc_nullop,                 /* flush_prefetchbuf    */
346         armv7_drain_writebuf,           /* drain_writebuf       */
347         cpufunc_nullop,                 /* flush_brnchtgt_C     */
348         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
349
350         (void *)cpufunc_nullop,         /* sleep                */
351
352         /* Soft functions */
353
354         cpufunc_null_fixup,             /* dataabt_fixup        */
355         cpufunc_null_fixup,             /* prefetchabt_fixup    */
356
357         armv7_context_switch,           /* context_switch       */
358
359         pj4bv7_setup                    /* cpu setup            */
360 };
358 #endif /* CPU_MV_PJ4B */
359
360 #if defined(CPU_XSCALE_80321) || \
361   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
362   defined(CPU_XSCALE_80219)
363
/* CPU function table for Intel XScale cores (80321/PXA2x0/IXP425/80219): xscale cache/TTB ops, armv4 TLB helpers, no-op L2. */
364 struct cpu_functions xscale_cpufuncs = {
365         /* CPU functions */
366
367         cpufunc_id,                     /* id                   */
368         xscale_cpwait,                  /* cpwait               */
369
370         /* MMU functions */
371
372         xscale_control,                 /* control              */
373         cpufunc_domains,                /* domain               */
374         xscale_setttb,                  /* setttb               */
375         cpufunc_faultstatus,            /* faultstatus          */
376         cpufunc_faultaddress,           /* faultaddress         */
377
378         /* TLB functions */
379
380         armv4_tlb_flushID,              /* tlb_flushID          */
381         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
382         armv4_tlb_flushI,               /* tlb_flushI           */
383         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
384         armv4_tlb_flushD,               /* tlb_flushD           */
385         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
386
387         /* Cache operations */
388
389         xscale_cache_syncI,             /* icache_sync_all      */
390         xscale_cache_syncI_rng,         /* icache_sync_range    */
391
392         xscale_cache_purgeD,            /* dcache_wbinv_all     */
393         xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
394         xscale_cache_flushD_rng,        /* dcache_inv_range     */
395         xscale_cache_cleanD_rng,        /* dcache_wb_range      */
396
397         xscale_cache_flushID,           /* idcache_inv_all      */
398         xscale_cache_purgeID,           /* idcache_wbinv_all    */
399         xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
400         cpufunc_nullop,                 /* l2cache_wbinv_all    */
401         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
402         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
403         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
404         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
405
406         /* Other functions */
407
408         cpufunc_nullop,                 /* flush_prefetchbuf    */
409         armv4_drain_writebuf,           /* drain_writebuf       */
410         cpufunc_nullop,                 /* flush_brnchtgt_C     */
411         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
412
413         xscale_cpu_sleep,               /* sleep                */
414
415         /* Soft functions */
416
417         cpufunc_null_fixup,             /* dataabt_fixup        */
418         cpufunc_null_fixup,             /* prefetchabt_fixup    */
419
420         xscale_context_switch,          /* context_switch       */
421
422         xscale_setup                    /* cpu setup            */
423 };
424 #endif
425 /* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
426    CPU_XSCALE_80219 */
427
428 #ifdef CPU_XSCALE_81342
/* CPU function table for XScale core 3 (81342): xscalec3 L1 cache/TTB ops and real L2 cache ops; shares cpwait/TLB/sleep with classic XScale. */
429 struct cpu_functions xscalec3_cpufuncs = {
430         /* CPU functions */
431
432         cpufunc_id,                     /* id                   */
433         xscale_cpwait,                  /* cpwait               */
434
435         /* MMU functions */
436
437         xscale_control,                 /* control              */
438         cpufunc_domains,                /* domain               */
439         xscalec3_setttb,                /* setttb               */
440         cpufunc_faultstatus,            /* faultstatus          */
441         cpufunc_faultaddress,           /* faultaddress         */
442
443         /* TLB functions */
444
445         armv4_tlb_flushID,              /* tlb_flushID          */
446         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
447         armv4_tlb_flushI,               /* tlb_flushI           */
448         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
449         armv4_tlb_flushD,               /* tlb_flushD           */
450         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
451
452         /* Cache operations */
453
454         xscalec3_cache_syncI,           /* icache_sync_all      */
455         xscalec3_cache_syncI_rng,       /* icache_sync_range    */
456
457         xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
458         xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
459         xscale_cache_flushD_rng,        /* dcache_inv_range     */
460         xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */
461
462         xscale_cache_flushID,           /* idcache_inv_all      */
463         xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
464         xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
465         xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
466         xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
467         xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
468         xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
469         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
470
471         /* Other functions */
472
473         cpufunc_nullop,                 /* flush_prefetchbuf    */
474         armv4_drain_writebuf,           /* drain_writebuf       */
475         cpufunc_nullop,                 /* flush_brnchtgt_C     */
476         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
477
478         xscale_cpu_sleep,               /* sleep                */
479
480         /* Soft functions */
481
482         cpufunc_null_fixup,             /* dataabt_fixup        */
483         cpufunc_null_fixup,             /* prefetchabt_fixup    */
484
485         xscalec3_context_switch,        /* context_switch       */
486
487         xscale_setup                    /* cpu setup            */
488 };
489 #endif /* CPU_XSCALE_81342 */
490
491
492 #if defined(CPU_FA526)
/* CPU function table for Faraday FA526 (CPU_FA526): fa526-specific cache/TLB-SE/branch-target ops over armv4 helpers, no L2. */
493 struct cpu_functions fa526_cpufuncs = {
494         /* CPU functions */
495
496         cpufunc_id,                     /* id                   */
497         cpufunc_nullop,                 /* cpwait               */
498
499         /* MMU functions */
500
501         cpufunc_control,                /* control              */
502         cpufunc_domains,                /* domain               */
503         fa526_setttb,                   /* setttb               */
504         cpufunc_faultstatus,            /* faultstatus          */
505         cpufunc_faultaddress,           /* faultaddress         */
506
507         /* TLB functions */
508
509         armv4_tlb_flushID,              /* tlb_flushID          */
510         fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
511         armv4_tlb_flushI,               /* tlb_flushI           */
512         fa526_tlb_flushI_SE,            /* tlb_flushI_SE        */
513         armv4_tlb_flushD,               /* tlb_flushD           */
514         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
515
516         /* Cache operations */
517
518         fa526_icache_sync_all,          /* icache_sync_all      */
519         fa526_icache_sync_range,        /* icache_sync_range    */
520
521         fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
522         fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
523         fa526_dcache_inv_range,         /* dcache_inv_range     */
524         fa526_dcache_wb_range,          /* dcache_wb_range      */
525
526         armv4_idcache_inv_all,          /* idcache_inv_all      */
527         fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
528         fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
529         cpufunc_nullop,                 /* l2cache_wbinv_all    */
530         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
531         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
532         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
533         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
534
535         /* Other functions */
536
537         fa526_flush_prefetchbuf,        /* flush_prefetchbuf    */
538         armv4_drain_writebuf,           /* drain_writebuf       */
539         cpufunc_nullop,                 /* flush_brnchtgt_C     */
540         fa526_flush_brnchtgt_E,         /* flush_brnchtgt_E     */
541
542         fa526_cpu_sleep,                /* sleep                */
543
544         /* Soft functions */
545
546         cpufunc_null_fixup,             /* dataabt_fixup        */
547         cpufunc_null_fixup,             /* prefetchabt_fixup    */
548
549         fa526_context_switch,           /* context_switch       */
550
551         fa526_setup                     /* cpu setup            */
552 };
553 #endif  /* CPU_FA526 */
554
555 #if defined(CPU_ARM1176)
/* CPU function table for ARM1176 (CPU_ARM1176): arm11/arm11x6/armv6 helpers, hardware sleep via arm11x6_sleep, no L2 ops. */
556 struct cpu_functions arm1176_cpufuncs = {
557         /* CPU functions */
558
559         cpufunc_id,                     /* id                   */
560         cpufunc_nullop,                 /* cpwait               */
561
562         /* MMU functions */
563
564         cpufunc_control,                /* control              */
565         cpufunc_domains,                /* Domain               */
566         arm11x6_setttb,                 /* Setttb               */
567         cpufunc_faultstatus,            /* Faultstatus          */
568         cpufunc_faultaddress,           /* Faultaddress         */
569
570         /* TLB functions */
571
572         arm11_tlb_flushID,              /* tlb_flushID          */
573         arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
574         arm11_tlb_flushI,               /* tlb_flushI           */
575         arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
576         arm11_tlb_flushD,               /* tlb_flushD           */
577         arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
578
579         /* Cache operations */
580
581         arm11x6_icache_sync_all,        /* icache_sync_all      */
582         arm11x6_icache_sync_range,      /* icache_sync_range    */
583
584         arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
585         armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
586         armv6_dcache_inv_range,         /* dcache_inv_range     */
587         armv6_dcache_wb_range,          /* dcache_wb_range      */
588
589         armv6_idcache_inv_all,          /* idcache_inv_all      */
590         arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
591         arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
592
593         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
594         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
595         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
596         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
597         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
598
599         /* Other functions */
600
601         arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
602         arm11_drain_writebuf,           /* drain_writebuf       */
603         cpufunc_nullop,                 /* flush_brnchtgt_C     */
604         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
605
606         arm11x6_sleep,                  /* sleep                */
607
608         /* Soft functions */
609
610         cpufunc_null_fixup,             /* dataabt_fixup        */
611         cpufunc_null_fixup,             /* prefetchabt_fixup    */
612
613         arm11_context_switch,           /* context_switch       */
614
615         arm11x6_setup                   /* cpu setup            */
616 };
617 #endif /*CPU_ARM1176 */
618
619 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/* CPU function table for Cortex-A / Krait (CPU_CORTEXA || CPU_KRAIT): armv7 unified-TLB and cache ops; PL310 L2 ops patched in later. */
620 struct cpu_functions cortexa_cpufuncs = {
621         /* CPU functions */
622
623         cpufunc_id,                     /* id                   */
624         cpufunc_nullop,                 /* cpwait               */
625
626         /* MMU functions */
627
628         cpufunc_control,                /* control              */
629         cpufunc_domains,                /* Domain               */
630         armv7_setttb,                   /* Setttb               */
631         cpufunc_faultstatus,            /* Faultstatus          */
632         cpufunc_faultaddress,           /* Faultaddress         */
633
634         /* 
635          * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
636          * whether the hardware implements separate I+D or not, so we use the
637          * same 'ID' functions for all 3 variations.
638          */
639
640         armv7_tlb_flushID,              /* tlb_flushID          */
641         armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
642         armv7_tlb_flushID,              /* tlb_flushI           */
643         armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
644         armv7_tlb_flushID,              /* tlb_flushD           */
645         armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */
646
647         /* Cache operations */
648
649         armv7_icache_sync_all,          /* icache_sync_all      */
650         armv7_icache_sync_range,        /* icache_sync_range    */
651
652         armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
653         armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
654         armv7_dcache_inv_range,         /* dcache_inv_range     */
655         armv7_dcache_wb_range,          /* dcache_wb_range      */
656
657         armv7_idcache_inv_all,          /* idcache_inv_all      */
658         armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
659         armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
660
661         /* 
662          * Note: For CPUs using the PL310 the L2 ops are filled in when the
663          * L2 cache controller is actually enabled.
664          */
665         cpufunc_nullop,                 /* l2cache_wbinv_all    */
666         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
667         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
668         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
669         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
670
671         /* Other functions */
672
673         cpufunc_nullop,                 /* flush_prefetchbuf    */
674         armv7_drain_writebuf,           /* drain_writebuf       */
675         cpufunc_nullop,                 /* flush_brnchtgt_C     */
676         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
677
678         armv7_sleep,                    /* sleep                */
679
680         /* Soft functions */
681
682         cpufunc_null_fixup,             /* dataabt_fixup        */
683         cpufunc_null_fixup,             /* prefetchabt_fixup    */
684
685         armv7_context_switch,           /* context_switch       */
686
687         cortexa_setup                     /* cpu setup            */
688 };
689 #endif /* CPU_CORTEXA */
690
691 /*
692  * Global constants also used by locore.s
693  */
694
695 struct cpu_functions cpufuncs;  /* the active function table; one of the per-CPU tables above is copied/selected into it */
696 u_int cputype;                  /* cached result of cpufunc_id() classification */
697 u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore.s */
698
699 #if defined(CPU_ARM9) ||        \
700   defined (CPU_ARM9E) ||        \
701   defined(CPU_ARM1176) || defined(CPU_XSCALE_80321) ||          \
702   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
703   defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||                 \
704   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
705   defined(CPU_CORTEXA) || defined(CPU_KRAIT)
706
707 /* Global cache line sizes, use 32 as default */
708 int     arm_dcache_min_line_size = 32;  /* overridden from CP15 cache-type reg on ARMv7 (see get_cachetype_cp15) */
709 int     arm_icache_min_line_size = 32;
710 int     arm_idcache_min_line_size = 32; /* min of the I and D line sizes */
711
712 static void get_cachetype_cp15(void);
713
714 /* Additional cache information local to this file.  Log2 of some of the
715    above numbers.  */
716 static int      arm_dcache_l2_nsets;
717 static int      arm_dcache_l2_assoc;
718 static int      arm_dcache_l2_linesize;
719
/*
 * get_cachetype_cp15:
 *
 *	Probe cache geometry through the CP15 identification registers and
 *	fill in the global arm_*cache* variables.  Handles both the ARMv7
 *	format (CLIDR/CSSELR/CCSIDR walk) and the older pre-v7 cache type
 *	register layout.  If the cache type register is unimplemented the
 *	compiled-in defaults are kept.
 */
static void
get_cachetype_cp15()
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	/* Read the cache type register. */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	cpuid = cpufunc_id();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;	/* no cache info available; keep defaults */

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Resolve minimal cache line sizes */
		arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
		arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
		arm_idcache_min_line_size =
		    min(arm_icache_min_line_size, arm_dcache_min_line_size);

		/* Read CLIDR to learn which cache levels exist. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		i = 0;
		/* Walk the 3-bit per-level type fields, at most 7 levels. */
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				sel = i << 1;
				/* Select this data/unified cache (CSSELR)... */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				/* ...and read its geometry (CCSIDR). */
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				/* Last level wins for the alignment globals. */
				arm_dcache_align = 1 << 
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;
		}
	} else {
		/* Pre-ARMv7 cache type register format. */
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

		/*
		 * NOTE: this label sits inside the else branch, so the
		 * "goto out" above bypasses the ARMv7 path and merely
		 * recomputes the mask from the default alignment.
		 */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
829 #endif /* ARM9 || XSCALE */
830
831 /*
832  * Cannot panic here as we may not have a console yet ...
833  */
834
/*
 * set_cpufuncs:
 *
 *	Identify the CPU from the main ID register and install the matching
 *	cpu_functions table, probe the cache geometry, and select the pmap
 *	PTE initialization flavor.  Also decides whether locore needs the
 *	pre-V4 MMU disable dance on reset and whether to default powersave
 *	on.  Returns 0 on success; panics for unsupported CPUs.
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM9
	/* ARM Ltd / TI parts with a 0x9xxx part number (ARM9TDMI family). */
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Precompute dcache set/index iterators for arm9 cache ops. */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
	/* Marvell Sheeva (Feroceon) cores. */
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();

		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* All supported Cortex-A revisions plus Qualcomm Krait. */
	if (cputype == CPU_ID_CORTEXA5 ||
	    cputype == CPU_ID_CORTEXA7 ||
	    cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2 ||
	    cputype == CPU_ID_CORTEXA8R3 ||
	    cputype == CPU_ID_CORTEXA9R1 ||
	    cputype == CPU_ID_CORTEXA9R2 ||
	    cputype == CPU_ID_CORTEXA9R3 ||
	    cputype == CPU_ID_CORTEXA12R0 ||
	    cputype == CPU_ID_CORTEXA15R0 ||
	    cputype == CPU_ID_CORTEXA15R1 ||
	    cputype == CPU_ID_CORTEXA15R2 ||
	    cputype == CPU_ID_CORTEXA15R3 ||
	    cputype == CPU_ID_KRAIT ) {
		cpufuncs = cortexa_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		
		pmap_pte_init_mmu_v6();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;
		goto out;
	}
#endif /* CPU_CORTEXA */
		
#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_mmu_v6();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_FA526 */

#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	/* NOTREACHED: panic() does not return; keeps the compiler happy. */
	return(ARCHITECTURE_NOT_PRESENT);
out:
	/* Let UMA align allocations to the data cache line size. */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
1015
1016 /*
1017  * Fixup routines for data and prefetch aborts.
1018  *
1019  * Several compile time symbols are used
1020  *
1021  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1022  * correction of registers after a fault.
1023  */
1024
1025
1026 /*
1027  * Null abort fixup routine.
1028  * For use when no fixup is required.
1029  */
1030 int
1031 cpufunc_null_fixup(arg)
1032         void *arg;
1033 {
1034         return(ABORT_FIXUP_OK);
1035 }
1036
1037 /*
1038  * CPU Setup code
1039  */
1040
1041 #ifdef CPU_ARM9
/*
 * arm9_setup:
 *
 *	Configure the ARM9 system control register: enable MMU, caches,
 *	write buffer, late abort model and round-robin replacement, then
 *	write the register after cleaning the caches.
 */
void
arm9_setup(void)
{
	int cpuctrl, cpuctrlmask;

	/* Bits we want set. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
	    CPU_CONTROL_ROUNDROBIN;
	/* Bits we are allowed to touch; everything else keeps its value. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
		 | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
	/* Relocate exception vectors to 0xffff0000 if configured so. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(cpuctrlmask, cpuctrl);
	ctrl = cpuctrl;	/* record the value we installed */

}
1078 #endif  /* CPU_ARM9 */
1079
1080 #if defined(CPU_ARM9E)
/*
 * arm10_setup:
 *
 *	Control-register setup used for the ARM9E-class cores (the name
 *	is historical): enable MMU, caches, write buffer and branch
 *	prediction, cleaning the caches before and after the switch.
 */
void
arm10_setup(void)
{
	int cpuctrl, cpuctrlmask;

	/* Bits we want set. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	/* Full set of bits we know about (unused; kept for reference). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	/* (c7, c7, 0: invalidate both I and D caches.) */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
1120 #endif  /* CPU_ARM9E || CPU_ARM10 */
1121
1122 #if defined(CPU_ARM1176) \
1123  || defined(CPU_MV_PJ4B) \
1124  || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * cpu_scc_setup_ccnt:
 *
 *	Enable the CPU cycle counter (CCNT) of the performance monitor
 *	unit so it can be used as a cycle source.  Optionally (if
 *	_PMC_USER_READ_WRITE_ is defined) also opens the counters to
 *	userland — read AND write access.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
#if defined(CPU_ARM1176)
	/* Use the Secure User and Non-secure Access Validation Control Register
	 * to allow userland access
	 */
	__asm volatile ("mcr	p15, 0, %0, c15, c9, 0\n\t"
			:
			: "r"(0x00000001));
#else
	/* Set PMUSERENR[0] to allow userland access */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 0\n\t"
			:
			: "r"(0x00000001));
#endif
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	__asm volatile ("mcr	p15, 0, %0, c15, c12, 0\n\t"
			:
			: "r"(0x00000005));
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 2\n\t"
			"mcr	p15, 0, %1, c9, c12, 0\n\t"
			"mcr	p15, 0, %2, c9, c12, 1\n\t"
			:
			: "r"(0xFFFFFFFF),
			  "r"(0x00000005),
			  "r"(0x80000000));
#endif
}
1167 #endif
1168
1169 #if defined(CPU_ARM1176)
/*
 * arm11x6_setup:
 *
 *	Control-register setup for the ARM1176: enables MMU, caches,
 *	branch prediction, unaligned access and the V6 extended page
 *	tables, applies the ARM1176JZSr0 prefetch-halt errata workaround
 *	through the auxiliary control register, and enables access to the
 *	coprocessors so VFP detection can work.
 */
void
arm11x6_setup(void)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t sbz=0;
	uint32_t cpuid;

	cpuid = cpufunc_id();

	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE   |
		CPU_CONTROL_UNAL_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
		(3 << 30) | /* SBZ */
		(1 << 29) | /* FA */
		(1 << 28) | /* TR */
		(3 << 26) | /* SBZ */ 
		(3 << 19) | /* SBZ */
		(1 << 17);  /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Default: leave the auxiliary control register untouched. */
	auxctrl = 0;
	auxctrl_wax = ~0;

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted.  */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/*
	 * Read-modify-write the auxiliary control register; only write
	 * it back if the masked-and-merged value actually differs.
	 */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"and	%1, %0, %2\n\t"
			"orr	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(auxctrl_wax), "r"(auxctrl));

	/* And again. */
	cpu_idcache_wbinv_all();

	cpu_scc_setup_ccnt();
}
1251 #endif  /* CPU_ARM1176 */
1252
1253 #ifdef CPU_MV_PJ4B
1254 void
1255 pj4bv7_setup(void)
1256 {
1257         int cpuctrl;
1258
1259         pj4b_config();
1260
1261         cpuctrl = CPU_CONTROL_MMU_ENABLE;
1262 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1263         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1264 #endif
1265         cpuctrl |= CPU_CONTROL_DC_ENABLE;
1266         cpuctrl |= (0xf << 3);
1267         cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1268         cpuctrl |= CPU_CONTROL_IC_ENABLE;
1269         if (vector_page == ARM_VECTORS_HIGH)
1270                 cpuctrl |= CPU_CONTROL_VECRELOC;
1271         cpuctrl |= (0x5 << 16) | (1 < 22);
1272         cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1273
1274         /* Clear out the cache */
1275         cpu_idcache_wbinv_all();
1276
1277         /* Set the control register */
1278         ctrl = cpuctrl;
1279         cpu_control(0xFFFFFFFF, cpuctrl);
1280
1281         /* And again. */
1282         cpu_idcache_wbinv_all();
1283
1284         cpu_scc_setup_ccnt();
1285 }
1286 #endif /* CPU_MV_PJ4B */
1287
1288 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1289
/*
 * cortexa_setup:
 *
 *	Control-register setup for Cortex-A / Krait cores: enable MMU,
 *	caches and branch prediction, relocate the vector page when
 *	configured, and on SMP kernels turn on the SMP/broadcast bits in
 *	the auxiliary control register.
 */
void
cortexa_setup(void)
{
	int cpuctrl, cpuctrlmask;

	/* Bits we are allowed to modify. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */

	/* Bits we want set. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE |
	    CPU_CONTROL_IC_ENABLE |
	    CPU_CONTROL_DC_ENABLE |
	    CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Switch to big endian */
#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Check if the vector page is at the high address (0xffff0000) */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(cpuctrlmask, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
#ifdef SMP
	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
#endif

	cpu_scc_setup_ccnt();
}
1335 #endif  /* CPU_CORTEXA */
1336
1337 #if defined(CPU_FA526)
1338 void
1339 fa526_setup(void)
1340 {
1341         int cpuctrl, cpuctrlmask;
1342
1343         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1344                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1345                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1346                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1347                 | CPU_CONTROL_BPRD_ENABLE;
1348         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1349                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1350                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1351                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1352                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1353                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1354                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1355
1356 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1357         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1358 #endif
1359
1360 #ifdef __ARMEB__
1361         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1362 #endif
1363
1364         if (vector_page == ARM_VECTORS_HIGH)
1365                 cpuctrl |= CPU_CONTROL_VECRELOC;
1366
1367         /* Clear out the cache */
1368         cpu_idcache_wbinv_all();
1369
1370         /* Set the control register */
1371         ctrl = cpuctrl;
1372         cpu_control(0xffffffff, cpuctrl);
1373 }
1374 #endif  /* CPU_FA526 */
1375
1376 #if defined(CPU_XSCALE_80321) || \
1377   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1378   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * xscale_setup:
 *
 *	Control-register setup for the XScale cores: enable MMU, caches,
 *	branch prediction and (where present) the L2 cache, then tune the
 *	auxiliary control register for write coalescing and, on core3,
 *	the low-locality-reference and mini-data cache attributes.
 */
void
xscale_setup(void)
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
		 CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	/* Write the updated auxiliary control register back. */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
1445 #endif  /* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
1446            CPU_XSCALE_80219 */