1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm9 support code Copyright (C) 2001 ARM Ltd
5  * Copyright (c) 1997 Mark Brinicombe.
6  * Copyright (c) 1997 Causality Limited
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Causality Limited.
20  * 4. The name of Causality Limited may not be used to endorse or promote
21  *    products derived from this software without specific prior written
22  *    permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * RiscBSD kernel project
37  *
38  * cpufuncs.c
39  *
40  * C functions for supporting CPU / MMU / TLB specific operations.
41  *
42  * Created      : 30/01/97
43  */
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
55
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 #include <vm/uma.h>
59
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
62 #include <machine/bootconfig.h>
63
64 #ifdef CPU_XSCALE_80200
65 #include <arm/xscale/i80200/i80200reg.h>
66 #include <arm/xscale/i80200/i80200var.h>
67 #endif
68
69 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
70 #include <arm/xscale/i80321/i80321reg.h>
71 #include <arm/xscale/i80321/i80321var.h>
72 #endif
73
74 /*
75  * Some definitions in i81342reg.h clash with i80321reg.h.
76  * This only happens for the LINT kernel. As it happens,
77  * we don't need anything from i81342reg.h that we already
78  * got from somewhere else during a LINT compile.
79  */
80 #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
81 #include <arm/xscale/i8134x/i81342reg.h>
82 #endif
83
84 #ifdef CPU_XSCALE_IXP425
85 #include <arm/xscale/ixp425/ixp425reg.h>
86 #include <arm/xscale/ixp425/ixp425var.h>
87 #endif
88
89 /* PRIMARY CACHE VARIABLES */
90 int     arm_picache_size;
91 int     arm_picache_line_size;
92 int     arm_picache_ways;
93
94 int     arm_pdcache_size;       /* and unified */
95 int     arm_pdcache_line_size;
96 int     arm_pdcache_ways;
97
98 int     arm_pcache_type;
99 int     arm_pcache_unified;
100
101 int     arm_dcache_align;
102 int     arm_dcache_align_mask;
103
104 u_int   arm_cache_level;
105 u_int   arm_cache_type[14];
106 u_int   arm_cache_loc;
107
108 /* 1 == use cpu_sleep(), 0 == don't */
109 int cpu_do_powersave;
110 int ctrl;
111
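/*
 * Illustrative note: each per-core table below is a candidate value for the
 * global 'cpufuncs' switch that set_cpufuncs() installs at boot.  Kernel code
 * reaches the installed handlers through the cpu_*() wrappers in
 * <machine/cpufunc.h>, which are roughly of the form
 *
 *	#define cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
 *
 * so a single kernel image can dispatch to the implementation that matches
 * the CPU it booted on.
 */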
112 #ifdef CPU_ARM9
113 struct cpu_functions arm9_cpufuncs = {
114         /* CPU functions */
115
116         cpufunc_id,                     /* id                   */
117         cpufunc_nullop,                 /* cpwait               */
118
119         /* MMU functions */
120
121         cpufunc_control,                /* control              */
122         cpufunc_domains,                /* Domain               */
123         arm9_setttb,                    /* Setttb               */
124         cpufunc_faultstatus,            /* Faultstatus          */
125         cpufunc_faultaddress,           /* Faultaddress         */
126
127         /* TLB functions */
128
129         armv4_tlb_flushID,              /* tlb_flushID          */
130         arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
131         armv4_tlb_flushI,               /* tlb_flushI           */
132         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
133         armv4_tlb_flushD,               /* tlb_flushD           */
134         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
135
136         /* Cache operations */
137
138         arm9_icache_sync_all,           /* icache_sync_all      */
139         arm9_icache_sync_range,         /* icache_sync_range    */
140
141         arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
142         arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
143         arm9_dcache_inv_range,          /* dcache_inv_range     */
144         arm9_dcache_wb_range,           /* dcache_wb_range      */
145
146         armv4_idcache_inv_all,          /* idcache_inv_all      */
147         arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
148         arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
149         cpufunc_nullop,                 /* l2cache_wbinv_all    */
150         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
151         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
152         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
153
154         /* Other functions */
155
156         cpufunc_nullop,                 /* flush_prefetchbuf    */
157         armv4_drain_writebuf,           /* drain_writebuf       */
158         cpufunc_nullop,                 /* flush_brnchtgt_C     */
159         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
160
161         (void *)cpufunc_nullop,         /* sleep                */
162
163         /* Soft functions */
164
165         cpufunc_null_fixup,             /* dataabt_fixup        */
166         cpufunc_null_fixup,             /* prefetchabt_fixup    */
167
168         arm9_context_switch,            /* context_switch       */
169
170         arm9_setup                      /* cpu setup            */
171
172 };
173 #endif /* CPU_ARM9 */
174
175 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
176 struct cpu_functions armv5_ec_cpufuncs = {
177         /* CPU functions */
178
179         cpufunc_id,                     /* id                   */
180         cpufunc_nullop,                 /* cpwait               */
181
182         /* MMU functions */
183
184         cpufunc_control,                /* control              */
185         cpufunc_domains,                /* Domain               */
186         armv5_ec_setttb,                /* Setttb               */
187         cpufunc_faultstatus,            /* Faultstatus          */
188         cpufunc_faultaddress,           /* Faultaddress         */
189
190         /* TLB functions */
191
192         armv4_tlb_flushID,              /* tlb_flushID          */
193         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
194         armv4_tlb_flushI,               /* tlb_flushI           */
195         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
196         armv4_tlb_flushD,               /* tlb_flushD           */
197         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
198
199         /* Cache operations */
200
201         armv5_ec_icache_sync_all,       /* icache_sync_all      */
202         armv5_ec_icache_sync_range,     /* icache_sync_range    */
203
204         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
205         armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
206         armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
207         armv5_ec_dcache_wb_range,       /* dcache_wb_range      */
208
209         armv4_idcache_inv_all,          /* idcache_inv_all      */
210         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
211         armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */
212
213         cpufunc_nullop,                 /* l2cache_wbinv_all    */
214         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
215         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
216         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
217
218         /* Other functions */
219
220         cpufunc_nullop,                 /* flush_prefetchbuf    */
221         armv4_drain_writebuf,           /* drain_writebuf       */
222         cpufunc_nullop,                 /* flush_brnchtgt_C     */
223         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
224
225         (void *)cpufunc_nullop,         /* sleep                */
226
227         /* Soft functions */
228
229         cpufunc_null_fixup,             /* dataabt_fixup        */
230         cpufunc_null_fixup,             /* prefetchabt_fixup    */
231
232         arm10_context_switch,           /* context_switch       */
233
234         arm10_setup                     /* cpu setup            */
235
236 };
237
238 struct cpu_functions sheeva_cpufuncs = {
239         /* CPU functions */
240
241         cpufunc_id,                     /* id                   */
242         cpufunc_nullop,                 /* cpwait               */
243
244         /* MMU functions */
245
246         cpufunc_control,                /* control              */
247         cpufunc_domains,                /* Domain               */
248         sheeva_setttb,                  /* Setttb               */
249         cpufunc_faultstatus,            /* Faultstatus          */
250         cpufunc_faultaddress,           /* Faultaddress         */
251
252         /* TLB functions */
253
254         armv4_tlb_flushID,              /* tlb_flushID          */
255         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
256         armv4_tlb_flushI,               /* tlb_flushI           */
257         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
258         armv4_tlb_flushD,               /* tlb_flushD           */
259         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
260
261         /* Cache operations */
262
263         armv5_ec_icache_sync_all,       /* icache_sync_all      */
264         armv5_ec_icache_sync_range,     /* icache_sync_range    */
265
266         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
267         sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
268         sheeva_dcache_inv_range,        /* dcache_inv_range     */
269         sheeva_dcache_wb_range,         /* dcache_wb_range      */
270
271         armv4_idcache_inv_all,          /* idcache_inv_all      */
272         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
273         sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */
274
275         sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
276         sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
277         sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
278         sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
279
280         /* Other functions */
281
282         cpufunc_nullop,                 /* flush_prefetchbuf    */
283         armv4_drain_writebuf,           /* drain_writebuf       */
284         cpufunc_nullop,                 /* flush_brnchtgt_C     */
285         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
286
287         sheeva_cpu_sleep,               /* sleep                */
288
289         /* Soft functions */
290
291         cpufunc_null_fixup,             /* dataabt_fixup        */
292         cpufunc_null_fixup,             /* prefetchabt_fixup    */
293
294         arm10_context_switch,           /* context_switch       */
295
296         arm10_setup                     /* cpu setup            */
297 };
298 #endif /* CPU_ARM9E || CPU_ARM10 */
299
300 #ifdef CPU_ARM10
301 struct cpu_functions arm10_cpufuncs = {
302         /* CPU functions */
303
304         cpufunc_id,                     /* id                   */
305         cpufunc_nullop,                 /* cpwait               */
306
307         /* MMU functions */
308
309         cpufunc_control,                /* control              */
310         cpufunc_domains,                /* Domain               */
311         arm10_setttb,                   /* Setttb               */
312         cpufunc_faultstatus,            /* Faultstatus          */
313         cpufunc_faultaddress,           /* Faultaddress         */
314
315         /* TLB functions */
316
317         armv4_tlb_flushID,              /* tlb_flushID          */
318         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
319         armv4_tlb_flushI,               /* tlb_flushI           */
320         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
321         armv4_tlb_flushD,               /* tlb_flushD           */
322         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
323
324         /* Cache operations */
325
326         arm10_icache_sync_all,          /* icache_sync_all      */
327         arm10_icache_sync_range,        /* icache_sync_range    */
328
329         arm10_dcache_wbinv_all,         /* dcache_wbinv_all     */
330         arm10_dcache_wbinv_range,       /* dcache_wbinv_range   */
331         arm10_dcache_inv_range,         /* dcache_inv_range     */
332         arm10_dcache_wb_range,          /* dcache_wb_range      */
333
334         armv4_idcache_inv_all,          /* idcache_inv_all      */
335         arm10_idcache_wbinv_all,        /* idcache_wbinv_all    */
336         arm10_idcache_wbinv_range,      /* idcache_wbinv_range  */
337         cpufunc_nullop,                 /* l2cache_wbinv_all    */
338         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
339         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
340         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
341
342         /* Other functions */
343
344         cpufunc_nullop,                 /* flush_prefetchbuf    */
345         armv4_drain_writebuf,           /* drain_writebuf       */
346         cpufunc_nullop,                 /* flush_brnchtgt_C     */
347         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
348
349         (void *)cpufunc_nullop,         /* sleep                */
350
351         /* Soft functions */
352
353         cpufunc_null_fixup,             /* dataabt_fixup        */
354         cpufunc_null_fixup,             /* prefetchabt_fixup    */
355
356         arm10_context_switch,           /* context_switch       */
357
358         arm10_setup                     /* cpu setup            */
359
360 };
361 #endif /* CPU_ARM10 */
362
363 #ifdef CPU_MV_PJ4B
364 struct cpu_functions pj4bv7_cpufuncs = {
365         /* CPU functions */
366
367         cpufunc_id,                     /* id                   */
368         arm11_drain_writebuf,           /* cpwait               */
369
370         /* MMU functions */
371
372         cpufunc_control,                /* control              */
373         cpufunc_domains,                /* Domain               */
374         pj4b_setttb,                    /* Setttb               */
375         cpufunc_faultstatus,            /* Faultstatus          */
376         cpufunc_faultaddress,           /* Faultaddress         */
377
378         /* TLB functions */
379
380         armv7_tlb_flushID,              /* tlb_flushID          */
381         armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
382         armv7_tlb_flushID,              /* tlb_flushI           */
383         armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
384         armv7_tlb_flushID,              /* tlb_flushD           */
385         armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */
386
387         /* Cache operations */
388         armv7_idcache_wbinv_all,        /* icache_sync_all      */
389         armv7_icache_sync_range,        /* icache_sync_range    */
390
391         armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
392         armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
393         armv7_dcache_inv_range,         /* dcache_inv_range     */
394         armv7_dcache_wb_range,          /* dcache_wb_range      */
395
396         armv7_idcache_inv_all,          /* idcache_inv_all      */
397         armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
398         armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
399
400         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
401         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
402         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
403         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
404
405         /* Other functions */
406
407         pj4b_drain_readbuf,             /* flush_prefetchbuf    */
408         arm11_drain_writebuf,           /* drain_writebuf       */
409         pj4b_flush_brnchtgt_all,        /* flush_brnchtgt_C     */
410         pj4b_flush_brnchtgt_va,         /* flush_brnchtgt_E     */
411
412         (void *)cpufunc_nullop,         /* sleep                */
413
414         /* Soft functions */
415
416         cpufunc_null_fixup,             /* dataabt_fixup        */
417         cpufunc_null_fixup,             /* prefetchabt_fixup    */
418
419         arm11_context_switch,           /* context_switch       */
420
421         pj4bv7_setup                    /* cpu setup            */
422 };
423 #endif /* CPU_MV_PJ4B */
424
425 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
426   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
427   defined(CPU_XSCALE_80219)
428
429 struct cpu_functions xscale_cpufuncs = {
430         /* CPU functions */
431         
432         cpufunc_id,                     /* id                   */
433         xscale_cpwait,                  /* cpwait               */
434
435         /* MMU functions */
436
437         xscale_control,                 /* control              */
438         cpufunc_domains,                /* domain               */
439         xscale_setttb,                  /* setttb               */
440         cpufunc_faultstatus,            /* faultstatus          */
441         cpufunc_faultaddress,           /* faultaddress         */
442
443         /* TLB functions */
444
445         armv4_tlb_flushID,              /* tlb_flushID          */
446         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
447         armv4_tlb_flushI,               /* tlb_flushI           */
448         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
449         armv4_tlb_flushD,               /* tlb_flushD           */
450         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
451
452         /* Cache operations */
453
454         xscale_cache_syncI,             /* icache_sync_all      */
455         xscale_cache_syncI_rng,         /* icache_sync_range    */
456
457         xscale_cache_purgeD,            /* dcache_wbinv_all     */
458         xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
459         xscale_cache_flushD_rng,        /* dcache_inv_range     */
460         xscale_cache_cleanD_rng,        /* dcache_wb_range      */
461
462         xscale_cache_flushID,           /* idcache_inv_all      */
463         xscale_cache_purgeID,           /* idcache_wbinv_all    */
464         xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
465         cpufunc_nullop,                 /* l2cache_wbinv_all    */
466         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
467         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
468         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
469
470         /* Other functions */
471
472         cpufunc_nullop,                 /* flush_prefetchbuf    */
473         armv4_drain_writebuf,           /* drain_writebuf       */
474         cpufunc_nullop,                 /* flush_brnchtgt_C     */
475         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
476
477         xscale_cpu_sleep,               /* sleep                */
478
479         /* Soft functions */
480
481         cpufunc_null_fixup,             /* dataabt_fixup        */
482         cpufunc_null_fixup,             /* prefetchabt_fixup    */
483
484         xscale_context_switch,          /* context_switch       */
485
486         xscale_setup                    /* cpu setup            */
487 };
488 #endif
489 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
490    CPU_XSCALE_80219 */
491
492 #ifdef CPU_XSCALE_81342
493 struct cpu_functions xscalec3_cpufuncs = {
494         /* CPU functions */
495         
496         cpufunc_id,                     /* id                   */
497         xscale_cpwait,                  /* cpwait               */
498
499         /* MMU functions */
500
501         xscale_control,                 /* control              */
502         cpufunc_domains,                /* domain               */
503         xscalec3_setttb,                /* setttb               */
504         cpufunc_faultstatus,            /* faultstatus          */
505         cpufunc_faultaddress,           /* faultaddress         */
506
507         /* TLB functions */
508
509         armv4_tlb_flushID,              /* tlb_flushID          */
510         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
511         armv4_tlb_flushI,               /* tlb_flushI           */
512         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
513         armv4_tlb_flushD,               /* tlb_flushD           */
514         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
515
516         /* Cache operations */
517
518         xscalec3_cache_syncI,           /* icache_sync_all      */
519         xscalec3_cache_syncI_rng,       /* icache_sync_range    */
520
521         xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
522         xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
523         xscale_cache_flushD_rng,        /* dcache_inv_range     */
524         xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */
525
526         xscale_cache_flushID,           /* idcache_inv_all      */
527         xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
528         xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
529         xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
530         xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
531         xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
532         xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
533
534         /* Other functions */
535
536         cpufunc_nullop,                 /* flush_prefetchbuf    */
537         armv4_drain_writebuf,           /* drain_writebuf       */
538         cpufunc_nullop,                 /* flush_brnchtgt_C     */
539         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
540
541         xscale_cpu_sleep,               /* sleep                */
542
543         /* Soft functions */
544
545         cpufunc_null_fixup,             /* dataabt_fixup        */
546         cpufunc_null_fixup,             /* prefetchabt_fixup    */
547
548         xscalec3_context_switch,        /* context_switch       */
549
550         xscale_setup                    /* cpu setup            */
551 };
552 #endif /* CPU_XSCALE_81342 */
553
554
555 #if defined(CPU_FA526) || defined(CPU_FA626TE)
556 struct cpu_functions fa526_cpufuncs = {
557         /* CPU functions */
558
559         cpufunc_id,                     /* id                   */
560         cpufunc_nullop,                 /* cpwait               */
561
562         /* MMU functions */
563
564         cpufunc_control,                /* control              */
565         cpufunc_domains,                /* domain               */
566         fa526_setttb,                   /* setttb               */
567         cpufunc_faultstatus,            /* faultstatus          */
568         cpufunc_faultaddress,           /* faultaddress         */
569
570         /* TLB functions */
571
572         armv4_tlb_flushID,              /* tlb_flushID          */
573         fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
574         armv4_tlb_flushI,               /* tlb_flushI           */
575         fa526_tlb_flushI_SE,            /* tlb_flushI_SE        */
576         armv4_tlb_flushD,               /* tlb_flushD           */
577         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
578
579         /* Cache operations */
580
581         fa526_icache_sync_all,          /* icache_sync_all      */
582         fa526_icache_sync_range,        /* icache_sync_range    */
583
584         fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
585         fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
586         fa526_dcache_inv_range,         /* dcache_inv_range     */
587         fa526_dcache_wb_range,          /* dcache_wb_range      */
588
589         armv4_idcache_inv_all,          /* idcache_inv_all      */
590         fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
591         fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
592         cpufunc_nullop,                 /* l2cache_wbinv_all    */
593         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
594         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
595         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
596
597         /* Other functions */
598
599         fa526_flush_prefetchbuf,        /* flush_prefetchbuf    */
600         armv4_drain_writebuf,           /* drain_writebuf       */
601         cpufunc_nullop,                 /* flush_brnchtgt_C     */
602         fa526_flush_brnchtgt_E,         /* flush_brnchtgt_E     */
603
604         fa526_cpu_sleep,                /* sleep                */
605
606         /* Soft functions */
607
608         cpufunc_null_fixup,             /* dataabt_fixup        */
609         cpufunc_null_fixup,             /* prefetchabt_fixup    */
610
611         fa526_context_switch,           /* context_switch       */
612
613         fa526_setup                     /* cpu setup            */
614 };
615 #endif  /* CPU_FA526 || CPU_FA626TE */
616
617 #if defined(CPU_ARM1136)
618 struct cpu_functions arm1136_cpufuncs = {
619         /* CPU functions */
620         
621         cpufunc_id,                     /* id                   */
622         cpufunc_nullop,                 /* cpwait               */
623         
624         /* MMU functions */
625         
626         cpufunc_control,                /* control              */
627         cpufunc_domains,                /* Domain               */
628         arm11x6_setttb,                 /* Setttb               */
629         cpufunc_faultstatus,            /* Faultstatus          */
630         cpufunc_faultaddress,           /* Faultaddress         */
631         
632         /* TLB functions */
633         
634         arm11_tlb_flushID,              /* tlb_flushID          */
635         arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
636         arm11_tlb_flushI,               /* tlb_flushI           */
637         arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
638         arm11_tlb_flushD,               /* tlb_flushD           */
639         arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
640         
641         /* Cache operations */
642         
643         arm11x6_icache_sync_all,        /* icache_sync_all      */
644         arm11x6_icache_sync_range,      /* icache_sync_range    */
645         
646         arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
647         armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
648         armv6_dcache_inv_range,         /* dcache_inv_range     */
649         armv6_dcache_wb_range,          /* dcache_wb_range      */
650         
651         armv6_idcache_inv_all,          /* idcache_inv_all      */
652         arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
653         arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
654         
655         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
656         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
657         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
658         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
659         
660         /* Other functions */
661         
662         arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
663         arm11_drain_writebuf,           /* drain_writebuf       */
664         cpufunc_nullop,                 /* flush_brnchtgt_C     */
665         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
666         
667         arm11_sleep,                    /* sleep                */
668         
669         /* Soft functions */
670         
671         cpufunc_null_fixup,             /* dataabt_fixup        */
672         cpufunc_null_fixup,             /* prefetchabt_fixup    */
673         
674         arm11_context_switch,           /* context_switch       */
675         
676         arm11x6_setup                   /* cpu setup            */
677 };
678 #endif /* CPU_ARM1136 */
679 #if defined(CPU_ARM1176)
680 struct cpu_functions arm1176_cpufuncs = {
681         /* CPU functions */
682         
683         cpufunc_id,                     /* id                   */
684         cpufunc_nullop,                 /* cpwait               */
685         
686         /* MMU functions */
687         
688         cpufunc_control,                /* control              */
689         cpufunc_domains,                /* Domain               */
690         arm11x6_setttb,                 /* Setttb               */
691         cpufunc_faultstatus,            /* Faultstatus          */
692         cpufunc_faultaddress,           /* Faultaddress         */
693         
694         /* TLB functions */
695         
696         arm11_tlb_flushID,              /* tlb_flushID          */
697         arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
698         arm11_tlb_flushI,               /* tlb_flushI           */
699         arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
700         arm11_tlb_flushD,               /* tlb_flushD           */
701         arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
702         
703         /* Cache operations */
704         
705         arm11x6_icache_sync_all,        /* icache_sync_all      */
706         arm11x6_icache_sync_range,      /* icache_sync_range    */
707         
708         arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
709         armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
710         armv6_dcache_inv_range,         /* dcache_inv_range     */
711         armv6_dcache_wb_range,          /* dcache_wb_range      */
712         
713         armv6_idcache_inv_all,          /* idcache_inv_all      */
714         arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
715         arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
716         
717         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
718         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
719         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
720         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
721         
722         /* Other functions */
723         
724         arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
725         arm11_drain_writebuf,           /* drain_writebuf       */
726         cpufunc_nullop,                 /* flush_brnchtgt_C     */
727         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
728         
729         arm11x6_sleep,                  /* sleep                */
730         
731         /* Soft functions */
732         
733         cpufunc_null_fixup,             /* dataabt_fixup        */
734         cpufunc_null_fixup,             /* prefetchabt_fixup    */
735         
736         arm11_context_switch,           /* context_switch       */
737         
738         arm11x6_setup                   /* cpu setup            */
739 };
740 #endif /* CPU_ARM1176 */
741
742 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
743 struct cpu_functions cortexa_cpufuncs = {
744         /* CPU functions */
745         
746         cpufunc_id,                     /* id                   */
747         cpufunc_nullop,                 /* cpwait               */
748         
749         /* MMU functions */
750         
751         cpufunc_control,                /* control              */
752         cpufunc_domains,                /* Domain               */
753         armv7_setttb,                   /* Setttb               */
754         cpufunc_faultstatus,            /* Faultstatus          */
755         cpufunc_faultaddress,           /* Faultaddress         */
756         
757         /* 
758          * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
759          * whether the hardware implements separate I+D or not, so we use the
760          * same 'ID' functions for all 3 variations.
761          */
762         
763         armv7_tlb_flushID,              /* tlb_flushID          */
764         armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
765         armv7_tlb_flushID,              /* tlb_flushI           */
766         armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
767         armv7_tlb_flushID,              /* tlb_flushD           */
768         armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */
769         
770         /* Cache operations */
771         
772         armv7_icache_sync_all,          /* icache_sync_all      */
773         armv7_icache_sync_range,        /* icache_sync_range    */
774         
775         armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
776         armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
777         armv7_dcache_inv_range,         /* dcache_inv_range     */
778         armv7_dcache_wb_range,          /* dcache_wb_range      */
779         
780         armv7_idcache_inv_all,          /* idcache_inv_all      */
781         armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
782         armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
783         
784         /* 
785          * Note: For CPUs using the PL310 the L2 ops are filled in when the
786          * L2 cache controller is actually enabled.
787          */
788         cpufunc_nullop,                 /* l2cache_wbinv_all    */
789         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
790         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
791         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
792         
793         /* Other functions */
794         
795         cpufunc_nullop,                 /* flush_prefetchbuf    */
796         armv7_drain_writebuf,           /* drain_writebuf       */
797         cpufunc_nullop,                 /* flush_brnchtgt_C     */
798         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
799         
800         armv7_sleep,                    /* sleep                */
801         
802         /* Soft functions */
803         
804         cpufunc_null_fixup,             /* dataabt_fixup        */
805         cpufunc_null_fixup,             /* prefetchabt_fixup    */
806         
807         armv7_context_switch,           /* context_switch       */
808         
809         cortexa_setup                     /* cpu setup            */
810 };
811 #endif /* CPU_CORTEXA || CPU_KRAIT */
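
/*
 * The l2cache entries in cortexa_cpufuncs above are nullop placeholders.
 * On SoCs that front the core with a PL310 L2 controller, the controller's
 * attach code overwrites those slots in the live cpufuncs table once the L2
 * is mapped and enabled; a minimal sketch (handler names hypothetical) is:
 *
 *	cpufuncs.cf_l2cache_wbinv_all = pl310_wbinv_all;
 *	cpufuncs.cf_l2cache_wbinv_range = pl310_wbinv_range;
 *	cpufuncs.cf_l2cache_inv_range = pl310_inv_range;
 *	cpufuncs.cf_l2cache_wb_range = pl310_wb_range;
 */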
812
813 /*
814  * Global constants also used by locore.s
815  */
816
817 struct cpu_functions cpufuncs;
818 u_int cputype;
819 u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore.s */
820
821 #if defined(CPU_ARM9) ||        \
822   defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) ||        \
823   defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
824   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
825   defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||                 \
826   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
827   defined(CPU_CORTEXA) || defined(CPU_KRAIT)
828
829 static void get_cachetype_cp15(void);
830
831 /* Additional cache information local to this file.  Log2 of some of the
832    above numbers.  */
833 static int      arm_dcache_l2_nsets;
834 static int      arm_dcache_l2_assoc;
835 static int      arm_dcache_l2_linesize;
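
/*
 * Worked example (illustrative): a 16 KB, 4-way data cache with 32-byte
 * lines is recorded here as arm_dcache_l2_linesize = 5 (1 << 5 = 32 bytes),
 * arm_dcache_l2_assoc = 2 (1 << 2 = 4 ways) and arm_dcache_l2_nsets = 7
 * (1 << 7 = 128 sets), since 128 * 4 * 32 = 16384 bytes.
 */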
836
837 static void
838 get_cachetype_cp15()
839 {
840         u_int ctype, isize, dsize, cpuid;
841         u_int clevel, csize, i, sel;
842         u_int multiplier;
843         u_char type;
844
845         __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
846                 : "=r" (ctype));
847
848         cpuid = cpufunc_id();
849         /*
850          * ...and thus spake the ARM ARM:
851          *
852          * If an <opcode2> value corresponding to an unimplemented or
853          * reserved ID register is encountered, the System Control
854          * processor returns the value of the main ID register.
855          */
856         if (ctype == cpuid)
857                 goto out;
858
859         if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
860                 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
861                     : "=r" (clevel));
862                 arm_cache_level = clevel;
863                 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
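		/*
		 * Walk CLIDR three bits at a time: each Ctype field gives the
		 * kind of cache implemented at that level.  For every level
		 * present, CSSELR is loaded with (level << 1) | InD to select
		 * the data/unified (0) or instruction (1) cache, and CCSIDR
		 * is then read to obtain that cache's geometry; the line
		 * length is 1 << (LineSize field + 4) bytes.
		 */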
864                 i = 0;
865                 while ((type = (clevel & 0x7)) && i < 7) {
866                         if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
867                             type == CACHE_SEP_CACHE) {
868                                 sel = i << 1;
869                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
870                                     : : "r" (sel));
871                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
872                                     : "=r" (csize));
873                                 arm_cache_type[sel] = csize;
874                                 arm_dcache_align = 1 << 
875                                     (CPUV7_CT_xSIZE_LEN(csize) + 4);
876                                 arm_dcache_align_mask = arm_dcache_align - 1;
877                         }
878                         if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
879                                 sel = (i << 1) | 1;
880                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
881                                     : : "r" (sel));
882                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
883                                     : "=r" (csize));
884                                 arm_cache_type[sel] = csize;
885                         }
886                         i++;
887                         clevel >>= 3;
888                 }
889         } else {
890                 if ((ctype & CPU_CT_S) == 0)
891                         arm_pcache_unified = 1;
892
893                 /*
894                  * If you want to know how this code works, go read the ARM ARM.
895                  */
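		/*
		 * Worked example (illustrative): a 16 KB, 4-way, 32-byte-line
		 * D-cache with the multiplier (M) bit clear decodes below as
		 * size field 5 (2 << (5 + 8) = 16384 bytes), assoc field 2
		 * (2 << (2 - 1) = 4 ways) and len field 2 (1 << (2 + 3) = 32
		 * bytes per line).
		 */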
896
897                 arm_pcache_type = CPU_CT_CTYPE(ctype);
898
899                 if (arm_pcache_unified == 0) {
900                         isize = CPU_CT_ISIZE(ctype);
901                         multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
902                         arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
903                         if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
904                                 if (isize & CPU_CT_xSIZE_M)
905                                         arm_picache_line_size = 0; /* not present */
906                                 else
907                                         arm_picache_ways = 1;
908                         } else {
909                                 arm_picache_ways = multiplier <<
910                                     (CPU_CT_xSIZE_ASSOC(isize) - 1);
911                         }
912                         arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
913                 }
914
915                 dsize = CPU_CT_DSIZE(ctype);
916                 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
917                 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
918                 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
919                         if (dsize & CPU_CT_xSIZE_M)
920                                 arm_pdcache_line_size = 0; /* not present */
921                         else
922                                 arm_pdcache_ways = 1;
923                 } else {
924                         arm_pdcache_ways = multiplier <<
925                             (CPU_CT_xSIZE_ASSOC(dsize) - 1);
926                 }
927                 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
928
929                 arm_dcache_align = arm_pdcache_line_size;
930
931                 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
932                 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
933                 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
934                     CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
935
936         out:
937                 arm_dcache_align_mask = arm_dcache_align - 1;
938         }
939 }
940 #endif /* ARM9 || XSCALE */
941
942 /*
943  * Cannot panic here as we may not have a console yet ...
944  */
945
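/*
 * Identify the CPU from the main ID register, install the matching
 * cpu_functions table, probe the cache geometry via get_cachetype_cp15()
 * and select the pmap PTE initialization appropriate for the core.
 */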
946 int
947 set_cpufuncs()
948 {
949         cputype = cpufunc_id();
950         cputype &= CPU_ID_CPU_MASK;
951
952         /*
953          * NOTE: cpu_do_powersave defaults to off.  If we encounter a
954          * CPU type where we want to use it by default, then we set it.
955          */
956
957 #ifdef CPU_ARM9
958         if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
959              (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
960             (cputype & 0x0000f000) == 0x00009000) {
961                 cpufuncs = arm9_cpufuncs;
962                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
963                 get_cachetype_cp15();
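		/*
		 * Precompute the constants used by the arm9 set/index cache
		 * loops from the probed geometry: the set portion of the
		 * operand advances by one cache line up to the size of a
		 * single way, while the way-select portion lives in the top
		 * bits and advances by 1 << (32 - log2(associativity)).
		 */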
964                 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
965                 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
966                     arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
967                 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
968                 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
969 #ifdef ARM9_CACHE_WRITE_THROUGH
970                 pmap_pte_init_arm9();
971 #else
972                 pmap_pte_init_generic();
973 #endif
974                 goto out;
975         }
976 #endif /* CPU_ARM9 */
977 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
978         if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
979             cputype == CPU_ID_MV88FR571_41) {
980                 uint32_t sheeva_ctrl;
981
982                 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
983                     MV_L2_ENABLE);
984                 /*
985                  * Workaround for Marvell MV78100 CPU: Cache prefetch
986                  * mechanism may affect the cache coherency validity,
987                  * so it needs to be disabled.
988                  *
989                  * Refer to errata document MV-S501058-00C.pdf (p. 3.1
990                  * L2 Prefetching Mechanism) for details.
991                  */
992                 if (cputype == CPU_ID_MV88FR571_VD ||
993                     cputype == CPU_ID_MV88FR571_41)
994                         sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
995
996                 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
997
998                 cpufuncs = sheeva_cpufuncs;
999                 get_cachetype_cp15();
1000                 pmap_pte_init_generic();
1001                 goto out;
1002         } else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
1003                 cpufuncs = armv5_ec_cpufuncs;
1004                 get_cachetype_cp15();
1005                 pmap_pte_init_generic();
1006                 goto out;
1007         }
1008 #endif /* CPU_ARM9E || CPU_ARM10 */
1009 #ifdef CPU_ARM10
1010         if (/* cputype == CPU_ID_ARM1020T || */
1011             cputype == CPU_ID_ARM1020E) {
1012                 /*
1013                  * Select write-through cacheing (this isn't really an
1014                  * Select write-through caching (this isn't really an
1015                  */
1016                 cpufuncs = arm10_cpufuncs;
1017                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
1018                 get_cachetype_cp15();
1019                 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1020                 arm10_dcache_sets_max =
1021                     (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1022                     arm10_dcache_sets_inc;
1023                 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1024                 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
1025                 pmap_pte_init_generic();
1026                 goto out;
1027         }
1028 #endif /* CPU_ARM10 */
1029 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1030         if (cputype == CPU_ID_ARM1136JS
1031             || cputype == CPU_ID_ARM1136JSR1
1032             || cputype == CPU_ID_ARM1176JZS) {
1033 #ifdef CPU_ARM1136
1034                 if (cputype == CPU_ID_ARM1136JS
1035                     || cputype == CPU_ID_ARM1136JSR1)
1036                         cpufuncs = arm1136_cpufuncs;
1037 #endif
1038 #ifdef CPU_ARM1176
1039                 if (cputype == CPU_ID_ARM1176JZS)
1040                         cpufuncs = arm1176_cpufuncs;
1041 #endif
1042                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
1043                 get_cachetype_cp15();
1044
1045                 pmap_pte_init_mmu_v6();
1046
1047                 goto out;
1048         }
1049 #endif /* CPU_ARM1136 || CPU_ARM1176 */
1050 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1051         if (cputype == CPU_ID_CORTEXA7 ||
1052             cputype == CPU_ID_CORTEXA8R1 ||
1053             cputype == CPU_ID_CORTEXA8R2 ||
1054             cputype == CPU_ID_CORTEXA8R3 ||
1055             cputype == CPU_ID_CORTEXA9R1 ||
1056             cputype == CPU_ID_CORTEXA9R2 ||
1057             cputype == CPU_ID_CORTEXA9R3 ||
1058             cputype == CPU_ID_CORTEXA15R0 ||
1059             cputype == CPU_ID_CORTEXA15R1 ||
1060             cputype == CPU_ID_CORTEXA15R2 ||
1061             cputype == CPU_ID_CORTEXA15R3 ||
1062             cputype == CPU_ID_KRAIT ) {
1063                 cpufuncs = cortexa_cpufuncs;
1064                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
1065                 get_cachetype_cp15();
1066                 
1067                 pmap_pte_init_mmu_v6();
1068                 /* Use powersave on this CPU. */
1069                 cpu_do_powersave = 1;
1070                 goto out;
1071         }
1072 #endif /* CPU_CORTEXA || CPU_KRAIT */
1073                 
1074 #if defined(CPU_MV_PJ4B)
1075         if (cputype == CPU_ID_MV88SV581X_V7 ||
1076             cputype == CPU_ID_MV88SV584X_V7 ||
1077             cputype == CPU_ID_ARM_88SV581X_V7) {
1078                 cpufuncs = pj4bv7_cpufuncs;
1079                 get_cachetype_cp15();
1080                 pmap_pte_init_mmu_v6();
1081                 goto out;
1082         }
1083 #endif /* CPU_MV_PJ4B */
1084
1085 #if defined(CPU_FA526) || defined(CPU_FA626TE)
1086         if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
1087                 cpufuncs = fa526_cpufuncs;
1088                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
1089                 get_cachetype_cp15();
1090                 pmap_pte_init_generic();
1091
1092                 /* Use powersave on this CPU. */
1093                 cpu_do_powersave = 1;
1094
1095                 goto out;
1096         }
1097 #endif  /* CPU_FA526 || CPU_FA626TE */
1098
1099 #ifdef CPU_XSCALE_80200
1100         if (cputype == CPU_ID_80200) {
1101                 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1102
1103                 i80200_icu_init();
1104
1105 #if defined(XSCALE_CCLKCFG)
1106                 /*
1107                  * Crank CCLKCFG to maximum legal value.
1108                  */
1109                 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1110                         :
1111                         : "r" (XSCALE_CCLKCFG));
1112 #endif
1113
1114                 /*
1115                  * XXX Disable ECC in the Bus Controller Unit; we
1116                  * don't really support it, yet.  Clear any pending
1117                  * error indications.
1118                  */
1119                 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1120                         :
1121                         : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1122
1123                 cpufuncs = xscale_cpufuncs;
1124                 /*
1125                  * i80200 errata: Step-A0 and A1 have a bug where
1126                  * D$ dirty bits are not cleared on "invalidate by
1127                  * address".
1128                  *
1129                  * Workaround: Clean cache line before invalidating.
1130                  */
1131                 if (rev == 0 || rev == 1)
1132                         cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1133
1134                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
1135                 get_cachetype_cp15();
1136                 pmap_pte_init_xscale();
1137                 goto out;
1138         }
1139 #endif /* CPU_XSCALE_80200 */
1140 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1141         if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1142             cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1143             cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1144                 cpufuncs = xscale_cpufuncs;
1145                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
1146                 get_cachetype_cp15();
1147                 pmap_pte_init_xscale();
1148                 goto out;
1149         }
1150 #endif /* CPU_XSCALE_80321 || CPU_XSCALE_80219 */
1151
1152 #if defined(CPU_XSCALE_81342)
1153         if (cputype == CPU_ID_81342) {
1154                 cpufuncs = xscalec3_cpufuncs;
1155                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
1156                 get_cachetype_cp15();
1157                 pmap_pte_init_xscale();
1158                 goto out;
1159         }
1160 #endif /* CPU_XSCALE_81342 */
1161 #ifdef CPU_XSCALE_PXA2X0
1162         /* ignore core revision to test PXA2xx CPUs */
1163         if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1164             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1165             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1166
1167                 cpufuncs = xscale_cpufuncs;
1168                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
1169                 get_cachetype_cp15();
1170                 pmap_pte_init_xscale();
1171
1172                 /* Use powersave on this CPU. */
1173                 cpu_do_powersave = 1;
1174
1175                 goto out;
1176         }
1177 #endif /* CPU_XSCALE_PXA2X0 */
1178 #ifdef CPU_XSCALE_IXP425
1179         if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1180             cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
1181
1182                 cpufuncs = xscale_cpufuncs;
1183                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
1184                 get_cachetype_cp15();
1185                 pmap_pte_init_xscale();
1186
1187                 goto out;
1188         }
1189 #endif /* CPU_XSCALE_IXP425 */
1190         /*
1191          * Bzzzz. And the answer was ...
1192          */
1193         panic("No support for this CPU type (%08x) in kernel", cputype);
1194         return(ARCHITECTURE_NOT_PRESENT);
1195 out:
1196         uma_set_align(arm_dcache_align_mask);
1197         return (0);
1198 }
1199
1200 /*
1201  * Fixup routines for data and prefetch aborts.
1202  *
1203  * Several compile time symbols are used
1204  *
1205  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1206  * correction of registers after a fault.
1207  */
1208
1209
1210 /*
1211  * Null abort fixup routine.
1212  * For use when no fixup is required.
1213  */
1214 int
1215 cpufunc_null_fixup(arg)
1216         void *arg;
1217 {
1218         return(ABORT_FIXUP_OK);
1219 }
1220
1221 /*
1222  * CPU Setup code
1223  */
1224
1225 #if defined (CPU_ARM9) || \
1226   defined(CPU_ARM9E) || \
1227   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
1228   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
1229   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1230   defined(CPU_ARM10) ||  defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
1231   defined(CPU_FA526) || defined(CPU_FA626TE)
1232
1233 #define IGN     0
1234 #define OR      1
1235 #define BIC     2
1236
1237 struct cpu_option {
1238         char    *co_name;
1239         int     co_falseop;
1240         int     co_trueop;
1241         int     co_value;
1242 };
1243
1244 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1245
1246 static u_int
1247 parse_cpu_options(args, optlist, cpuctrl)
1248         char *args;
1249         struct cpu_option *optlist;
1250         u_int cpuctrl;
1251 {
1252         int integer;
1253
1254         if (args == NULL)
1255                 return(cpuctrl);
1256
1257         while (optlist->co_name) {
1258                 if (get_bootconf_option(args, optlist->co_name,
1259                     BOOTOPT_TYPE_BOOLEAN, &integer)) {
1260                         if (integer) {
1261                                 if (optlist->co_trueop == OR)
1262                                         cpuctrl |= optlist->co_value;
1263                                 else if (optlist->co_trueop == BIC)
1264                                         cpuctrl &= ~optlist->co_value;
1265                         } else {
1266                                 if (optlist->co_falseop == OR)
1267                                         cpuctrl |= optlist->co_value;
1268                                 else if (optlist->co_falseop == BIC)
1269                                         cpuctrl &= ~optlist->co_value;
1270                         }
1271                 }
1272                 ++optlist;
1273         }
1274         return(cpuctrl);
1275 }
1276 #endif /* CPU_ARM9 || XSCALE*/
1277
1278 #ifdef CPU_ARM9
1279 struct cpu_option arm9_options[] = {
1280         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1281         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1282         { "arm9.cache", BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1283         { "arm9.icache",        BIC, OR,  CPU_CONTROL_IC_ENABLE },
1284         { "arm9.dcache",        BIC, OR,  CPU_CONTROL_DC_ENABLE },
1285         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1286         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1287         { "arm9.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1288         { NULL,                 IGN, IGN, 0 }
1289 };
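
/*
 * Usage sketch (illustrative): given the arm9_options table above and boot
 * arguments such as "arm9.dcache=0 cpu.nowritebuf=1", parse_cpu_options()
 * walks the table, get_bootconf_option() parses each boolean, and the
 * matching CPU_CONTROL_* bits are ORed into or BICed out of the control
 * word; the two options above would clear CPU_CONTROL_DC_ENABLE and
 * CPU_CONTROL_WBUF_ENABLE before cpu_control() is finally called.
 */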
1290
1291 void
1292 arm9_setup(args)
1293         char *args;
1294 {
1295         int cpuctrl, cpuctrlmask;
1296
1297         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1298             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1299             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1300             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1301             | CPU_CONTROL_ROUNDROBIN;
1302         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1303                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1304                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1305                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1306                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1307                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1308                  | CPU_CONTROL_ROUNDROBIN;
1309
1310 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1311         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1312 #endif
1313
1314         cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1315
1316 #ifdef __ARMEB__
1317         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1318 #endif
1319         if (vector_page == ARM_VECTORS_HIGH)
1320                 cpuctrl |= CPU_CONTROL_VECRELOC;
1321
1322         /* Clear out the cache */
1323         cpu_idcache_wbinv_all();
1324
1325         /* Set the control register */
1326         cpu_control(cpuctrlmask, cpuctrl);
1327         ctrl = cpuctrl;
1328
1329 }
1330 #endif  /* CPU_ARM9 */
1331
1332 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1333 struct cpu_option arm10_options[] = {
1334         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1335         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1336         { "arm10.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1337         { "arm10.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
1338         { "arm10.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
1339         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1340         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1341         { "arm10.writebuf",     BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1342         { NULL,                 IGN, IGN, 0 }
1343 };
1344
1345 void
1346 arm10_setup(args)
1347         char *args;
1348 {
1349         int cpuctrl, cpuctrlmask;
1350
1351         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1352             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1353             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1354         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1355             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1356             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1357             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1358             | CPU_CONTROL_BPRD_ENABLE
1359             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1360
1361 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1362         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1363 #endif
1364
1365         cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1366
1367 #ifdef __ARMEB__
1368         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1369 #endif
1370
1371         /* Clear out the cache */
1372         cpu_idcache_wbinv_all();
1373
1374         /* Now really make sure they are clean.  */
1375         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1376
1377         if (vector_page == ARM_VECTORS_HIGH)
1378                 cpuctrl |= CPU_CONTROL_VECRELOC;
1379
1380         /* Set the control register */
1381         ctrl = cpuctrl;
1382         cpu_control(0xffffffff, cpuctrl);
1383
1384         /* And again. */
1385         cpu_idcache_wbinv_all();
1386 }
1387 #endif  /* CPU_ARM9E || CPU_ARM10 */
1388
1389 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1390 struct cpu_option arm11_options[] = {
1391         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1392         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1393         { "arm11.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1394         { "arm11.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
1395         { "arm11.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
1396         { NULL,                 IGN, IGN, 0 }
1397 };
1398
1399 void
1400 arm11x6_setup(char *args)
1401 {
1402         int cpuctrl, cpuctrl_wax;
1403         uint32_t auxctrl, auxctrl_wax;
1404         uint32_t tmp, tmp2;
1405         uint32_t sbz=0;
1406         uint32_t cpuid;
1407
1408         cpuid = cpufunc_id();
1409
1410         cpuctrl =
1411                 CPU_CONTROL_MMU_ENABLE  |
1412                 CPU_CONTROL_DC_ENABLE   |
1413                 CPU_CONTROL_WBUF_ENABLE |
1414                 CPU_CONTROL_32BP_ENABLE |
1415                 CPU_CONTROL_32BD_ENABLE |
1416                 CPU_CONTROL_LABT_ENABLE |
1417                 CPU_CONTROL_SYST_ENABLE |
1418                 CPU_CONTROL_IC_ENABLE;
1419
1420         /*
1421          * "Write as existing" bits: leave these at their current value;
1422          * the complement of this word is the mask passed to cpu_control().
1423          */
1424         cpuctrl_wax =
1425                 (3 << 30) | /* SBZ */
1426                 (1 << 29) | /* FA */
1427                 (1 << 28) | /* TR */
1428                 (3 << 26) | /* SBZ */ 
1429                 (3 << 19) | /* SBZ */
1430                 (1 << 17);  /* SBZ */
1431
1432         cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1433         cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1434
1435         cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1436
1437 #ifdef __ARMEB__
1438         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1439 #endif
1440
1441         if (vector_page == ARM_VECTORS_HIGH)
1442                 cpuctrl |= CPU_CONTROL_VECRELOC;
1443
1444         auxctrl = 0;
1445         auxctrl_wax = ~0;
1446         /*
1447          * This option enables the workaround for the 364296 ARM1136
1448          * r0pX errata (possible cache data corruption with
1449          * hit-under-miss enabled). It sets the undocumented bit 31 in
1450          * the auxiliary control register and the FI bit in the control
1451          * register, thus disabling hit-under-miss without putting the
1452          * processor into full low interrupt latency mode. ARM11MPCore
1453          * is not affected.
1454          */
1455         if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
1456                 cpuctrl |= CPU_CONTROL_FI_ENABLE;
1457                 auxctrl = ARM1136_AUXCTL_PFI;
1458                 auxctrl_wax = ~ARM1136_AUXCTL_PFI;
1459         }
1460
1461         /*
1462          * Enable a workaround for an ARM1176 erratum
1463          */
1464         if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
1465                 auxctrl = ARM1176_AUXCTL_PHD;
1466                 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
1467         }
1468
1469         /* Clear out the cache */
1470         cpu_idcache_wbinv_all();
1471
1472         /* Now really make sure they are clean.  */
1473         __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
1474
1475         /* Allow detection code to find the VFP if it's fitted.  */
1476         __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
1477
1478         /* Set the control register */
1479         ctrl = cpuctrl;
1480         cpu_control(~cpuctrl_wax, cpuctrl);
1481
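        /*
         * Read-modify-write the auxiliary control register: keep the bits
         * selected by auxctrl_wax, OR in the errata bits chosen above, and
         * write the register back only if its value actually changed.
         */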
1482         __asm volatile ("mrc    p15, 0, %0, c1, c0, 1\n\t"
1483                         "and    %1, %0, %2\n\t"
1484                         "orr    %1, %1, %3\n\t"
1485                         "teq    %0, %1\n\t"
1486                         "mcrne  p15, 0, %1, c1, c0, 1\n\t"
1487                         : "=r"(tmp), "=r"(tmp2) :
1488                           "r"(auxctrl_wax), "r"(auxctrl));
1489
1490         /* And again. */
1491         cpu_idcache_wbinv_all();
1492 }
1493 #endif  /* CPU_ARM1136 || CPU_ARM1176 */
1494
1495 #ifdef CPU_MV_PJ4B
1496 void
1497 pj4bv7_setup(args)
1498         char *args;
1499 {
1500         int cpuctrl;
1501
1502         pj4b_config();
1503
1504         cpuctrl = CPU_CONTROL_MMU_ENABLE;
1505 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1506         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1507 #endif
1508         cpuctrl |= CPU_CONTROL_DC_ENABLE;
1509         cpuctrl |= (0xf << 3);          /* control register bits [6:3] are write-as-one */
1510         cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1511         cpuctrl |= CPU_CONTROL_IC_ENABLE;
1512         if (vector_page == ARM_VECTORS_HIGH)
1513                 cpuctrl |= CPU_CONTROL_VECRELOC;
1514         cpuctrl |= (0x5 << 16) | (1 << 22);     /* write-as-one bits 16, 18 and 22 */
1515         cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1516
1517         /* Clear out the cache */
1518         cpu_idcache_wbinv_all();
1519
1520         /* Set the control register */
1521         ctrl = cpuctrl;
1522         cpu_control(0xFFFFFFFF, cpuctrl);
1523
1524         /* And again. */
1525         cpu_idcache_wbinv_all();
1526 }
1527 #endif /* CPU_MV_PJ4B */
1528
1529 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1530
1531 void
1532 cortexa_setup(char *args)
1533 {
1534         int cpuctrl, cpuctrlmask;
1535         
1536         cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
1537             CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
1538             CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
1539             CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
1540             CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
1541             CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
1542         
1543         cpuctrl = CPU_CONTROL_MMU_ENABLE |
1544             CPU_CONTROL_IC_ENABLE |
1545             CPU_CONTROL_DC_ENABLE |
1546             CPU_CONTROL_BPRD_ENABLE;
1547         
1548 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1549         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1550 #endif
1551         
1552         /* Switch to big endian */
1553 #ifdef __ARMEB__
1554         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1555 #endif
1556         
1557         /* Check if the vector page is at the high address (0xffff0000) */
1558         if (vector_page == ARM_VECTORS_HIGH)
1559                 cpuctrl |= CPU_CONTROL_VECRELOC;
1560         
1561         /* Clear out the cache */
1562         cpu_idcache_wbinv_all();
1563         
1564         /* Set the control register */
1565         ctrl = cpuctrl;
1566         cpu_control(cpuctrlmask, cpuctrl);
1567         
1568         /* And again. */
1569         cpu_idcache_wbinv_all();
1570 #ifdef SMP
1571         armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
1572 #endif
1573 }
1574 #endif  /* CPU_CORTEXA || CPU_KRAIT */
1575
1576 #if defined(CPU_FA526) || defined(CPU_FA626TE)
1577 struct cpu_option fa526_options[] = {
1578 #ifdef COMPAT_12
1579         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE |
1580                                            CPU_CONTROL_DC_ENABLE) },
1581         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1582 #endif  /* COMPAT_12 */
1583         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE |
1584                                            CPU_CONTROL_DC_ENABLE) },
1585         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE |
1586                                            CPU_CONTROL_DC_ENABLE) },
1587         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1588         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1589         { NULL,                 IGN, IGN, 0 }
1590 };
1591
1592 void
1593 fa526_setup(char *args)
1594 {
1595         int cpuctrl, cpuctrlmask;
1596
1597         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1598                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1599                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1600                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1601                 | CPU_CONTROL_BPRD_ENABLE;
1602         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1603                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1604                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1605                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1606                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1607                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1608                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1609
1610 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1611         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1612 #endif
1613
1614         cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
1615
1616 #ifdef __ARMEB__
1617         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1618 #endif
1619
1620         if (vector_page == ARM_VECTORS_HIGH)
1621                 cpuctrl |= CPU_CONTROL_VECRELOC;
1622
1623         /* Clear out the cache */
1624         cpu_idcache_wbinv_all();
1625
1626         /* Set the control register */
1627         ctrl = cpuctrl;
1628         cpu_control(0xffffffff, cpuctrl);
1629 }
1630 #endif  /* CPU_FA526 || CPU_FA626TE */
1631
1632 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1633   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1634   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
1635 struct cpu_option xscale_options[] = {
1636 #ifdef COMPAT_12
1637         { "branchpredict",      BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1638         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1639 #endif  /* COMPAT_12 */
1640         { "cpu.branchpredict",  BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1641         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1642         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1643         { "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1644         { "xscale.cache",       BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1645         { "xscale.icache",      BIC, OR,  CPU_CONTROL_IC_ENABLE },
1646         { "xscale.dcache",      BIC, OR,  CPU_CONTROL_DC_ENABLE },
1647         { NULL,                 IGN, IGN, 0 }
1648 };
1649
1650 void
1651 xscale_setup(args)
1652         char *args;
1653 {
1654         uint32_t auxctl;
1655         int cpuctrl, cpuctrlmask;
1656
1657         /*
1658          * The XScale Write Buffer is always enabled.  Our option
1659          * is to enable/disable coalescing.  Note that bits 6:3
1660          * must always be enabled.
1661          */
1662
1663         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1664                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1665                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1666                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1667                  | CPU_CONTROL_BPRD_ENABLE;
1668         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1669                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1670                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1671                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1672                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1673                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1674                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC
1675                  | CPU_CONTROL_L2_ENABLE;
1676
1677 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1678         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1679 #endif
1680
1681         cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1682
1683 #ifdef __ARMEB__
1684         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1685 #endif
1686
1687         if (vector_page == ARM_VECTORS_HIGH)
1688                 cpuctrl |= CPU_CONTROL_VECRELOC;
1689 #ifdef CPU_XSCALE_CORE3
1690         cpuctrl |= CPU_CONTROL_L2_ENABLE;
1691 #endif
1692
1693         /* Clear out the cache */
1694         cpu_idcache_wbinv_all();
1695
1696         /*
1697          * Set the control register.  Note that bits 6:3 must always
1698          * be set to 1.
1699          */
1700         ctrl = cpuctrl;
1701 /*      cpu_control(cpuctrlmask, cpuctrl);*/
1702         cpu_control(0xffffffff, cpuctrl);
1703
1704         /* Make sure write coalescing is turned on */
1705         __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1706                 : "=r" (auxctl));
1707 #ifdef XSCALE_NO_COALESCE_WRITES
1708         auxctl |= XSCALE_AUXCTL_K;
1709 #else
1710         auxctl &= ~XSCALE_AUXCTL_K;
1711 #endif
1712 #ifdef CPU_XSCALE_CORE3
1713         auxctl |= XSCALE_AUXCTL_LLR;
1714         auxctl |= XSCALE_AUXCTL_MD_MASK;
1715 #endif
1716         __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1717                 : : "r" (auxctl));
1718 }
1719 #endif  /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
1720            CPU_XSCALE_IXP425 || CPU_XSCALE_80219 || CPU_XSCALE_81342 */