1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 * products derived from this software without specific prior written
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * RiscBSD kernel project
40 * C functions for supporting CPU / MMU / TLB specific operations.
 */
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/systm.h>
50 #include <sys/mutex.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
62 #include <machine/bootconfig.h>
64 #ifdef CPU_XSCALE_80200
65 #include <arm/xscale/i80200/i80200reg.h>
66 #include <arm/xscale/i80200/i80200var.h>
69 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
70 #include <arm/xscale/i80321/i80321reg.h>
71 #include <arm/xscale/i80321/i80321var.h>
75 * Some definitions in i81342reg.h clash with i80321reg.h.
76 * This only happens for the LINT kernel. As it happens,
77 * we don't need anything from i81342reg.h that we already
78 * got from somewhere else during a LINT compile.
80 #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
81 #include <arm/xscale/i8134x/i81342reg.h>
84 #ifdef CPU_XSCALE_IXP425
85 #include <arm/xscale/ixp425/ixp425reg.h>
86 #include <arm/xscale/ixp425/ixp425var.h>
89 /* PRIMARY CACHE VARIABLES */
91 int arm_picache_line_size;
94 int arm_pdcache_size; /* and unified */
95 int arm_pdcache_line_size;
99 int arm_pcache_unified;
101 int arm_dcache_align;
102 int arm_dcache_align_mask;
104 u_int arm_cache_level;
105 u_int arm_cache_type[14];
108 /* 1 == use cpu_sleep(), 0 == don't */
109 int cpu_do_powersave;
113 struct cpu_functions arm9_cpufuncs = {
117 cpufunc_nullop, /* cpwait */
121 cpufunc_control, /* control */
122 cpufunc_domains, /* Domain */
123 arm9_setttb, /* Setttb */
124 cpufunc_faultstatus, /* Faultstatus */
125 cpufunc_faultaddress, /* Faultaddress */
129 armv4_tlb_flushID, /* tlb_flushID */
130 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
131 armv4_tlb_flushI, /* tlb_flushI */
132 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
133 armv4_tlb_flushD, /* tlb_flushD */
134 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
136 /* Cache operations */
138 arm9_icache_sync_all, /* icache_sync_all */
139 arm9_icache_sync_range, /* icache_sync_range */
141 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
142 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
143 arm9_dcache_inv_range, /* dcache_inv_range */
144 arm9_dcache_wb_range, /* dcache_wb_range */
146 armv4_idcache_inv_all, /* idcache_inv_all */
147 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
148 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
149 cpufunc_nullop, /* l2cache_wbinv_all */
150 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
151 (void *)cpufunc_nullop, /* l2cache_inv_range */
152 (void *)cpufunc_nullop, /* l2cache_wb_range */
154 /* Other functions */
156 cpufunc_nullop, /* flush_prefetchbuf */
157 armv4_drain_writebuf, /* drain_writebuf */
158 cpufunc_nullop, /* flush_brnchtgt_C */
159 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
161 (void *)cpufunc_nullop, /* sleep */
165 cpufunc_null_fixup, /* dataabt_fixup */
166 cpufunc_null_fixup, /* prefetchabt_fixup */
168 arm9_context_switch, /* context_switch */
170 arm9_setup /* cpu setup */
173 #endif /* CPU_ARM9 */
175 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
176 struct cpu_functions armv5_ec_cpufuncs = {
180 cpufunc_nullop, /* cpwait */
184 cpufunc_control, /* control */
185 cpufunc_domains, /* Domain */
186 armv5_ec_setttb, /* Setttb */
187 cpufunc_faultstatus, /* Faultstatus */
188 cpufunc_faultaddress, /* Faultaddress */
192 armv4_tlb_flushID, /* tlb_flushID */
193 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
194 armv4_tlb_flushI, /* tlb_flushI */
195 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
196 armv4_tlb_flushD, /* tlb_flushD */
197 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
199 /* Cache operations */
201 armv5_ec_icache_sync_all, /* icache_sync_all */
202 armv5_ec_icache_sync_range, /* icache_sync_range */
204 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
205 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
206 armv5_ec_dcache_inv_range, /* dcache_inv_range */
207 armv5_ec_dcache_wb_range, /* dcache_wb_range */
209 armv4_idcache_inv_all, /* idcache_inv_all */
210 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
211 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
213 cpufunc_nullop, /* l2cache_wbinv_all */
214 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
215 (void *)cpufunc_nullop, /* l2cache_inv_range */
216 (void *)cpufunc_nullop, /* l2cache_wb_range */
218 /* Other functions */
220 cpufunc_nullop, /* flush_prefetchbuf */
221 armv4_drain_writebuf, /* drain_writebuf */
222 cpufunc_nullop, /* flush_brnchtgt_C */
223 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
225 (void *)cpufunc_nullop, /* sleep */
229 cpufunc_null_fixup, /* dataabt_fixup */
230 cpufunc_null_fixup, /* prefetchabt_fixup */
232 arm10_context_switch, /* context_switch */
234 arm10_setup /* cpu setup */
238 struct cpu_functions sheeva_cpufuncs = {
242 cpufunc_nullop, /* cpwait */
246 cpufunc_control, /* control */
247 cpufunc_domains, /* Domain */
248 sheeva_setttb, /* Setttb */
249 cpufunc_faultstatus, /* Faultstatus */
250 cpufunc_faultaddress, /* Faultaddress */
254 armv4_tlb_flushID, /* tlb_flushID */
255 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
256 armv4_tlb_flushI, /* tlb_flushI */
257 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
258 armv4_tlb_flushD, /* tlb_flushD */
259 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
261 /* Cache operations */
263 armv5_ec_icache_sync_all, /* icache_sync_all */
264 armv5_ec_icache_sync_range, /* icache_sync_range */
266 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
267 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
268 sheeva_dcache_inv_range, /* dcache_inv_range */
269 sheeva_dcache_wb_range, /* dcache_wb_range */
271 armv4_idcache_inv_all, /* idcache_inv_all */
272 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
273 sheeva_idcache_wbinv_range, /* idcache_wbinv_all */
275 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
276 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
277 sheeva_l2cache_inv_range, /* l2cache_inv_range */
278 sheeva_l2cache_wb_range, /* l2cache_wb_range */
280 /* Other functions */
282 cpufunc_nullop, /* flush_prefetchbuf */
283 armv4_drain_writebuf, /* drain_writebuf */
284 cpufunc_nullop, /* flush_brnchtgt_C */
285 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
287 sheeva_cpu_sleep, /* sleep */
291 cpufunc_null_fixup, /* dataabt_fixup */
292 cpufunc_null_fixup, /* prefetchabt_fixup */
294 arm10_context_switch, /* context_switch */
296 arm10_setup /* cpu setup */
298 #endif /* CPU_ARM9E || CPU_ARM10 */
301 struct cpu_functions arm10_cpufuncs = {
305 cpufunc_nullop, /* cpwait */
309 cpufunc_control, /* control */
310 cpufunc_domains, /* Domain */
311 arm10_setttb, /* Setttb */
312 cpufunc_faultstatus, /* Faultstatus */
313 cpufunc_faultaddress, /* Faultaddress */
317 armv4_tlb_flushID, /* tlb_flushID */
318 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
319 armv4_tlb_flushI, /* tlb_flushI */
320 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
321 armv4_tlb_flushD, /* tlb_flushD */
322 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
324 /* Cache operations */
326 arm10_icache_sync_all, /* icache_sync_all */
327 arm10_icache_sync_range, /* icache_sync_range */
329 arm10_dcache_wbinv_all, /* dcache_wbinv_all */
330 arm10_dcache_wbinv_range, /* dcache_wbinv_range */
331 arm10_dcache_inv_range, /* dcache_inv_range */
332 arm10_dcache_wb_range, /* dcache_wb_range */
334 armv4_idcache_inv_all, /* idcache_inv_all */
335 arm10_idcache_wbinv_all, /* idcache_wbinv_all */
336 arm10_idcache_wbinv_range, /* idcache_wbinv_range */
337 cpufunc_nullop, /* l2cache_wbinv_all */
338 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
339 (void *)cpufunc_nullop, /* l2cache_inv_range */
340 (void *)cpufunc_nullop, /* l2cache_wb_range */
342 /* Other functions */
344 cpufunc_nullop, /* flush_prefetchbuf */
345 armv4_drain_writebuf, /* drain_writebuf */
346 cpufunc_nullop, /* flush_brnchtgt_C */
347 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
349 (void *)cpufunc_nullop, /* sleep */
353 cpufunc_null_fixup, /* dataabt_fixup */
354 cpufunc_null_fixup, /* prefetchabt_fixup */
356 arm10_context_switch, /* context_switch */
358 arm10_setup /* cpu setup */
361 #endif /* CPU_ARM10 */
364 struct cpu_functions pj4bv7_cpufuncs = {
368 arm11_drain_writebuf, /* cpwait */
372 cpufunc_control, /* control */
373 cpufunc_domains, /* Domain */
374 pj4b_setttb, /* Setttb */
375 cpufunc_faultstatus, /* Faultstatus */
376 cpufunc_faultaddress, /* Faultaddress */
380 armv7_tlb_flushID, /* tlb_flushID */
381 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
382 armv7_tlb_flushID, /* tlb_flushI */
383 armv7_tlb_flushID_SE, /* tlb_flushI_SE */
384 armv7_tlb_flushID, /* tlb_flushD */
385 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
387 /* Cache operations */
388 armv7_idcache_wbinv_all, /* icache_sync_all */
389 armv7_icache_sync_range, /* icache_sync_range */
391 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
392 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
393 armv7_dcache_inv_range, /* dcache_inv_range */
394 armv7_dcache_wb_range, /* dcache_wb_range */
396 armv7_idcache_inv_all, /* idcache_inv_all */
397 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
398 armv7_idcache_wbinv_range, /* idcache_wbinv_all */
400 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
401 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
402 (void *)cpufunc_nullop, /* l2cache_inv_range */
403 (void *)cpufunc_nullop, /* l2cache_wb_range */
405 /* Other functions */
407 pj4b_drain_readbuf, /* flush_prefetchbuf */
408 arm11_drain_writebuf, /* drain_writebuf */
409 pj4b_flush_brnchtgt_all, /* flush_brnchtgt_C */
410 pj4b_flush_brnchtgt_va, /* flush_brnchtgt_E */
412 (void *)cpufunc_nullop, /* sleep */
416 cpufunc_null_fixup, /* dataabt_fixup */
417 cpufunc_null_fixup, /* prefetchabt_fixup */
419 arm11_context_switch, /* context_switch */
421 pj4bv7_setup /* cpu setup */
423 #endif /* CPU_MV_PJ4B */
425 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
426 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
427 defined(CPU_XSCALE_80219)
429 struct cpu_functions xscale_cpufuncs = {
433 xscale_cpwait, /* cpwait */
437 xscale_control, /* control */
438 cpufunc_domains, /* domain */
439 xscale_setttb, /* setttb */
440 cpufunc_faultstatus, /* faultstatus */
441 cpufunc_faultaddress, /* faultaddress */
445 armv4_tlb_flushID, /* tlb_flushID */
446 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
447 armv4_tlb_flushI, /* tlb_flushI */
448 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
449 armv4_tlb_flushD, /* tlb_flushD */
450 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
452 /* Cache operations */
454 xscale_cache_syncI, /* icache_sync_all */
455 xscale_cache_syncI_rng, /* icache_sync_range */
457 xscale_cache_purgeD, /* dcache_wbinv_all */
458 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
459 xscale_cache_flushD_rng, /* dcache_inv_range */
460 xscale_cache_cleanD_rng, /* dcache_wb_range */
462 xscale_cache_flushID, /* idcache_inv_all */
463 xscale_cache_purgeID, /* idcache_wbinv_all */
464 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
465 cpufunc_nullop, /* l2cache_wbinv_all */
466 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
467 (void *)cpufunc_nullop, /* l2cache_inv_range */
468 (void *)cpufunc_nullop, /* l2cache_wb_range */
470 /* Other functions */
472 cpufunc_nullop, /* flush_prefetchbuf */
473 armv4_drain_writebuf, /* drain_writebuf */
474 cpufunc_nullop, /* flush_brnchtgt_C */
475 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
477 xscale_cpu_sleep, /* sleep */
481 cpufunc_null_fixup, /* dataabt_fixup */
482 cpufunc_null_fixup, /* prefetchabt_fixup */
484 xscale_context_switch, /* context_switch */
486 xscale_setup /* cpu setup */
489 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_80219 */
492 #ifdef CPU_XSCALE_81342
493 struct cpu_functions xscalec3_cpufuncs = {
497 xscale_cpwait, /* cpwait */
501 xscale_control, /* control */
502 cpufunc_domains, /* domain */
503 xscalec3_setttb, /* setttb */
504 cpufunc_faultstatus, /* faultstatus */
505 cpufunc_faultaddress, /* faultaddress */
509 armv4_tlb_flushID, /* tlb_flushID */
510 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
511 armv4_tlb_flushI, /* tlb_flushI */
512 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
513 armv4_tlb_flushD, /* tlb_flushD */
514 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
516 /* Cache operations */
518 xscalec3_cache_syncI, /* icache_sync_all */
519 xscalec3_cache_syncI_rng, /* icache_sync_range */
521 xscalec3_cache_purgeD, /* dcache_wbinv_all */
522 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
523 xscale_cache_flushD_rng, /* dcache_inv_range */
524 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
526 xscale_cache_flushID, /* idcache_inv_all */
527 xscalec3_cache_purgeID, /* idcache_wbinv_all */
528 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
529 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
530 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
531 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
532 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
534 /* Other functions */
536 cpufunc_nullop, /* flush_prefetchbuf */
537 armv4_drain_writebuf, /* drain_writebuf */
538 cpufunc_nullop, /* flush_brnchtgt_C */
539 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
541 xscale_cpu_sleep, /* sleep */
545 cpufunc_null_fixup, /* dataabt_fixup */
546 cpufunc_null_fixup, /* prefetchabt_fixup */
548 xscalec3_context_switch, /* context_switch */
550 xscale_setup /* cpu setup */
552 #endif /* CPU_XSCALE_81342 */
555 #if defined(CPU_FA526) || defined(CPU_FA626TE)
556 struct cpu_functions fa526_cpufuncs = {
560 cpufunc_nullop, /* cpwait */
564 cpufunc_control, /* control */
565 cpufunc_domains, /* domain */
566 fa526_setttb, /* setttb */
567 cpufunc_faultstatus, /* faultstatus */
568 cpufunc_faultaddress, /* faultaddress */
572 armv4_tlb_flushID, /* tlb_flushID */
573 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
574 armv4_tlb_flushI, /* tlb_flushI */
575 fa526_tlb_flushI_SE, /* tlb_flushI_SE */
576 armv4_tlb_flushD, /* tlb_flushD */
577 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
579 /* Cache operations */
581 fa526_icache_sync_all, /* icache_sync_all */
582 fa526_icache_sync_range, /* icache_sync_range */
584 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
585 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
586 fa526_dcache_inv_range, /* dcache_inv_range */
587 fa526_dcache_wb_range, /* dcache_wb_range */
589 armv4_idcache_inv_all, /* idcache_inv_all */
590 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
591 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
592 cpufunc_nullop, /* l2cache_wbinv_all */
593 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
594 (void *)cpufunc_nullop, /* l2cache_inv_range */
595 (void *)cpufunc_nullop, /* l2cache_wb_range */
597 /* Other functions */
599 fa526_flush_prefetchbuf, /* flush_prefetchbuf */
600 armv4_drain_writebuf, /* drain_writebuf */
601 cpufunc_nullop, /* flush_brnchtgt_C */
602 fa526_flush_brnchtgt_E, /* flush_brnchtgt_E */
604 fa526_cpu_sleep, /* sleep */
608 cpufunc_null_fixup, /* dataabt_fixup */
609 cpufunc_null_fixup, /* prefetchabt_fixup */
611 fa526_context_switch, /* context_switch */
613 fa526_setup /* cpu setup */
615 #endif /* CPU_FA526 || CPU_FA626TE */
617 #if defined(CPU_ARM1136)
618 struct cpu_functions arm1136_cpufuncs = {
622 cpufunc_nullop, /* cpwait */
626 cpufunc_control, /* control */
627 cpufunc_domains, /* Domain */
628 arm11x6_setttb, /* Setttb */
629 cpufunc_faultstatus, /* Faultstatus */
630 cpufunc_faultaddress, /* Faultaddress */
634 arm11_tlb_flushID, /* tlb_flushID */
635 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
636 arm11_tlb_flushI, /* tlb_flushI */
637 arm11_tlb_flushI_SE, /* tlb_flushI_SE */
638 arm11_tlb_flushD, /* tlb_flushD */
639 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
641 /* Cache operations */
643 arm11x6_icache_sync_all, /* icache_sync_all */
644 arm11x6_icache_sync_range, /* icache_sync_range */
646 arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
647 armv6_dcache_wbinv_range, /* dcache_wbinv_range */
648 armv6_dcache_inv_range, /* dcache_inv_range */
649 armv6_dcache_wb_range, /* dcache_wb_range */
651 armv6_idcache_inv_all, /* idcache_inv_all */
652 arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
653 arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
655 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
656 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
657 (void *)cpufunc_nullop, /* l2cache_inv_range */
658 (void *)cpufunc_nullop, /* l2cache_wb_range */
660 /* Other functions */
662 arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */
663 arm11_drain_writebuf, /* drain_writebuf */
664 cpufunc_nullop, /* flush_brnchtgt_C */
665 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
667 arm11_sleep, /* sleep */
671 cpufunc_null_fixup, /* dataabt_fixup */
672 cpufunc_null_fixup, /* prefetchabt_fixup */
674 arm11_context_switch, /* context_switch */
676 arm11x6_setup /* cpu setup */
678 #endif /* CPU_ARM1136 */
679 #if defined(CPU_ARM1176)
680 struct cpu_functions arm1176_cpufuncs = {
684 cpufunc_nullop, /* cpwait */
688 cpufunc_control, /* control */
689 cpufunc_domains, /* Domain */
690 arm11x6_setttb, /* Setttb */
691 cpufunc_faultstatus, /* Faultstatus */
692 cpufunc_faultaddress, /* Faultaddress */
696 arm11_tlb_flushID, /* tlb_flushID */
697 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
698 arm11_tlb_flushI, /* tlb_flushI */
699 arm11_tlb_flushI_SE, /* tlb_flushI_SE */
700 arm11_tlb_flushD, /* tlb_flushD */
701 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
703 /* Cache operations */
705 arm11x6_icache_sync_all, /* icache_sync_all */
706 arm11x6_icache_sync_range, /* icache_sync_range */
708 arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
709 armv6_dcache_wbinv_range, /* dcache_wbinv_range */
710 armv6_dcache_inv_range, /* dcache_inv_range */
711 armv6_dcache_wb_range, /* dcache_wb_range */
713 armv6_idcache_inv_all, /* idcache_inv_all */
714 arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
715 arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
717 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
718 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
719 (void *)cpufunc_nullop, /* l2cache_inv_range */
720 (void *)cpufunc_nullop, /* l2cache_wb_range */
722 /* Other functions */
724 arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */
725 arm11_drain_writebuf, /* drain_writebuf */
726 cpufunc_nullop, /* flush_brnchtgt_C */
727 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
729 arm11x6_sleep, /* sleep */
733 cpufunc_null_fixup, /* dataabt_fixup */
734 cpufunc_null_fixup, /* prefetchabt_fixup */
736 arm11_context_switch, /* context_switch */
738 arm11x6_setup /* cpu setup */
740 #endif /*CPU_ARM1176 */
742 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
743 struct cpu_functions cortexa_cpufuncs = {
747 cpufunc_nullop, /* cpwait */
751 cpufunc_control, /* control */
752 cpufunc_domains, /* Domain */
753 armv7_setttb, /* Setttb */
754 cpufunc_faultstatus, /* Faultstatus */
755 cpufunc_faultaddress, /* Faultaddress */
758 * TLB functions. ARMv7 does all TLB ops based on a unified TLB model
759 * whether the hardware implements separate I+D or not, so we use the
760 * same 'ID' functions for all 3 variations.
763 armv7_tlb_flushID, /* tlb_flushID */
764 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
765 armv7_tlb_flushID, /* tlb_flushI */
766 armv7_tlb_flushID_SE, /* tlb_flushI_SE */
767 armv7_tlb_flushID, /* tlb_flushD */
768 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
770 /* Cache operations */
772 armv7_icache_sync_all, /* icache_sync_all */
773 armv7_icache_sync_range, /* icache_sync_range */
775 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
776 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
777 armv7_dcache_inv_range, /* dcache_inv_range */
778 armv7_dcache_wb_range, /* dcache_wb_range */
780 armv7_idcache_inv_all, /* idcache_inv_all */
781 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
782 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
785 * Note: For CPUs using the PL310 the L2 ops are filled in when the
786 * L2 cache controller is actually enabled.
788 cpufunc_nullop, /* l2cache_wbinv_all */
789 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
790 (void *)cpufunc_nullop, /* l2cache_inv_range */
791 (void *)cpufunc_nullop, /* l2cache_wb_range */
793 /* Other functions */
795 cpufunc_nullop, /* flush_prefetchbuf */
796 armv7_drain_writebuf, /* drain_writebuf */
797 cpufunc_nullop, /* flush_brnchtgt_C */
798 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
800 armv7_sleep, /* sleep */
804 cpufunc_null_fixup, /* dataabt_fixup */
805 cpufunc_null_fixup, /* prefetchabt_fixup */
807 armv7_context_switch, /* context_switch */
809 cortexa_setup /* cpu setup */
811 #endif /* CPU_CORTEXA */
814 * Global constants also used by locore.s
817 struct cpu_functions cpufuncs;
819 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
821 #if defined(CPU_ARM9) || \
822 defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) || \
823 defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
824 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
825 defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) || \
826 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
827 defined(CPU_CORTEXA) || defined(CPU_KRAIT)
829 static void get_cachetype_cp15(void);
831 /* Additional cache information local to this file. Log2 of some of the
833 static int arm_dcache_l2_nsets;
834 static int arm_dcache_l2_assoc;
835 static int arm_dcache_l2_linesize;
840 u_int ctype, isize, dsize, cpuid;
841 u_int clevel, csize, i, sel;
845 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
848 cpuid = cpufunc_id();
850 * ...and thus spake the ARM ARM:
852 * If an <opcode2> value corresponding to an unimplemented or
853 * reserved ID register is encountered, the System Control
854 * processor returns the value of the main ID register.
859 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
860 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
862 arm_cache_level = clevel;
863 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
865 while ((type = (clevel & 0x7)) && i < 7) {
866 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
867 type == CACHE_SEP_CACHE) {
869 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
871 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
873 arm_cache_type[sel] = csize;
874 arm_dcache_align = 1 <<
875 (CPUV7_CT_xSIZE_LEN(csize) + 4);
876 arm_dcache_align_mask = arm_dcache_align - 1;
878 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
880 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
882 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
884 arm_cache_type[sel] = csize;
890 if ((ctype & CPU_CT_S) == 0)
891 arm_pcache_unified = 1;
894 * If you want to know how this code works, go read the ARM ARM.
897 arm_pcache_type = CPU_CT_CTYPE(ctype);
899 if (arm_pcache_unified == 0) {
900 isize = CPU_CT_ISIZE(ctype);
901 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
902 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
903 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
904 if (isize & CPU_CT_xSIZE_M)
905 arm_picache_line_size = 0; /* not present */
907 arm_picache_ways = 1;
909 arm_picache_ways = multiplier <<
910 (CPU_CT_xSIZE_ASSOC(isize) - 1);
912 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
915 dsize = CPU_CT_DSIZE(ctype);
916 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
917 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
918 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
919 if (dsize & CPU_CT_xSIZE_M)
920 arm_pdcache_line_size = 0; /* not present */
922 arm_pdcache_ways = 1;
924 arm_pdcache_ways = multiplier <<
925 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
927 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
929 arm_dcache_align = arm_pdcache_line_size;
931 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
932 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
933 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
934 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
937 arm_dcache_align_mask = arm_dcache_align - 1;
940 #endif /* ARM9 || XSCALE */
943 * Cannot panic here as we may not have a console yet ...
949 cputype = cpufunc_id();
950 cputype &= CPU_ID_CPU_MASK;
953 * NOTE: cpu_do_powersave defaults to off. If we encounter a
954 * CPU type where we want to use it by default, then we set it.
958 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
959 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
960 (cputype & 0x0000f000) == 0x00009000) {
961 cpufuncs = arm9_cpufuncs;
962 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
963 get_cachetype_cp15();
964 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
965 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
966 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
967 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
968 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
969 #ifdef ARM9_CACHE_WRITE_THROUGH
970 pmap_pte_init_arm9();
972 pmap_pte_init_generic();
976 #endif /* CPU_ARM9 */
977 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
978 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
979 cputype == CPU_ID_MV88FR571_41) {
980 uint32_t sheeva_ctrl;
982 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
985 * Workaround for Marvell MV78100 CPU: Cache prefetch
986 * mechanism may affect the cache coherency validity,
987 * so it needs to be disabled.
989 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
990 * L2 Prefetching Mechanism) for details.
992 if (cputype == CPU_ID_MV88FR571_VD ||
993 cputype == CPU_ID_MV88FR571_41)
994 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
996 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
998 cpufuncs = sheeva_cpufuncs;
999 get_cachetype_cp15();
1000 pmap_pte_init_generic();
1002 } else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
1003 cpufuncs = armv5_ec_cpufuncs;
1004 get_cachetype_cp15();
1005 pmap_pte_init_generic();
1008 #endif /* CPU_ARM9E || CPU_ARM10 */
1010 if (/* cputype == CPU_ID_ARM1020T || */
1011 cputype == CPU_ID_ARM1020E) {
1013 * Select write-through cacheing (this isn't really an
1014 * option on ARM1020T).
1016 cpufuncs = arm10_cpufuncs;
1017 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1018 get_cachetype_cp15();
1019 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1020 arm10_dcache_sets_max =
1021 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1022 arm10_dcache_sets_inc;
1023 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1024 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
1025 pmap_pte_init_generic();
1028 #endif /* CPU_ARM10 */
1029 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1030 if (cputype == CPU_ID_ARM1136JS
1031 || cputype == CPU_ID_ARM1136JSR1
1032 || cputype == CPU_ID_ARM1176JZS) {
1034 if (cputype == CPU_ID_ARM1136JS
1035 || cputype == CPU_ID_ARM1136JSR1)
1036 cpufuncs = arm1136_cpufuncs;
1039 if (cputype == CPU_ID_ARM1176JZS)
1040 cpufuncs = arm1176_cpufuncs;
1042 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1043 get_cachetype_cp15();
1045 pmap_pte_init_mmu_v6();
1049 #endif /* CPU_ARM1136 || CPU_ARM1176 */
1050 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1051 if (cputype == CPU_ID_CORTEXA7 ||
1052 cputype == CPU_ID_CORTEXA8R1 ||
1053 cputype == CPU_ID_CORTEXA8R2 ||
1054 cputype == CPU_ID_CORTEXA8R3 ||
1055 cputype == CPU_ID_CORTEXA9R1 ||
1056 cputype == CPU_ID_CORTEXA9R2 ||
1057 cputype == CPU_ID_CORTEXA9R3 ||
1058 cputype == CPU_ID_CORTEXA15R0 ||
1059 cputype == CPU_ID_CORTEXA15R1 ||
1060 cputype == CPU_ID_CORTEXA15R2 ||
1061 cputype == CPU_ID_CORTEXA15R3 ||
1062 cputype == CPU_ID_KRAIT ) {
1063 cpufuncs = cortexa_cpufuncs;
1064 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1065 get_cachetype_cp15();
1067 pmap_pte_init_mmu_v6();
1068 /* Use powersave on this CPU. */
1069 cpu_do_powersave = 1;
1072 #endif /* CPU_CORTEXA */
1074 #if defined(CPU_MV_PJ4B)
1075 if (cputype == CPU_ID_MV88SV581X_V7 ||
1076 cputype == CPU_ID_MV88SV584X_V7 ||
1077 cputype == CPU_ID_ARM_88SV581X_V7) {
1078 cpufuncs = pj4bv7_cpufuncs;
1079 get_cachetype_cp15();
1080 pmap_pte_init_mmu_v6();
/*
 * NOTE(review): tail of the CPU-identification sequence in set_cpufuncs()
 * (the function header is above this view).  Each compiled-in CPU family
 * compares the probed 'cputype' id; a match installs the family's cpufuncs
 * jump table, probes cache geometry via CP15, and selects the pmap PTE
 * flavor.  The per-match success returns appear on lines elided from this
 * view -- confirm against the full file.
 */
1083 #endif /* CPU_MV_PJ4B */
1085 #if defined(CPU_FA526) || defined(CPU_FA626TE)
/* Faraday FA526/FA626TE: generic PTE format; powersave idle is usable. */
1086 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
1087 cpufuncs = fa526_cpufuncs;
1088 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1089 get_cachetype_cp15();
1090 pmap_pte_init_generic();
1092 /* Use powersave on this CPU. */
1093 cpu_do_powersave = 1;
1097 #endif /* CPU_FA526 || CPU_FA626TE */
1099 #ifdef CPU_XSCALE_80200
/* Intel 80200 XScale: the revision id drives an erratum workaround below. */
1100 if (cputype == CPU_ID_80200) {
1101 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1105 #if defined(XSCALE_CCLKCFG)
1107 * Crank CCLKCFG to maximum legal value.
/* CP14 c6,c0,0: XScale core-clock configuration register. */
1109 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1111 : "r" (XSCALE_CCLKCFG));
1115 * XXX Disable ECC in the Bus Controller Unit; we
1116 * don't really support it, yet. Clear any pending
1117 * error indications.
1119 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1121 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1123 cpufuncs = xscale_cpufuncs;
1125 * i80200 errata: Step-A0 and A1 have a bug where
1126 * D$ dirty bits are not cleared on "invalidate by
1129 * Workaround: Clean cache line before invalidating.
/* Substitute purge (clean+invalidate) for plain invalidate on r0/r1. */
1131 if (rev == 0 || rev == 1)
1132 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1134 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1135 get_cachetype_cp15();
1136 pmap_pte_init_xscale();
1139 #endif /* CPU_XSCALE_80200 */
1140 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
/* Intel i80321/i80219 I/O processors: stock XScale handlers. */
1141 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1142 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1143 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1144 cpufuncs = xscale_cpufuncs;
1145 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1146 get_cachetype_cp15();
1147 pmap_pte_init_xscale();
1150 #endif /* CPU_XSCALE_80321 */
1152 #if defined(CPU_XSCALE_81342)
/* Intel 81342: uses the XScale core-3 variant of the jump table. */
1153 if (cputype == CPU_ID_81342) {
1154 cpufuncs = xscalec3_cpufuncs;
1155 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1156 get_cachetype_cp15();
1157 pmap_pte_init_xscale();
1160 #endif /* CPU_XSCALE_81342 */
1161 #ifdef CPU_XSCALE_PXA2X0
1162 /* ignore core revision to test PXA2xx CPUs */
1163 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1164 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1165 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1167 cpufuncs = xscale_cpufuncs;
1168 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1169 get_cachetype_cp15();
1170 pmap_pte_init_xscale();
1172 /* Use powersave on this CPU. */
1173 cpu_do_powersave = 1;
1177 #endif /* CPU_XSCALE_PXA2X0 */
1178 #ifdef CPU_XSCALE_IXP425
/* Intel IXP425/IXP435 network processors. */
1179 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1180 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
1182 cpufuncs = xscale_cpufuncs;
1183 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1184 get_cachetype_cp15();
1185 pmap_pte_init_xscale();
1189 #endif /* CPU_XSCALE_IXP425 */
/* Nothing matched: this kernel was not built for the CPU it is running on. */
1191 * Bzzzz. And the answer was ...
1193 panic("No support for this CPU type (%08x) in kernel", cputype);
1194 return(ARCHITECTURE_NOT_PRESENT);
/*
 * NOTE(review): presumably reached via an "out" label (elided) after a
 * successful match; aligns UMA allocations to the D-cache line size.
 */
1196 uma_set_align(arm_dcache_align_mask);
1201 * Fixup routines for data and prefetch aborts.
1203 * Several compile-time symbols are used:
1205 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1206 * correction of registers after a fault.
1211 * Null abort fixup routine.
1212 * For use when no fixup is required.
/*
 * Null abort-fixup handler: installed when the CPU needs no register
 * rollback after a data/prefetch abort; it simply reports success.
 * (K&R definition; return type and argument declaration are on lines
 * elided from this view.)
 */
1215 cpufunc_null_fixup(arg)
1218 return(ABORT_FIXUP_OK);
1225 #if defined (CPU_ARM9) || \
1226 defined(CPU_ARM9E) || \
1227 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1228 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1229 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1230 defined(CPU_ARM10) || defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
1231 defined(CPU_FA526) || defined(CPU_FA626TE)
1244 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
/*
 * Apply boot-argument tunables to a proposed CP15 control-register image.
 * For each table entry whose name appears as a boolean option in 'args',
 * co_trueop (option true) or co_falseop (option false) says whether the
 * bits in co_value are ORed into or cleared (BIC) from 'cpuctrl'.
 * Returns the adjusted control word (the return statement is on a line
 * elided from this view).
 */
1247 parse_cpu_options(args, optlist, cpuctrl)
1249 struct cpu_option *optlist;
1257 while (optlist->co_name) {
1258 if (get_bootconf_option(args, optlist->co_name,
1259 BOOTOPT_TYPE_BOOLEAN, &integer)) {
/* Option present and true. */
1261 if (optlist->co_trueop == OR)
1262 cpuctrl |= optlist->co_value;
1263 else if (optlist->co_trueop == BIC)
1264 cpuctrl &= ~optlist->co_value;
/* Option present and false. */
1266 if (optlist->co_falseop == OR)
1267 cpuctrl |= optlist->co_value;
1268 else if (optlist->co_falseop == BIC)
1269 cpuctrl &= ~optlist->co_value;
1276 #endif /* CPU_ARM9 || XSCALE*/
/* Boot-time tunables for ARM9 cores, consumed by parse_cpu_options(). */
1279 struct cpu_option arm9_options[] = {
1280 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1281 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1282 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1283 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1284 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1285 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1286 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1287 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1288 { NULL, IGN, IGN, 0 }
/*
 * NOTE(review): body of arm9_setup() -- the function signature is on a
 * line elided from this view.  Builds the desired CP15 control-register
 * image for ARM9, applies boot-arg overrides, then flushes the caches
 * and writes the register.
 */
1295 int cpuctrl, cpuctrlmask;
1297 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1298 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1299 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1300 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1301 CPU_CONTROL_ROUNDROBIN;
/* Mask of all bits this routine is allowed to change. */
1302 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1303 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1304 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1305 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1306 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1307 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1308 | CPU_CONTROL_ROUNDROBIN;
1310 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1311 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
/* Let boot args override the defaults chosen above. */
1314 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1317 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate exception vectors to 0xffff0000 if the kernel put them there. */
1319 if (vector_page == ARM_VECTORS_HIGH)
1320 cpuctrl |= CPU_CONTROL_VECRELOC;
1322 /* Clear out the cache */
1323 cpu_idcache_wbinv_all();
1325 /* Set the control register */
1326 cpu_control(cpuctrlmask, cpuctrl);
1332 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* Boot-time tunables for ARM9E/ARM10 cores (see parse_cpu_options()). */
1333 struct cpu_option arm10_options[] = {
1334 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1335 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1336 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1337 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1338 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1339 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1340 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1341 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1342 { NULL, IGN, IGN, 0 }
/*
 * NOTE(review): body of the ARM9E/ARM10 setup routine -- the signature
 * is on a line elided from this view.  Same pattern as arm9_setup():
 * compose the control word, apply overrides, flush, write.
 */
1349 int cpuctrl, cpuctrlmask;
1351 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1352 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1353 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1354 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1355 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1356 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1357 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1358 | CPU_CONTROL_BPRD_ENABLE
1359 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1361 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1362 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1365 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1368 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1371 /* Clear out the cache */
1372 cpu_idcache_wbinv_all();
1374 /* Now really make sure they are clean. */
/*
 * CP15 c7,c7,0 invalidates both caches.  NOTE(review): r0 is passed
 * uninitialized; the transferred register appears to be ignored by
 * this operation -- confirm against the ARM ARM.
 */
1375 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1377 if (vector_page == ARM_VECTORS_HIGH)
1378 cpuctrl |= CPU_CONTROL_VECRELOC;
1380 /* Set the control register */
/* Writes all bits, not just cpuctrlmask. */
1382 cpu_control(0xffffffff, cpuctrl);
/* And flush again with the new settings in force. */
1385 cpu_idcache_wbinv_all();
1387 #endif /* CPU_ARM9E || CPU_ARM10 */
1389 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
/* Boot-time tunables for ARM11 cores (see parse_cpu_options()). */
1390 struct cpu_option arm11_options[] = {
1391 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1392 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1393 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1394 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1395 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1396 { NULL, IGN, IGN, 0 }
/*
 * ARM1136/ARM1176 setup: builds both the control-register image and a
 * "write-as-existing" mask (bits preserved from the current value), and
 * applies the documented errata workarounds for early steppings via the
 * auxiliary control register.
 */
1400 arm11x6_setup(char *args)
1402 int cpuctrl, cpuctrl_wax;
1403 uint32_t auxctrl, auxctrl_wax;
1408 cpuid = cpufunc_id();
/* NOTE(review): the 'cpuctrl =' assignment head is elided from this view. */
1411 CPU_CONTROL_MMU_ENABLE |
1412 CPU_CONTROL_DC_ENABLE |
1413 CPU_CONTROL_WBUF_ENABLE |
1414 CPU_CONTROL_32BP_ENABLE |
1415 CPU_CONTROL_32BD_ENABLE |
1416 CPU_CONTROL_LABT_ENABLE |
1417 CPU_CONTROL_SYST_ENABLE |
1418 CPU_CONTROL_IC_ENABLE;
1421 * "write as existing" bits
1422 * inverse of this is mask
1425 (3 << 30) | /* SBZ */
1426 (1 << 29) | /* FA */
1427 (1 << 28) | /* TR */
1428 (3 << 26) | /* SBZ */
1429 (3 << 19) | /* SBZ */
1430 (1 << 17); /* SBZ */
1432 cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1433 cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1435 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1438 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1441 if (vector_page == ARM_VECTORS_HIGH)
1442 cpuctrl |= CPU_CONTROL_VECRELOC;
1447 * This option enables the workaround for the 364296 ARM1136
1448 * r0pX errata (possible cache data corruption with
1449 * hit-under-miss enabled). It sets the undocumented bit 31 in
1450 * the auxiliary control register and the FI bit in the control
1451 * register, thus disabling hit-under-miss without putting the
1452 * processor into full low interrupt latency mode. ARM11MPCore
1455 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
1456 cpuctrl |= CPU_CONTROL_FI_ENABLE;
1457 auxctrl = ARM1136_AUXCTL_PFI;
1458 auxctrl_wax = ~ARM1136_AUXCTL_PFI;
1462 * Enable an errata workaround
/* ARM1176: set the PHD auxiliary-control bit, preserving all others. */
1464 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
1465 auxctrl = ARM1176_AUXCTL_PHD;
1466 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
1469 /* Clear out the cache */
1470 cpu_idcache_wbinv_all();
1472 /* Now really make sure they are clean. */
1473 __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
1475 /* Allow detection code to find the VFP if it's fitted. */
/* CP15 c1,c0,2: Coprocessor Access Control -- grant access to all. */
1476 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
1478 /* Set the control register */
/* Only the bits NOT in cpuctrl_wax are written. */
1480 cpu_control(~cpuctrl_wax, cpuctrl);
/*
 * Read-modify-write of the auxiliary control register; the conditional
 * mcrne skips the write when nothing changed (the comparison instruction
 * is on a line elided from this view).
 */
1482 __asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t"
1483 "and %1, %0, %2\n\t"
1484 "orr %1, %1, %3\n\t"
1486 "mcrne p15, 0, %1, c1, c0, 1\n\t"
1487 : "=r"(tmp), "=r"(tmp2) :
1488 "r"(auxctrl_wax), "r"(auxctrl));
/* Flush again with the new settings in force. */
1491 cpu_idcache_wbinv_all();
1493 #endif /* CPU_ARM1136 || CPU_ARM1176 */
/*
 * NOTE(review): body of the Marvell PJ4B (ARMv7) setup routine -- the
 * signature is on a line elided from this view.  Builds the SCTLR image
 * and writes it, flushing caches before and after.
 *
 * Bug fix: the original wrote "(1 < 22)", a less-than comparison that
 * evaluates to 1 (bit 0, MMU enable -- already set above) instead of the
 * intended shift "(1 << 22)" setting SCTLR bit 22.
 */
1504 cpuctrl = CPU_CONTROL_MMU_ENABLE;
1505 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1506 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1508 cpuctrl |= CPU_CONTROL_DC_ENABLE;
/* Bits 3..6: legacy should-be-one bits -- TODO confirm against ARM ARM. */
1509 cpuctrl |= (0xf << 3);
1510 cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1511 cpuctrl |= CPU_CONTROL_IC_ENABLE;
1512 if (vector_page == ARM_VECTORS_HIGH)
1513 cpuctrl |= CPU_CONTROL_VECRELOC;
1514 cpuctrl |= (0x5 << 16) | (1 << 22);	/* was "(1 < 22)": comparison, not shift */
1515 cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1517 /* Clear out the cache */
1518 cpu_idcache_wbinv_all();
1520 /* Set the control register */
1522 cpu_control(0xFFFFFFFF, cpuctrl);
/* Flush again with the new settings in force. */
1525 cpu_idcache_wbinv_all();
1527 #endif /* CPU_MV_PJ4B */
1529 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Cortex-A / Krait (ARMv7) setup: writes only the bits named in
 * cpuctrlmask, then enables SMP mode and TLB-broadcast via the
 * auxiliary control register.
 */
1532 cortexa_setup(char *args)
1534 int cpuctrl, cpuctrlmask;
1536 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | /* MMU enable [0] */
1537 CPU_CONTROL_AFLT_ENABLE | /* Alignment fault [1] */
1538 CPU_CONTROL_DC_ENABLE | /* DCache enable [2] */
1539 CPU_CONTROL_BPRD_ENABLE | /* Branch prediction [11] */
1540 CPU_CONTROL_IC_ENABLE | /* ICache enable [12] */
1541 CPU_CONTROL_VECRELOC; /* Vector relocation [13] */
1543 cpuctrl = CPU_CONTROL_MMU_ENABLE |
1544 CPU_CONTROL_IC_ENABLE |
1545 CPU_CONTROL_DC_ENABLE |
1546 CPU_CONTROL_BPRD_ENABLE;
1548 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1549 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1552 /* Switch to big endian */
1554 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1557 /* Check if the vector page is at the high address (0xffff0000) */
1558 if (vector_page == ARM_VECTORS_HIGH)
1559 cpuctrl |= CPU_CONTROL_VECRELOC;
1561 /* Clear out the cache */
1562 cpu_idcache_wbinv_all();
1564 /* Set the control register */
1566 cpu_control(cpuctrlmask, cpuctrl);
/* Flush again with the new settings in force. */
1569 cpu_idcache_wbinv_all();
1571 armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting */
1574 #endif /* CPU_CORTEXA */
1576 #if defined(CPU_FA526) || defined(CPU_FA626TE)
/* Boot-time tunables for Faraday FA526/FA626TE (see parse_cpu_options()). */
1577 struct cpu_option fa526_options[] = {
/* Legacy un-prefixed names, kept for COMPAT_12 boot args. */
1579 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE |
1580 CPU_CONTROL_DC_ENABLE) },
1581 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1582 #endif /* COMPAT_12 */
1583 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE |
1584 CPU_CONTROL_DC_ENABLE) },
1585 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE |
1586 CPU_CONTROL_DC_ENABLE) },
1587 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1588 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1589 { NULL, IGN, IGN, 0 }
/*
 * Faraday FA526/FA626TE setup: same pattern as arm9_setup() -- compose
 * the control word, apply boot-arg overrides, flush, then write every bit.
 */
1593 fa526_setup(char *args)
1595 int cpuctrl, cpuctrlmask;
1597 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1598 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1599 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1600 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1601 | CPU_CONTROL_BPRD_ENABLE;
/* NOTE(review): cpuctrlmask is computed but 0xffffffff is written below. */
1602 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1603 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1604 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1605 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1606 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1607 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1608 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1610 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1611 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1614 cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
1617 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1620 if (vector_page == ARM_VECTORS_HIGH)
1621 cpuctrl |= CPU_CONTROL_VECRELOC;
1623 /* Clear out the cache */
1624 cpu_idcache_wbinv_all();
1626 /* Set the control register */
1628 cpu_control(0xffffffff, cpuctrl);
1630 #endif /* CPU_FA526 || CPU_FA626TE */
1632 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1633 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1634 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/* Boot-time tunables shared by all XScale variants (see parse_cpu_options()). */
1635 struct cpu_option xscale_options[] = {
/* Legacy un-prefixed names, kept for COMPAT_12 boot args. */
1637 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1638 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1639 #endif /* COMPAT_12 */
1640 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1641 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1642 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1643 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1644 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1645 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1646 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1647 { NULL, IGN, IGN, 0 }
/*
 * NOTE(review): body of the XScale setup routine -- the signature is on
 * a line elided from this view.  Composes the control word, writes it,
 * then tunes the XScale auxiliary control register (write coalescing,
 * and core-3 L2/memory-attribute bits).
 */
1655 int cpuctrl, cpuctrlmask;
1658 * The XScale Write Buffer is always enabled. Our option
1659 * is to enable/disable coalescing. Note that bits 6:3
1660 * must always be enabled.
1663 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1664 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1665 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1666 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1667 | CPU_CONTROL_BPRD_ENABLE;
1668 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1669 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1670 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1671 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1672 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1673 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1674 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1675 CPU_CONTROL_L2_ENABLE;
1677 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1678 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1681 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1684 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1687 if (vector_page == ARM_VECTORS_HIGH)
1688 cpuctrl |= CPU_CONTROL_VECRELOC;
1689 #ifdef CPU_XSCALE_CORE3
/* Core 3 (81342) also has an L2 cache enable bit in the control register. */
1690 cpuctrl |= CPU_CONTROL_L2_ENABLE;
1693 /* Clear out the cache */
1694 cpu_idcache_wbinv_all();
1697 * Set the control register. Note that bits 6:3 must always
1701 /* cpu_control(cpuctrlmask, cpuctrl);*/
1702 cpu_control(0xffffffff, cpuctrl);
1704 /* Make sure write coalescing is turned on */
/* Read-modify-write of CP15 c1,c0,1 (auxiliary control) into 'auxctl'. */
1705 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1707 #ifdef XSCALE_NO_COALESCE_WRITES
/* K bit set = write coalescing disabled; cleared = enabled (default). */
1708 auxctl |= XSCALE_AUXCTL_K;
1710 auxctl &= ~XSCALE_AUXCTL_K;
1712 #ifdef CPU_XSCALE_CORE3
1713 auxctl |= XSCALE_AUXCTL_LLR;
1714 auxctl |= XSCALE_AUXCTL_MD_MASK;
/* Write the adjusted auxiliary control value back. */
1716 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1719 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425