1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 * products derived from this software without specific prior written
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * RiscBSD kernel project
40 * C functions for supporting CPU / MMU / TLB specific operations.
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/systm.h>
50 #include <sys/mutex.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
62 #include <machine/bootconfig.h>
64 #ifdef CPU_XSCALE_80200
65 #include <arm/xscale/i80200/i80200reg.h>
66 #include <arm/xscale/i80200/i80200var.h>
69 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
70 #include <arm/xscale/i80321/i80321reg.h>
71 #include <arm/xscale/i80321/i80321var.h>
75 * Some definitions in i81342reg.h clash with i80321reg.h.
76 * This only happens for the LINT kernel. As it happens,
77 * we don't need anything from i81342reg.h that we already
78 * got from somewhere else during a LINT compile.
80 #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
81 #include <arm/xscale/i8134x/i81342reg.h>
84 #ifdef CPU_XSCALE_IXP425
85 #include <arm/xscale/ixp425/ixp425reg.h>
86 #include <arm/xscale/ixp425/ixp425var.h>
89 /* PRIMARY CACHE VARIABLES */
91 int arm_picache_line_size;
94 int arm_pdcache_size; /* and unified */
95 int arm_pdcache_line_size;
99 int arm_pcache_unified;
101 int arm_dcache_align;
102 int arm_dcache_align_mask;
104 u_int arm_cache_level;
105 u_int arm_cache_type[14];
108 /* 1 == use cpu_sleep(), 0 == don't */
109 int cpu_do_powersave;
113 struct cpu_functions arm9_cpufuncs = {
117 cpufunc_nullop, /* cpwait */
121 cpufunc_control, /* control */
122 cpufunc_domains, /* Domain */
123 arm9_setttb, /* Setttb */
124 cpufunc_faultstatus, /* Faultstatus */
125 cpufunc_faultaddress, /* Faultaddress */
129 armv4_tlb_flushID, /* tlb_flushID */
130 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
131 armv4_tlb_flushI, /* tlb_flushI */
132 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
133 armv4_tlb_flushD, /* tlb_flushD */
134 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
136 /* Cache operations */
138 arm9_icache_sync_all, /* icache_sync_all */
139 arm9_icache_sync_range, /* icache_sync_range */
141 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
142 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
143 arm9_dcache_inv_range, /* dcache_inv_range */
144 arm9_dcache_wb_range, /* dcache_wb_range */
146 armv4_idcache_inv_all, /* idcache_inv_all */
147 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
148 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
149 cpufunc_nullop, /* l2cache_wbinv_all */
150 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
151 (void *)cpufunc_nullop, /* l2cache_inv_range */
152 (void *)cpufunc_nullop, /* l2cache_wb_range */
153 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
155 /* Other functions */
157 cpufunc_nullop, /* flush_prefetchbuf */
158 armv4_drain_writebuf, /* drain_writebuf */
159 cpufunc_nullop, /* flush_brnchtgt_C */
160 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
162 (void *)cpufunc_nullop, /* sleep */
166 cpufunc_null_fixup, /* dataabt_fixup */
167 cpufunc_null_fixup, /* prefetchabt_fixup */
169 arm9_context_switch, /* context_switch */
171 arm9_setup /* cpu setup */
174 #endif /* CPU_ARM9 */
176 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
177 struct cpu_functions armv5_ec_cpufuncs = {
181 cpufunc_nullop, /* cpwait */
185 cpufunc_control, /* control */
186 cpufunc_domains, /* Domain */
187 armv5_ec_setttb, /* Setttb */
188 cpufunc_faultstatus, /* Faultstatus */
189 cpufunc_faultaddress, /* Faultaddress */
193 armv4_tlb_flushID, /* tlb_flushID */
194 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
195 armv4_tlb_flushI, /* tlb_flushI */
196 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
197 armv4_tlb_flushD, /* tlb_flushD */
198 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
200 /* Cache operations */
202 armv5_ec_icache_sync_all, /* icache_sync_all */
203 armv5_ec_icache_sync_range, /* icache_sync_range */
205 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
206 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
207 armv5_ec_dcache_inv_range, /* dcache_inv_range */
208 armv5_ec_dcache_wb_range, /* dcache_wb_range */
210 armv4_idcache_inv_all, /* idcache_inv_all */
211 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
212 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
214 cpufunc_nullop, /* l2cache_wbinv_all */
215 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
216 (void *)cpufunc_nullop, /* l2cache_inv_range */
217 (void *)cpufunc_nullop, /* l2cache_wb_range */
218 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
220 /* Other functions */
222 cpufunc_nullop, /* flush_prefetchbuf */
223 armv4_drain_writebuf, /* drain_writebuf */
224 cpufunc_nullop, /* flush_brnchtgt_C */
225 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
227 (void *)cpufunc_nullop, /* sleep */
231 cpufunc_null_fixup, /* dataabt_fixup */
232 cpufunc_null_fixup, /* prefetchabt_fixup */
234 arm10_context_switch, /* context_switch */
236 arm10_setup /* cpu setup */
240 struct cpu_functions sheeva_cpufuncs = {
244 cpufunc_nullop, /* cpwait */
248 cpufunc_control, /* control */
249 cpufunc_domains, /* Domain */
250 sheeva_setttb, /* Setttb */
251 cpufunc_faultstatus, /* Faultstatus */
252 cpufunc_faultaddress, /* Faultaddress */
256 armv4_tlb_flushID, /* tlb_flushID */
257 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
258 armv4_tlb_flushI, /* tlb_flushI */
259 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
260 armv4_tlb_flushD, /* tlb_flushD */
261 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
263 /* Cache operations */
265 armv5_ec_icache_sync_all, /* icache_sync_all */
266 armv5_ec_icache_sync_range, /* icache_sync_range */
268 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
269 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
270 sheeva_dcache_inv_range, /* dcache_inv_range */
271 sheeva_dcache_wb_range, /* dcache_wb_range */
273 armv4_idcache_inv_all, /* idcache_inv_all */
274 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
275 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
277 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
278 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
279 sheeva_l2cache_inv_range, /* l2cache_inv_range */
280 sheeva_l2cache_wb_range, /* l2cache_wb_range */
281 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
283 /* Other functions */
285 cpufunc_nullop, /* flush_prefetchbuf */
286 armv4_drain_writebuf, /* drain_writebuf */
287 cpufunc_nullop, /* flush_brnchtgt_C */
288 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
290 sheeva_cpu_sleep, /* sleep */
294 cpufunc_null_fixup, /* dataabt_fixup */
295 cpufunc_null_fixup, /* prefetchabt_fixup */
297 arm10_context_switch, /* context_switch */
299 arm10_setup /* cpu setup */
301 #endif /* CPU_ARM9E || CPU_ARM10 */
304 struct cpu_functions arm10_cpufuncs = {
308 cpufunc_nullop, /* cpwait */
312 cpufunc_control, /* control */
313 cpufunc_domains, /* Domain */
314 arm10_setttb, /* Setttb */
315 cpufunc_faultstatus, /* Faultstatus */
316 cpufunc_faultaddress, /* Faultaddress */
320 armv4_tlb_flushID, /* tlb_flushID */
321 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
322 armv4_tlb_flushI, /* tlb_flushI */
323 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
324 armv4_tlb_flushD, /* tlb_flushD */
325 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
327 /* Cache operations */
329 arm10_icache_sync_all, /* icache_sync_all */
330 arm10_icache_sync_range, /* icache_sync_range */
332 arm10_dcache_wbinv_all, /* dcache_wbinv_all */
333 arm10_dcache_wbinv_range, /* dcache_wbinv_range */
334 arm10_dcache_inv_range, /* dcache_inv_range */
335 arm10_dcache_wb_range, /* dcache_wb_range */
337 armv4_idcache_inv_all, /* idcache_inv_all */
338 arm10_idcache_wbinv_all, /* idcache_wbinv_all */
339 arm10_idcache_wbinv_range, /* idcache_wbinv_range */
340 cpufunc_nullop, /* l2cache_wbinv_all */
341 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
342 (void *)cpufunc_nullop, /* l2cache_inv_range */
343 (void *)cpufunc_nullop, /* l2cache_wb_range */
344 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
346 /* Other functions */
348 cpufunc_nullop, /* flush_prefetchbuf */
349 armv4_drain_writebuf, /* drain_writebuf */
350 cpufunc_nullop, /* flush_brnchtgt_C */
351 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
353 (void *)cpufunc_nullop, /* sleep */
357 cpufunc_null_fixup, /* dataabt_fixup */
358 cpufunc_null_fixup, /* prefetchabt_fixup */
360 arm10_context_switch, /* context_switch */
362 arm10_setup /* cpu setup */
365 #endif /* CPU_ARM10 */
368 struct cpu_functions pj4bv7_cpufuncs = {
372 armv7_drain_writebuf, /* cpwait */
376 cpufunc_control, /* control */
377 cpufunc_domains, /* Domain */
378 armv7_setttb, /* Setttb */
379 cpufunc_faultstatus, /* Faultstatus */
380 cpufunc_faultaddress, /* Faultaddress */
384 armv7_tlb_flushID, /* tlb_flushID */
385 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
386 armv7_tlb_flushID, /* tlb_flushI */
387 armv7_tlb_flushID_SE, /* tlb_flushI_SE */
388 armv7_tlb_flushID, /* tlb_flushD */
389 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
391 /* Cache operations */
392 armv7_idcache_wbinv_all, /* icache_sync_all */
393 armv7_icache_sync_range, /* icache_sync_range */
395 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
396 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
397 armv7_dcache_inv_range, /* dcache_inv_range */
398 armv7_dcache_wb_range, /* dcache_wb_range */
400 armv7_idcache_inv_all, /* idcache_inv_all */
401 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
402 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
404 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
405 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
406 (void *)cpufunc_nullop, /* l2cache_inv_range */
407 (void *)cpufunc_nullop, /* l2cache_wb_range */
408 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
410 /* Other functions */
412 cpufunc_nullop, /* flush_prefetchbuf */
413 armv7_drain_writebuf, /* drain_writebuf */
414 cpufunc_nullop, /* flush_brnchtgt_C */
415 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
417 (void *)cpufunc_nullop, /* sleep */
421 cpufunc_null_fixup, /* dataabt_fixup */
422 cpufunc_null_fixup, /* prefetchabt_fixup */
424 armv7_context_switch, /* context_switch */
426 pj4bv7_setup /* cpu setup */
428 #endif /* CPU_MV_PJ4B */
430 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
431 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
432 defined(CPU_XSCALE_80219)
434 struct cpu_functions xscale_cpufuncs = {
438 xscale_cpwait, /* cpwait */
442 xscale_control, /* control */
443 cpufunc_domains, /* domain */
444 xscale_setttb, /* setttb */
445 cpufunc_faultstatus, /* faultstatus */
446 cpufunc_faultaddress, /* faultaddress */
450 armv4_tlb_flushID, /* tlb_flushID */
451 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
452 armv4_tlb_flushI, /* tlb_flushI */
453 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
454 armv4_tlb_flushD, /* tlb_flushD */
455 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
457 /* Cache operations */
459 xscale_cache_syncI, /* icache_sync_all */
460 xscale_cache_syncI_rng, /* icache_sync_range */
462 xscale_cache_purgeD, /* dcache_wbinv_all */
463 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
464 xscale_cache_flushD_rng, /* dcache_inv_range */
465 xscale_cache_cleanD_rng, /* dcache_wb_range */
467 xscale_cache_flushID, /* idcache_inv_all */
468 xscale_cache_purgeID, /* idcache_wbinv_all */
469 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
470 cpufunc_nullop, /* l2cache_wbinv_all */
471 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
472 (void *)cpufunc_nullop, /* l2cache_inv_range */
473 (void *)cpufunc_nullop, /* l2cache_wb_range */
474 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
476 /* Other functions */
478 cpufunc_nullop, /* flush_prefetchbuf */
479 armv4_drain_writebuf, /* drain_writebuf */
480 cpufunc_nullop, /* flush_brnchtgt_C */
481 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
483 xscale_cpu_sleep, /* sleep */
487 cpufunc_null_fixup, /* dataabt_fixup */
488 cpufunc_null_fixup, /* prefetchabt_fixup */
490 xscale_context_switch, /* context_switch */
492 xscale_setup /* cpu setup */
495 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_80219 */
498 #ifdef CPU_XSCALE_81342
499 struct cpu_functions xscalec3_cpufuncs = {
503 xscale_cpwait, /* cpwait */
507 xscale_control, /* control */
508 cpufunc_domains, /* domain */
509 xscalec3_setttb, /* setttb */
510 cpufunc_faultstatus, /* faultstatus */
511 cpufunc_faultaddress, /* faultaddress */
515 armv4_tlb_flushID, /* tlb_flushID */
516 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
517 armv4_tlb_flushI, /* tlb_flushI */
518 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
519 armv4_tlb_flushD, /* tlb_flushD */
520 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
522 /* Cache operations */
524 xscalec3_cache_syncI, /* icache_sync_all */
525 xscalec3_cache_syncI_rng, /* icache_sync_range */
527 xscalec3_cache_purgeD, /* dcache_wbinv_all */
528 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
529 xscale_cache_flushD_rng, /* dcache_inv_range */
530 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
532 xscale_cache_flushID, /* idcache_inv_all */
533 xscalec3_cache_purgeID, /* idcache_wbinv_all */
534 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
535 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
536 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
537 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
538 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
539 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
541 /* Other functions */
543 cpufunc_nullop, /* flush_prefetchbuf */
544 armv4_drain_writebuf, /* drain_writebuf */
545 cpufunc_nullop, /* flush_brnchtgt_C */
546 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
548 xscale_cpu_sleep, /* sleep */
552 cpufunc_null_fixup, /* dataabt_fixup */
553 cpufunc_null_fixup, /* prefetchabt_fixup */
555 xscalec3_context_switch, /* context_switch */
557 xscale_setup /* cpu setup */
559 #endif /* CPU_XSCALE_81342 */
562 #if defined(CPU_FA526) || defined(CPU_FA626TE)
563 struct cpu_functions fa526_cpufuncs = {
567 cpufunc_nullop, /* cpwait */
571 cpufunc_control, /* control */
572 cpufunc_domains, /* domain */
573 fa526_setttb, /* setttb */
574 cpufunc_faultstatus, /* faultstatus */
575 cpufunc_faultaddress, /* faultaddress */
579 armv4_tlb_flushID, /* tlb_flushID */
580 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
581 armv4_tlb_flushI, /* tlb_flushI */
582 fa526_tlb_flushI_SE, /* tlb_flushI_SE */
583 armv4_tlb_flushD, /* tlb_flushD */
584 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
586 /* Cache operations */
588 fa526_icache_sync_all, /* icache_sync_all */
589 fa526_icache_sync_range, /* icache_sync_range */
591 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
592 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
593 fa526_dcache_inv_range, /* dcache_inv_range */
594 fa526_dcache_wb_range, /* dcache_wb_range */
596 armv4_idcache_inv_all, /* idcache_inv_all */
597 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
598 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
599 cpufunc_nullop, /* l2cache_wbinv_all */
600 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
601 (void *)cpufunc_nullop, /* l2cache_inv_range */
602 (void *)cpufunc_nullop, /* l2cache_wb_range */
603 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
605 /* Other functions */
607 fa526_flush_prefetchbuf, /* flush_prefetchbuf */
608 armv4_drain_writebuf, /* drain_writebuf */
609 cpufunc_nullop, /* flush_brnchtgt_C */
610 fa526_flush_brnchtgt_E, /* flush_brnchtgt_E */
612 fa526_cpu_sleep, /* sleep */
616 cpufunc_null_fixup, /* dataabt_fixup */
617 cpufunc_null_fixup, /* prefetchabt_fixup */
619 fa526_context_switch, /* context_switch */
621 fa526_setup /* cpu setup */
623 #endif /* CPU_FA526 || CPU_FA626TE */
625 #if defined(CPU_ARM1136)
626 struct cpu_functions arm1136_cpufuncs = {
630 cpufunc_nullop, /* cpwait */
634 cpufunc_control, /* control */
635 cpufunc_domains, /* Domain */
636 arm11x6_setttb, /* Setttb */
637 cpufunc_faultstatus, /* Faultstatus */
638 cpufunc_faultaddress, /* Faultaddress */
642 arm11_tlb_flushID, /* tlb_flushID */
643 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
644 arm11_tlb_flushI, /* tlb_flushI */
645 arm11_tlb_flushI_SE, /* tlb_flushI_SE */
646 arm11_tlb_flushD, /* tlb_flushD */
647 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
649 /* Cache operations */
651 arm11x6_icache_sync_all, /* icache_sync_all */
652 arm11x6_icache_sync_range, /* icache_sync_range */
654 arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
655 armv6_dcache_wbinv_range, /* dcache_wbinv_range */
656 armv6_dcache_inv_range, /* dcache_inv_range */
657 armv6_dcache_wb_range, /* dcache_wb_range */
659 armv6_idcache_inv_all, /* idcache_inv_all */
660 arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
661 arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
663 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
664 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
665 (void *)cpufunc_nullop, /* l2cache_inv_range */
666 (void *)cpufunc_nullop, /* l2cache_wb_range */
667 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
669 /* Other functions */
671 arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */
672 arm11_drain_writebuf, /* drain_writebuf */
673 cpufunc_nullop, /* flush_brnchtgt_C */
674 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
676 arm11_sleep, /* sleep */
680 cpufunc_null_fixup, /* dataabt_fixup */
681 cpufunc_null_fixup, /* prefetchabt_fixup */
683 arm11_context_switch, /* context_switch */
685 arm11x6_setup /* cpu setup */
687 #endif /* CPU_ARM1136 */
688 #if defined(CPU_ARM1176)
689 struct cpu_functions arm1176_cpufuncs = {
693 cpufunc_nullop, /* cpwait */
697 cpufunc_control, /* control */
698 cpufunc_domains, /* Domain */
699 arm11x6_setttb, /* Setttb */
700 cpufunc_faultstatus, /* Faultstatus */
701 cpufunc_faultaddress, /* Faultaddress */
705 arm11_tlb_flushID, /* tlb_flushID */
706 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
707 arm11_tlb_flushI, /* tlb_flushI */
708 arm11_tlb_flushI_SE, /* tlb_flushI_SE */
709 arm11_tlb_flushD, /* tlb_flushD */
710 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
712 /* Cache operations */
714 arm11x6_icache_sync_all, /* icache_sync_all */
715 arm11x6_icache_sync_range, /* icache_sync_range */
717 arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
718 armv6_dcache_wbinv_range, /* dcache_wbinv_range */
719 armv6_dcache_inv_range, /* dcache_inv_range */
720 armv6_dcache_wb_range, /* dcache_wb_range */
722 armv6_idcache_inv_all, /* idcache_inv_all */
723 arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
724 arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
726 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
727 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
728 (void *)cpufunc_nullop, /* l2cache_inv_range */
729 (void *)cpufunc_nullop, /* l2cache_wb_range */
730 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
732 /* Other functions */
734 arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */
735 arm11_drain_writebuf, /* drain_writebuf */
736 cpufunc_nullop, /* flush_brnchtgt_C */
737 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
739 arm11x6_sleep, /* sleep */
743 cpufunc_null_fixup, /* dataabt_fixup */
744 cpufunc_null_fixup, /* prefetchabt_fixup */
746 arm11_context_switch, /* context_switch */
748 arm11x6_setup /* cpu setup */
750 #endif /*CPU_ARM1176 */
752 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
753 struct cpu_functions cortexa_cpufuncs = {
757 cpufunc_nullop, /* cpwait */
761 cpufunc_control, /* control */
762 cpufunc_domains, /* Domain */
763 armv7_setttb, /* Setttb */
764 cpufunc_faultstatus, /* Faultstatus */
765 cpufunc_faultaddress, /* Faultaddress */
768 * TLB functions. ARMv7 does all TLB ops based on a unified TLB model
769 * whether the hardware implements separate I+D or not, so we use the
770 * same 'ID' functions for all 3 variations.
773 armv7_tlb_flushID, /* tlb_flushID */
774 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
775 armv7_tlb_flushID, /* tlb_flushI */
776 armv7_tlb_flushID_SE, /* tlb_flushI_SE */
777 armv7_tlb_flushID, /* tlb_flushD */
778 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
780 /* Cache operations */
782 armv7_icache_sync_all, /* icache_sync_all */
783 armv7_icache_sync_range, /* icache_sync_range */
785 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
786 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
787 armv7_dcache_inv_range, /* dcache_inv_range */
788 armv7_dcache_wb_range, /* dcache_wb_range */
790 armv7_idcache_inv_all, /* idcache_inv_all */
791 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
792 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
795 * Note: For CPUs using the PL310 the L2 ops are filled in when the
796 * L2 cache controller is actually enabled.
798 cpufunc_nullop, /* l2cache_wbinv_all */
799 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
800 (void *)cpufunc_nullop, /* l2cache_inv_range */
801 (void *)cpufunc_nullop, /* l2cache_wb_range */
802 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
804 /* Other functions */
806 cpufunc_nullop, /* flush_prefetchbuf */
807 armv7_drain_writebuf, /* drain_writebuf */
808 cpufunc_nullop, /* flush_brnchtgt_C */
809 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
811 armv7_sleep, /* sleep */
815 cpufunc_null_fixup, /* dataabt_fixup */
816 cpufunc_null_fixup, /* prefetchabt_fixup */
818 armv7_context_switch, /* context_switch */
820 cortexa_setup /* cpu setup */
822 #endif /* CPU_CORTEXA */
825 * Global constants also used by locore.s
828 struct cpu_functions cpufuncs;
830 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
832 #if defined(CPU_ARM9) || \
833 defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) || \
834 defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
835 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
836 defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) || \
837 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
838 defined(CPU_CORTEXA) || defined(CPU_KRAIT)
840 static void get_cachetype_cp15(void);
842 /* Additional cache information local to this file. Log2 of some of the
844 static int arm_dcache_l2_nsets;
845 static int arm_dcache_l2_assoc;
846 static int arm_dcache_l2_linesize;
851 u_int ctype, isize, dsize, cpuid;
852 u_int clevel, csize, i, sel;
856 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
859 cpuid = cpufunc_id();
861 * ...and thus spake the ARM ARM:
863 * If an <opcode2> value corresponding to an unimplemented or
864 * reserved ID register is encountered, the System Control
865 * processor returns the value of the main ID register.
870 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
871 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
873 arm_cache_level = clevel;
874 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
876 while ((type = (clevel & 0x7)) && i < 7) {
877 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
878 type == CACHE_SEP_CACHE) {
880 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
882 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
884 arm_cache_type[sel] = csize;
885 arm_dcache_align = 1 <<
886 (CPUV7_CT_xSIZE_LEN(csize) + 4);
887 arm_dcache_align_mask = arm_dcache_align - 1;
889 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
891 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
893 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
895 arm_cache_type[sel] = csize;
901 if ((ctype & CPU_CT_S) == 0)
902 arm_pcache_unified = 1;
905 * If you want to know how this code works, go read the ARM ARM.
908 arm_pcache_type = CPU_CT_CTYPE(ctype);
910 if (arm_pcache_unified == 0) {
911 isize = CPU_CT_ISIZE(ctype);
912 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
913 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
914 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
915 if (isize & CPU_CT_xSIZE_M)
916 arm_picache_line_size = 0; /* not present */
918 arm_picache_ways = 1;
920 arm_picache_ways = multiplier <<
921 (CPU_CT_xSIZE_ASSOC(isize) - 1);
923 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
926 dsize = CPU_CT_DSIZE(ctype);
927 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
928 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
929 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
930 if (dsize & CPU_CT_xSIZE_M)
931 arm_pdcache_line_size = 0; /* not present */
933 arm_pdcache_ways = 1;
935 arm_pdcache_ways = multiplier <<
936 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
938 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
940 arm_dcache_align = arm_pdcache_line_size;
942 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
943 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
944 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
945 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
948 arm_dcache_align_mask = arm_dcache_align - 1;
951 #endif /* ARM9 || XSCALE */
954 * Cannot panic here as we may not have a console yet ...
960 cputype = cpufunc_id();
961 cputype &= CPU_ID_CPU_MASK;
964 * NOTE: cpu_do_powersave defaults to off. If we encounter a
965 * CPU type where we want to use it by default, then we set it.
969 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
970 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
971 (cputype & 0x0000f000) == 0x00009000) {
972 cpufuncs = arm9_cpufuncs;
973 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
974 get_cachetype_cp15();
975 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
976 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
977 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
978 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
979 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
980 #ifdef ARM9_CACHE_WRITE_THROUGH
981 pmap_pte_init_arm9();
983 pmap_pte_init_generic();
987 #endif /* CPU_ARM9 */
988 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
989 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
990 cputype == CPU_ID_MV88FR571_41) {
991 uint32_t sheeva_ctrl;
993 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
996 * Workaround for Marvell MV78100 CPU: Cache prefetch
997 * mechanism may affect the cache coherency validity,
998 * so it needs to be disabled.
1000 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
1001 * L2 Prefetching Mechanism) for details.
1003 if (cputype == CPU_ID_MV88FR571_VD ||
1004 cputype == CPU_ID_MV88FR571_41)
1005 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
1007 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
1009 cpufuncs = sheeva_cpufuncs;
1010 get_cachetype_cp15();
1011 pmap_pte_init_generic();
1013 } else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
1014 cpufuncs = armv5_ec_cpufuncs;
1015 get_cachetype_cp15();
1016 pmap_pte_init_generic();
1019 #endif /* CPU_ARM9E || CPU_ARM10 */
1021 if (/* cputype == CPU_ID_ARM1020T || */
1022 cputype == CPU_ID_ARM1020E) {
1024 * Select write-through cacheing (this isn't really an
1025 * option on ARM1020T).
1027 cpufuncs = arm10_cpufuncs;
1028 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1029 get_cachetype_cp15();
1030 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1031 arm10_dcache_sets_max =
1032 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1033 arm10_dcache_sets_inc;
1034 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1035 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
1036 pmap_pte_init_generic();
1039 #endif /* CPU_ARM10 */
1040 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1041 if (cputype == CPU_ID_ARM1136JS
1042 || cputype == CPU_ID_ARM1136JSR1
1043 || cputype == CPU_ID_ARM1176JZS) {
1045 if (cputype == CPU_ID_ARM1136JS
1046 || cputype == CPU_ID_ARM1136JSR1)
1047 cpufuncs = arm1136_cpufuncs;
1050 if (cputype == CPU_ID_ARM1176JZS)
1051 cpufuncs = arm1176_cpufuncs;
1053 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1054 get_cachetype_cp15();
1056 pmap_pte_init_mmu_v6();
1060 #endif /* CPU_ARM1136 || CPU_ARM1176 */
1061 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1062 if (cputype == CPU_ID_CORTEXA5 ||
1063 cputype == CPU_ID_CORTEXA7 ||
1064 cputype == CPU_ID_CORTEXA8R1 ||
1065 cputype == CPU_ID_CORTEXA8R2 ||
1066 cputype == CPU_ID_CORTEXA8R3 ||
1067 cputype == CPU_ID_CORTEXA9R1 ||
1068 cputype == CPU_ID_CORTEXA9R2 ||
1069 cputype == CPU_ID_CORTEXA9R3 ||
1070 cputype == CPU_ID_CORTEXA15R0 ||
1071 cputype == CPU_ID_CORTEXA15R1 ||
1072 cputype == CPU_ID_CORTEXA15R2 ||
1073 cputype == CPU_ID_CORTEXA15R3 ||
1074 cputype == CPU_ID_KRAIT ) {
1075 cpufuncs = cortexa_cpufuncs;
1076 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1077 get_cachetype_cp15();
1079 pmap_pte_init_mmu_v6();
1080 /* Use powersave on this CPU. */
1081 cpu_do_powersave = 1;
1084 #endif /* CPU_CORTEXA */
1086 #if defined(CPU_MV_PJ4B)
1087 if (cputype == CPU_ID_MV88SV581X_V7 ||
1088 cputype == CPU_ID_MV88SV584X_V7 ||
1089 cputype == CPU_ID_ARM_88SV581X_V7) {
1090 cpufuncs = pj4bv7_cpufuncs;
1091 get_cachetype_cp15();
1092 pmap_pte_init_mmu_v6();
1095 #endif /* CPU_MV_PJ4B */
1097 #if defined(CPU_FA526) || defined(CPU_FA626TE)
1098 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
1099 cpufuncs = fa526_cpufuncs;
1100 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1101 get_cachetype_cp15();
1102 pmap_pte_init_generic();
1104 /* Use powersave on this CPU. */
1105 cpu_do_powersave = 1;
1109 #endif /* CPU_FA526 || CPU_FA626TE */
1111 #ifdef CPU_XSCALE_80200
1112 if (cputype == CPU_ID_80200) {
1113 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1117 #if defined(XSCALE_CCLKCFG)
1119 * Crank CCLKCFG to maximum legal value.
1121 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1123 : "r" (XSCALE_CCLKCFG));
1127 * XXX Disable ECC in the Bus Controller Unit; we
1128 * don't really support it, yet. Clear any pending
1129 * error indications.
1131 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1133 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1135 cpufuncs = xscale_cpufuncs;
1137 * i80200 errata: Step-A0 and A1 have a bug where
1138 * D$ dirty bits are not cleared on "invalidate by
1141 * Workaround: Clean cache line before invalidating.
1143 if (rev == 0 || rev == 1)
1144 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1146 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1147 get_cachetype_cp15();
1148 pmap_pte_init_xscale();
1151 #endif /* CPU_XSCALE_80200 */
1152 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1153 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1154 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1155 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1156 cpufuncs = xscale_cpufuncs;
1157 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1158 get_cachetype_cp15();
1159 pmap_pte_init_xscale();
1162 #endif /* CPU_XSCALE_80321 */
1164 #if defined(CPU_XSCALE_81342)
1165 if (cputype == CPU_ID_81342) {
1166 cpufuncs = xscalec3_cpufuncs;
1167 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1168 get_cachetype_cp15();
1169 pmap_pte_init_xscale();
1172 #endif /* CPU_XSCALE_81342 */
1173 #ifdef CPU_XSCALE_PXA2X0
1174 /* ignore core revision to test PXA2xx CPUs */
1175 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1176 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1177 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1179 cpufuncs = xscale_cpufuncs;
1180 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1181 get_cachetype_cp15();
1182 pmap_pte_init_xscale();
1184 /* Use powersave on this CPU. */
1185 cpu_do_powersave = 1;
1189 #endif /* CPU_XSCALE_PXA2X0 */
1190 #ifdef CPU_XSCALE_IXP425
1191 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1192 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
1194 cpufuncs = xscale_cpufuncs;
1195 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1196 get_cachetype_cp15();
1197 pmap_pte_init_xscale();
1201 #endif /* CPU_XSCALE_IXP425 */
1203 * Bzzzz. And the answer was ...
1205 panic("No support for this CPU type (%08x) in kernel", cputype);
1206 return(ARCHITECTURE_NOT_PRESENT);
1208 uma_set_align(arm_dcache_align_mask);
1213 * Fixup routines for data and prefetch aborts.
1215 * Several compile time symbols are used
1217 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1218 * correction of registers after a fault.
1223 * Null abort fixup routine.
1224 * For use when no fixup is required.
/*
 * K&R-style stub; 'arg' (the abort frame pointer) is accepted but
 * ignored.  Always reports ABORT_FIXUP_OK so the abort handler
 * continues without performing any register correction.
 */
1227 cpufunc_null_fixup(arg)
1230 	return(ABORT_FIXUP_OK);
1237 #if defined (CPU_ARM9) || \
1238 defined(CPU_ARM9E) || \
1239 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1240 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1241 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1242 defined(CPU_ARM10) || defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
1243 defined(CPU_FA526) || defined(CPU_FA626TE)
1256 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
/*
 * Walk a NULL-terminated table of boot-time CPU options.  For each
 * option name found in 'args' as a boolean, fold the entry's
 * control-register bits into 'cpuctrl': OR sets the bits, BIC clears
 * them.  co_trueop is applied when the option is true, co_falseop
 * when it is false; IGN entries leave the word untouched.  The
 * updated control word is returned to the caller.
 */
1259 parse_cpu_options(args, optlist, cpuctrl)
1261 	struct cpu_option *optlist;
1269 	while (optlist->co_name) {
1270 		if (get_bootconf_option(args, optlist->co_name,
1271 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
			/* Option present and true: apply the "true" operation. */
1273 			if (optlist->co_trueop == OR)
1274 				cpuctrl |= optlist->co_value;
1275 			else if (optlist->co_trueop == BIC)
1276 				cpuctrl &= ~optlist->co_value;
			/* Option present and false: apply the "false" operation. */
1278 			if (optlist->co_falseop == OR)
1279 				cpuctrl |= optlist->co_value;
1280 			else if (optlist->co_falseop == BIC)
1281 				cpuctrl &= ~optlist->co_value;
/*
 * Boot-argument option table for ARM9 cores: maps boot option names
 * to the I-cache, D-cache and write-buffer enable bits of the CP15
 * control register.  Terminated by the NULL entry.
 */
1291 struct cpu_option arm9_options[] = {
1292 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1293 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1294 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1295 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1296 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1297 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1298 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1299 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1300 { NULL, IGN, IGN, 0 }
/*
 * ARM9 CPU setup: build the default control-register value and the
 * mask of bits this routine manages, apply boot-time options via
 * parse_cpu_options(), then flush the caches and install the new
 * control word with cpu_control().
 */
1307 int cpuctrl, cpuctrlmask;
1309 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1310 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1311 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1312 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1313 CPU_CONTROL_ROUNDROBIN;
/* cpuctrlmask lists every bit cpu_control() is allowed to change. */
1314 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1315 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1316 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1317 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1318 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1319 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1320 | CPU_CONTROL_ROUNDROBIN;
/* Enable alignment faults unless the kernel config opts out. */
1322 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1323 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1326 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
/* NOTE(review): big-endian conditional guarding this appears elided here. */
1329 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate exception vectors to 0xffff0000 when configured high. */
1331 if (vector_page == ARM_VECTORS_HIGH)
1332 cpuctrl |= CPU_CONTROL_VECRELOC;
1334 /* Clear out the cache */
1335 cpu_idcache_wbinv_all();
1337 /* Set the control register */
1338 cpu_control(cpuctrlmask, cpuctrl);
1344 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Boot-argument option table for ARM9E/ARM10 cores: cache and
 * write-buffer enable bits, NULL-terminated.
 */
1345 struct cpu_option arm10_options[] = {
1346 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1347 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1348 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1349 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1350 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1351 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1352 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1353 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1354 { NULL, IGN, IGN, 0 }
/*
 * ARM9E/ARM10 CPU setup: compute default control bits, apply boot
 * options, flush and forcibly invalidate the caches, then write the
 * full control register (mask 0xffffffff) and flush once more.
 */
1361 int cpuctrl, cpuctrlmask;
1363 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1364 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1365 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1366 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1367 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1368 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1369 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1370 | CPU_CONTROL_BPRD_ENABLE
1371 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
/* Enable alignment faults unless the kernel config opts out. */
1373 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1374 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1377 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
/* NOTE(review): big-endian conditional guarding this appears elided here. */
1380 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1383 /* Clear out the cache */
1384 cpu_idcache_wbinv_all();
/* CP15 c7,c7,0: invalidate both I and D caches in one shot. */
1386 /* Now really make sure they are clean. */
1387 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1389 if (vector_page == ARM_VECTORS_HIGH)
1390 cpuctrl |= CPU_CONTROL_VECRELOC;
1392 /* Set the control register */
1394 cpu_control(0xffffffff, cpuctrl);
/* Flush again now that the new cache configuration is live. */
1397 cpu_idcache_wbinv_all();
1399 #endif /* CPU_ARM9E || CPU_ARM10 */
1401 #if defined(CPU_ARM1136) || defined(CPU_ARM1176) \
1402 || defined(CPU_MV_PJ4B) \
1403 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Enable the cycle counter (CCNT) so it can be used as a performance
 * counter.  With _PMC_USER_READ_WRITE_ defined, userland is also
 * granted (read AND write) access to the counter registers.  The
 * CP15 encodings differ between ARM11 (c15-based) and the v7 PMU
 * (c9-based) register files, hence the CPU-type conditionals.
 */
1404 static __inline void
1405 cpu_scc_setup_ccnt(void)
1407 /* This is how you give userland access to the CCNT and PMCn
1409 * BEWARE! This gives write access also, which may not be what
1412 #ifdef _PMC_USER_READ_WRITE_
1413 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1414 /* Use the Secure User and Non-secure Access Validation Control Register
1415 * to allow userland access
1417 __asm volatile ("mcr p15, 0, %0, c15, c9, 0\n\t"
1421 /* Set PMUSERENR[0] to allow userland access */
1422 __asm volatile ("mcr p15, 0, %0, c9, c14, 0\n\t"
1427 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1428 /* Set PMCR[2,0] to enable counters and reset CCNT */
1429 __asm volatile ("mcr p15, 0, %0, c15, c12, 0\n\t"
1433 /* Set up the PMCCNTR register as a cyclecounter:
1434 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
1435 * Set PMCR[2,0] to enable counters and reset CCNT
1436 * Set PMCNTENSET to 0x80000000 to enable CCNT */
1437 __asm volatile ("mcr p15, 0, %0, c9, c14, 2\n\t"
1438 "mcr p15, 0, %1, c9, c12, 0\n\t"
1439 "mcr p15, 0, %2, c9, c12, 1\n\t"
/*
 * Boot-argument option table for ARM1136/ARM1176 cores: cache enable
 * bits only, NULL-terminated.
 */
1449 struct cpu_option arm11_options[] = {
1450 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1451 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1452 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1453 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1454 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1455 { NULL, IGN, IGN, 0 }
/*
 * ARM1136/ARM1176 CPU setup.  Builds the control-register value and a
 * "write-as-existing" mask (cpuctrl_wax: bits we must preserve from the
 * current register contents), applies boot options and errata
 * workarounds, flushes the caches, then installs the control register
 * and read-modify-writes the auxiliary control register.  Finally
 * enables the cycle counter via cpu_scc_setup_ccnt().
 */
1459 arm11x6_setup(char *args)
1461 int cpuctrl, cpuctrl_wax;
1462 uint32_t auxctrl, auxctrl_wax;
1467 cpuid = cpufunc_id();
1470 CPU_CONTROL_MMU_ENABLE |
1471 CPU_CONTROL_DC_ENABLE |
1472 CPU_CONTROL_WBUF_ENABLE |
1473 CPU_CONTROL_32BP_ENABLE |
1474 CPU_CONTROL_32BD_ENABLE |
1475 CPU_CONTROL_LABT_ENABLE |
1476 CPU_CONTROL_SYST_ENABLE |
1477 CPU_CONTROL_IC_ENABLE;
1480 * "write as existing" bits
1481 * inverse of this is mask
1484 (3 << 30) | /* SBZ */
1485 (1 << 29) | /* FA */
1486 (1 << 28) | /* TR */
1487 (3 << 26) | /* SBZ */
1488 (3 << 19) | /* SBZ */
1489 (1 << 17); /* SBZ */
/* Branch prediction and the v6 extended (subpage-AP) page tables. */
1491 cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1492 cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1494 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
/* NOTE(review): big-endian conditional guarding this appears elided here. */
1497 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1500 if (vector_page == ARM_VECTORS_HIGH)
1501 cpuctrl |= CPU_CONTROL_VECRELOC;
1506 * This options enables the workaround for the 364296 ARM1136
1507 * r0pX errata (possible cache data corruption with
1508 * hit-under-miss enabled). It sets the undocumented bit 31 in
1509 * the auxiliary control register and the FI bit in the control
1510 * register, thus disabling hit-under-miss without putting the
1511 * processor into full low interrupt latency mode. ARM11MPCore
1514 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
1515 cpuctrl |= CPU_CONTROL_FI_ENABLE;
1516 auxctrl = ARM1136_AUXCTL_PFI;
1517 auxctrl_wax = ~ARM1136_AUXCTL_PFI;
1521 * Enable an errata workaround
1523 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
1524 auxctrl = ARM1176_AUXCTL_PHD;
1525 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
1528 /* Clear out the cache */
1529 cpu_idcache_wbinv_all();
1531 /* Now really make sure they are clean. */
1532 __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
1534 /* Allow detection code to find the VFP if it's fitted. */
1535 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
1537 /* Set the control register */
1539 cpu_control(~cpuctrl_wax, cpuctrl);
/*
 * Read-modify-write the auxiliary control register (c1,c0,1): keep
 * the auxctrl_wax bits, OR in auxctrl, and write back only when the
 * value actually changed (conditional mcrne).
 */
1541 __asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t"
1542 "and %1, %0, %2\n\t"
1543 "orr %1, %1, %3\n\t"
1545 "mcrne p15, 0, %1, c1, c0, 1\n\t"
1546 : "=r"(tmp), "=r"(tmp2) :
1547 "r"(auxctrl_wax), "r"(auxctrl));
/* Flush again now that the new configuration is live. */
1550 cpu_idcache_wbinv_all();
1552 cpu_scc_setup_ccnt();
1554 #endif /* CPU_ARM1136 || CPU_ARM1176 */
/*
 * Marvell PJ4B (ARMv7) CPU setup: enable the MMU, caches and branch
 * prediction, set the implementation-defined / should-be-one control
 * bits, flush the caches, install the new control-register value,
 * and enable the cycle counter.
 */
1565 cpuctrl = CPU_CONTROL_MMU_ENABLE;
/* Enable alignment faults unless the kernel config opts out. */
1566 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1567 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1569 cpuctrl |= CPU_CONTROL_DC_ENABLE;
/* Bits 6:3 are should-be-one on ARMv7-class cores. */
1570 cpuctrl |= (0xf << 3);
1571 cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1572 cpuctrl |= CPU_CONTROL_IC_ENABLE;
1573 if (vector_page == ARM_VECTORS_HIGH)
1574 cpuctrl |= CPU_CONTROL_VECRELOC;
/*
 * Bits 16 and 18 are should-be-one; bit 22 is SCTLR.U (unaligned
 * access enable).  BUG FIX: the original read "(1 < 22)" -- a
 * comparison that evaluates to 1, redundantly setting bit 0 (MMU
 * enable, already set) and leaving bit 22 clear.  The intended
 * expression is the shift "(1 << 22)".
 */
1575 cpuctrl |= (0x5 << 16) | (1 << 22);
1576 cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1578 /* Clear out the cache */
1579 cpu_idcache_wbinv_all();
1581 /* Set the control register */
1583 cpu_control(0xFFFFFFFF, cpuctrl);
/* Flush again now that the new cache configuration is live. */
1586 cpu_idcache_wbinv_all();
/* Enable the cycle counter for performance monitoring. */
1588 cpu_scc_setup_ccnt();
1590 #endif /* CPU_MV_PJ4B */
1592 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Cortex-A / Krait CPU setup: enable MMU, caches and branch
 * prediction, install the control register, then enable SMP mode and
 * TLB broadcasting in the auxiliary control register and the cycle
 * counter for PMC use.
 */
1595 cortexa_setup(char *args)
1597 int cpuctrl, cpuctrlmask;
/*
 * Bits this routine manages in the control register.
 * NOTE(review): CPU_CONTROL_BEND_ENABLE is not in this mask even
 * though the big-endian path below ORs it into cpuctrl -- with a
 * masked cpu_control() write that bit would not take effect; verify
 * against the big-endian configuration.
 */
1599 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | /* MMU enable [0] */
1600 CPU_CONTROL_AFLT_ENABLE | /* Alignment fault [1] */
1601 CPU_CONTROL_DC_ENABLE | /* DCache enable [2] */
1602 CPU_CONTROL_BPRD_ENABLE | /* Branch prediction [11] */
1603 CPU_CONTROL_IC_ENABLE | /* ICache enable [12] */
1604 CPU_CONTROL_VECRELOC; /* Vector relocation [13] */
1606 cpuctrl = CPU_CONTROL_MMU_ENABLE |
1607 CPU_CONTROL_IC_ENABLE |
1608 CPU_CONTROL_DC_ENABLE |
1609 CPU_CONTROL_BPRD_ENABLE;
/* Enable alignment faults unless the kernel config opts out. */
1611 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1612 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1615 /* Switch to big endian */
1617 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1620 /* Check if the vector page is at the high address (0xffff0000) */
1621 if (vector_page == ARM_VECTORS_HIGH)
1622 cpuctrl |= CPU_CONTROL_VECRELOC;
1624 /* Clear out the cache */
1625 cpu_idcache_wbinv_all();
1627 /* Set the control register */
1629 cpu_control(cpuctrlmask, cpuctrl);
/* Flush again now that the new cache configuration is live. */
1632 cpu_idcache_wbinv_all();
1634 armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting */
1637 cpu_scc_setup_ccnt();
1639 #endif /* CPU_CORTEXA */
1641 #if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * Boot-argument option table for Faraday FA526/FA626TE cores: cache
 * and write-buffer enable bits, including legacy (COMPAT_12) option
 * names.  NULL-terminated.
 */
1642 struct cpu_option fa526_options[] = {
1644 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE |
1645 CPU_CONTROL_DC_ENABLE) },
1646 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1647 #endif /* COMPAT_12 */
1648 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE |
1649 CPU_CONTROL_DC_ENABLE) },
1650 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE |
1651 CPU_CONTROL_DC_ENABLE) },
1652 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1653 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1654 { NULL, IGN, IGN, 0 }
/*
 * Faraday FA526/FA626TE CPU setup: build the default control bits,
 * apply boot options, flush the caches, and write the full control
 * register (mask 0xffffffff).
 */
1658 fa526_setup(char *args)
1660 int cpuctrl, cpuctrlmask;
1662 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1663 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1664 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1665 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1666 | CPU_CONTROL_BPRD_ENABLE;
1667 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1668 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1669 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1670 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1671 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1672 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1673 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
/* Enable alignment faults unless the kernel config opts out. */
1675 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1676 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1679 cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
/* NOTE(review): big-endian conditional guarding this appears elided here. */
1682 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1685 if (vector_page == ARM_VECTORS_HIGH)
1686 cpuctrl |= CPU_CONTROL_VECRELOC;
1688 /* Clear out the cache */
1689 cpu_idcache_wbinv_all();
1691 /* Set the control register */
1693 cpu_control(0xffffffff, cpuctrl);
1695 #endif /* CPU_FA526 || CPU_FA626TE */
1697 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1698 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1699 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * Boot-argument option table for XScale cores: branch prediction and
 * cache enable bits, including legacy (COMPAT_12) option names.
 * NULL-terminated.
 */
1700 struct cpu_option xscale_options[] = {
1702 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1703 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1704 #endif /* COMPAT_12 */
1705 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1706 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1707 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1708 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1709 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1710 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1711 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1712 { NULL, IGN, IGN, 0 }
/*
 * XScale CPU setup: build the default control bits, apply boot
 * options, flush the caches, write the full control register, and
 * then configure write-buffer coalescing (and, on Core3, the L2 and
 * mini-data-cache attributes) via the auxiliary control register.
 */
1720 int cpuctrl, cpuctrlmask;
1723 * The XScale Write Buffer is always enabled. Our option
1724 * is to enable/disable coalescing. Note that bits 6:3
1725 * must always be enabled.
1728 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1729 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1730 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1731 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1732 | CPU_CONTROL_BPRD_ENABLE;
1733 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1734 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1735 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1736 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1737 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1738 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1739 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1740 CPU_CONTROL_L2_ENABLE;
/* Enable alignment faults unless the kernel config opts out. */
1742 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1743 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1746 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
/* NOTE(review): big-endian conditional guarding this appears elided here. */
1749 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1752 if (vector_page == ARM_VECTORS_HIGH)
1753 cpuctrl |= CPU_CONTROL_VECRELOC;
/* XScale Core3 has an L2 cache; enable it. */
1754 #ifdef CPU_XSCALE_CORE3
1755 cpuctrl |= CPU_CONTROL_L2_ENABLE;
1758 /* Clear out the cache */
1759 cpu_idcache_wbinv_all();
1762 * Set the control register. Note that bits 6:3 must always
1766 /* cpu_control(cpuctrlmask, cpuctrl);*/
1767 cpu_control(0xffffffff, cpuctrl);
/* Read the auxiliary control register (c1,c0,1)... */
1769 /* Make sure write coalescing is turned on */
1770 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
/* ...then set or clear the K bit to control write coalescing. */
1772 #ifdef XSCALE_NO_COALESCE_WRITES
1773 auxctl |= XSCALE_AUXCTL_K;
1775 auxctl &= ~XSCALE_AUXCTL_K;
/* Core3: low-locality reference and mini-data-cache attributes. */
1777 #ifdef CPU_XSCALE_CORE3
1778 auxctl |= XSCALE_AUXCTL_LLR;
1779 auxctl |= XSCALE_AUXCTL_MD_MASK;
/* Write the updated auxiliary control register back. */
1781 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1784 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425