1 /* $NetBSD: cpufunc_asm_arm8.S,v 1.2 2001/11/11 00:47:49 thorpej Exp $ */
4 * Copyright (c) 1997 ARM Limited
5 * Copyright (c) 1997 Causality Limited
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Causality Limited.
19 * 4. The name of Causality Limited may not be used to endorse or promote
20 * products derived from this software without specific prior written
23 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
24 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * ARM8 assembly functions for CPU / MMU / TLB specific operations
39 #include <machine/asm.h>
40 __FBSDID("$FreeBSD$");
/*
 * arm8_clock_config(clear, eor)
 *
 * Reconfigure the ARM8 clocking-mode register (CP15 c15, c0).
 *   r0 = mask of bits to clear in the clock register
 *   r1 = mask of bits to toggle (EOR) after clearing
 * Returns the previous clock register value in r0.
 *
 * Dynamic clocking (bit 0) is kept off while the register is being
 * rewritten, and the final value is written last.
 * NOTE(review): several interleaved lines (waits/branches) are elided
 * in this extraction — confirm against the full source.
 */
42 ENTRY(arm8_clock_config)
43 mrc p15, 0, r3, c15, c0, 0 /* Read the clock register */
44 bic r2, r3, #0x11 /* turn off dynamic clocking */
46 mcr p15, 0, r2, c15, c0, 0 /* Write clock register */
/* Build the new value from the saved old value in r3. */
48 bic r2, r3, r0 /* Clear bits */
49 eor r2, r2, r1 /* XOR bits */
50 bic r2, r2, #0x10 /* clear the L bit */
/* Install the new value with dynamic clocking still disabled first. */
52 bic r1, r2, #0x01 /* still keep dynamic clocking off */
53 mcr p15, 0, r1, c15, c0, 0 /* Write clock register */
/* Finally write the fully configured value. */
58 mcr p15, 0, r2, c15, c0, 0 /* Write clock register */
59 mov r0, r3 /* Return old value */
61 END(arm8_clock_config)
64 * Functions to set the MMU Translation Table Base register
66 * We need to clean and flush the cache as it uses virtual
67 * addresses that are about to change.
/*
 * arm8_setttb(ttb) — body fragment.
 * NOTE(review): the ENTRY() marker, the mrs that loads r3 with the
 * saved CPSR, and the final msr/return are elided in this extraction;
 * verify against the full source.
 *   r0 = new translation table base (physical address of the L1 table)
 */
/* Compute a CPSR value with IRQ+FIQ masked (r3 presumably holds the
 * saved CPSR — confirm; the mrs is not visible here). */
71 orr r1, r3, #(I32_bit | F32_bit)
/* Write back the virtually-indexed data cache before the mapping
 * changes; r0-r3/lr are preserved around the call. */
74 stmfd sp!, {r0-r3, lr}
75 bl _C_LABEL(arm8_cache_cleanID)
76 ldmfd sp!, {r0-r3, lr}
77 mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
/* Load the new translation table base (CP15 c2). */
80 mcr p15, 0, r0, c2, c0, 0
82 /* If we have updated the TTB we must flush the TLB */
83 mcr p15, 0, r0, c8, c7, 0
85 /* For good measure we will flush the IDC as well */
86 mcr p15, 0, r0, c7, c7, 0
88 /* Make sure that pipeline is emptied */
/*
 * arm8_tlb_flushID(): invalidate the entire unified (I+D) TLB
 * (CP15 c8, c7, 0).  The return instruction is elided in this view.
 */
99 ENTRY(arm8_tlb_flushID)
100 mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
102 END(arm8_tlb_flushID)
/*
 * arm8_tlb_flushID_SE(va): invalidate the single unified TLB entry
 * for the virtual address in r0 (CP15 c8, c7, 1).
 */
104 ENTRY(arm8_tlb_flushID_SE)
105 mcr p15, 0, r0, c8, c7, 1 /* flush I+D tlb single entry */
107 END(arm8_tlb_flushID_SE)
/*
 * arm8_cache_flushID(): invalidate (without write-back) the entire
 * unified I+D cache (CP15 c7, c7, 0).
 */
112 ENTRY(arm8_cache_flushID)
113 mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
115 END(arm8_cache_flushID)
/*
 * arm8_cache_flushID_E(entry): invalidate a single I+D cache entry,
 * selected by r0 (CP15 c7, c7, 1), without writing it back.
 */
117 ENTRY(arm8_cache_flushID_E)
118 mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
120 END(arm8_cache_flushID_E)
/*
 * arm8_cache_cleanID(): write back (clean) every dirty line in the
 * data cache by issuing a "clean I+D single entry" operation
 * (CP15 c7, c11, 1) over every cache index.
 *
 * NOTE(review): the loop scaffolding — the instructions that set up
 * and advance the index register r2 between operations, the loop
 * label, and the conditional branch after the adds — are elided in
 * this extraction; only the unrolled mcr sequence (16 entries per
 * outer iteration) and the outer-loop step are visible.  Confirm
 * against the full source.
 */
122 ENTRY(arm8_cache_cleanID)
/* Unrolled inner loop: clean 16 consecutive cache entries. */
126 mcr p15, 0, r2, c7, c11, 1
128 mcr p15, 0, r2, c7, c11, 1
130 mcr p15, 0, r2, c7, c11, 1
132 mcr p15, 0, r2, c7, c11, 1
134 mcr p15, 0, r2, c7, c11, 1
136 mcr p15, 0, r2, c7, c11, 1
138 mcr p15, 0, r2, c7, c11, 1
140 mcr p15, 0, r2, c7, c11, 1
142 mcr p15, 0, r2, c7, c11, 1
144 mcr p15, 0, r2, c7, c11, 1
146 mcr p15, 0, r2, c7, c11, 1
148 mcr p15, 0, r2, c7, c11, 1
150 mcr p15, 0, r2, c7, c11, 1
152 mcr p15, 0, r2, c7, c11, 1
154 mcr p15, 0, r2, c7, c11, 1
156 mcr p15, 0, r2, c7, c11, 1
/* Advance the outer-loop counter in r0; the branch that repeats the
 * loop while entries remain is not visible in this view. */
158 adds r0, r0, #0x04000000
162 END(arm8_cache_cleanID)
/*
 * arm8_cache_cleanID_E(entry): write back (clean) a single cache
 * entry, selected by r0 (CP15 c7, c11, 1), leaving it valid.
 */
164 ENTRY(arm8_cache_cleanID_E)
165 mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
167 END(arm8_cache_cleanID_E)
/*
 * arm8_cache_purgeID(): clean (write back) and then invalidate the
 * entire cache, walking every cache index with a clean-entry
 * (c7, c11, 1) immediately followed by a flush-entry (c7, c7, 1)
 * operation — see the rationale in the comment below.
 *
 * NOTE(review): as with arm8_cache_cleanID, the loop setup, the
 * per-entry index increments of r2, interrupt-disable/restore code
 * (r3 appears to hold a saved CPSR — confirm), the loop label, and
 * the closing branch/return are elided in this extraction.
 */
169 ENTRY(arm8_cache_purgeID)
173 * Clean and invalidate entry will not invalidate the entry
174 * if the line was already clean. (mcr p15, 0, rd, c7, 15, 1)
176 * Instead of using the clean and invalidate entry operation,
177 * use separate clean and invalidate entry operations:
179 * mcr p15, 0, rd, c7, c11, 1
180 * mcr p15, 0, rd, c7, c7, 1
/* Mask IRQ+FIQ while the cache is inconsistent (the mrs that loads
 * r3 and the msr that applies r2 are not visible here — confirm). */
186 orr r2, r3, #(I32_bit | F32_bit)
/* Unrolled inner loop: clean then invalidate 16 consecutive
 * cache entries indexed by r2. */
190 mcr p15, 0, r2, c7, c11, 1
191 mcr p15, 0, r2, c7, c7, 1
193 mcr p15, 0, r2, c7, c11, 1
194 mcr p15, 0, r2, c7, c7, 1
196 mcr p15, 0, r2, c7, c11, 1
197 mcr p15, 0, r2, c7, c7, 1
199 mcr p15, 0, r2, c7, c11, 1
200 mcr p15, 0, r2, c7, c7, 1
202 mcr p15, 0, r2, c7, c11, 1
203 mcr p15, 0, r2, c7, c7, 1
205 mcr p15, 0, r2, c7, c11, 1
206 mcr p15, 0, r2, c7, c7, 1
208 mcr p15, 0, r2, c7, c11, 1
209 mcr p15, 0, r2, c7, c7, 1
211 mcr p15, 0, r2, c7, c11, 1
212 mcr p15, 0, r2, c7, c7, 1
214 mcr p15, 0, r2, c7, c11, 1
215 mcr p15, 0, r2, c7, c7, 1
217 mcr p15, 0, r2, c7, c11, 1
218 mcr p15, 0, r2, c7, c7, 1
220 mcr p15, 0, r2, c7, c11, 1
221 mcr p15, 0, r2, c7, c7, 1
223 mcr p15, 0, r2, c7, c11, 1
224 mcr p15, 0, r2, c7, c7, 1
226 mcr p15, 0, r2, c7, c11, 1
227 mcr p15, 0, r2, c7, c7, 1
229 mcr p15, 0, r2, c7, c11, 1
230 mcr p15, 0, r2, c7, c7, 1
232 mcr p15, 0, r2, c7, c11, 1
233 mcr p15, 0, r2, c7, c7, 1
235 mcr p15, 0, r2, c7, c11, 1
236 mcr p15, 0, r2, c7, c7, 1
/* Advance the outer-loop counter; the repeat branch and the CPSR
 * restore are not visible in this view. */
238 adds r0, r0, #0x04000000
243 END(arm8_cache_purgeID)
/*
 * arm8_cache_purgeID_E(entry): clean (write back) and then invalidate
 * the single cache entry selected by r0, using separate clean
 * (c7, c11, 1) and flush (c7, c7, 1) operations — see below.
 *
 * NOTE(review): the interrupt-disable msr using r2 and the CPSR
 * restore/return are elided in this extraction (r3 appears to hold a
 * saved CPSR — confirm against the full source).
 */
245 ENTRY(arm8_cache_purgeID_E)
249 * Clean and invalidate entry will not invalidate the entry
250 * if the line was already clean. (mcr p15, 0, rd, c7, 15, 1)
252 * Instead of using the clean and invalidate entry operation,
253 * use separate clean and invalidate entry operations:
255 * mcr p15, 0, rd, c7, c11, 1
256 * mcr p15, 0, rd, c7, c7, 1
/* Mask IRQ+FIQ so the two-step purge is not interrupted midway. */
259 orr r2, r3, #(I32_bit | F32_bit)
261 mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
262 mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
265 END(arm8_cache_purgeID_E)
270 * This is the CPU-specific part of the context switcher cpu_switch().
271 * These functions actually perform the TTB reload.
273 * NOTE: Special calling convention:
274 * r1, r4-r13 must be preserved
/*
 * arm8_context_switch(ttb): perform the CPU-specific part of a
 * context switch — install the new translation table base in r0 and
 * flush the caches and TLB around it.
 *
 * Special calling convention: r1 and r4-r13 must be preserved (see
 * the header comment above); only r0 is consumed here.
 * NOTE(review): the pipeline-flush instructions after the final
 * comment and the return are elided in this extraction.
 */
276 ENTRY(arm8_context_switch)
277 /* For good measure we will flush the IDC as well */
278 mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
/* Load the new translation table base (CP15 c2). */
281 mcr p15, 0, r0, c2, c0, 0
283 /* If we have updated the TTB we must flush the TLB */
284 mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
287 /* For good measure we will flush the IDC as well */
288 mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
291 /* Make sure that pipeline is emptied */
295 END(arm8_context_switch)