1 /* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */
4 * Copyright (c) 2007 Olivier Houchard
5 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
8 * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed for the NetBSD Project by
21 * Wasabi Systems, Inc.
22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
41 * Copyright (c) 2001 Matt Thomas.
42 * Copyright (c) 1997,1998 Mark Brinicombe.
43 * Copyright (c) 1997 Causality Limited
44 * All rights reserved.
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. All advertising materials mentioning features or use of this software
55 * must display the following acknowledgement:
56 * This product includes software developed by Causality Limited.
57 * 4. The name of Causality Limited may not be used to endorse or promote
58 * products derived from this software without specific prior written
61 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
62 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
63 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
64 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
65 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
66 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
67 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * XScale core 3 assembly functions for CPU / MMU / TLB specific operations
76 #include <machine/asm.h>
77 __FBSDID("$FreeBSD$");
/*
 * NOTE(review): this file is an elided excerpt — original line numbers are
 * embedded at the start of each line and skip ranges, so several macro
 * bodies below are incomplete here.  Verify against the full source.
 */
80 * Size of the XScale core D-cache.
/* 32 KiB L1 data cache on the XScale core. */
82 #define DCACHE_SIZE 0x00008000
/* Literal-pool entry: address of the kernel's block_userspace_access flag,
 * loaded PC-relative by the cache-clean critical-section macros below. */
84 .Lblock_userspace_access:
85 .word _C_LABEL(block_userspace_access)
88 * CPWAIT -- Canonical method to wait for CP15 update.
89 * From: Intel 80200 manual, section 2.3.3.
91 * NOTE: Clobbers the specified temp reg.
/* NOTE(review): the `#define CPWAIT(tmp)` line and the CPWAIT_BRANCH body
 * are elided between these lines in this excerpt. */
93 #define CPWAIT_BRANCH \
97 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
98 mov tmp, tmp /* wait for it to complete */ ;\
99 CPWAIT_BRANCH /* branch to next insn */
/*
 * CPWAIT_AND_RETURN(tmp): wait for the CP15 side effects to complete and
 * return to the caller in one step.  An ARM shift of `lsr #32` yields a
 * zero result, so `sub pc, lr, tmp, lsr #32` computes pc = lr while still
 * serializing on the MRC result.  Clobbers tmp.
 */
101 #define CPWAIT_AND_RETURN_SHIFTER lsr #32
103 #define CPWAIT_AND_RETURN(tmp) \
104 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
105 /* Wait for it to complete and branch to the return address */ \
106 sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
/* L2 unified cache geometry: 512 KiB, 8-way, 32-byte lines -> 2048 sets. */
108 #define ARM_USE_L2_CACHE
110 #define L2_CACHE_SIZE 0x80000
111 #define L2_CACHE_WAYS 8
112 #define L2_CACHE_LINE_SIZE 32
113 #define L2_CACHE_SETS (L2_CACHE_SIZE / \
114 (L2_CACHE_WAYS * L2_CACHE_LINE_SIZE))
/* L1 D-cache geometry: 32 KiB, 4-way, 32-byte lines -> 256 sets. */
116 #define L1_DCACHE_SIZE 32 * 1024
117 #define L1_DCACHE_WAYS 4
118 #define L1_DCACHE_LINE_SIZE 32
119 #define L1_DCACHE_SETS (L1_DCACHE_SIZE / \
120 (L1_DCACHE_WAYS * L1_DCACHE_LINE_SIZE))
/*
 * Critical section around whole-cache cleaning.  With CACHE_CLEAN_BLOCK_INTR
 * the IRQ/FIQ bits are set in the CPSR; otherwise user-space access is
 * blocked via the block_userspace_access flag.
 * NOTE(review): both macro bodies are partially elided in this excerpt.
 */
121 #ifdef CACHE_CLEAN_BLOCK_INTR
122 #define XSCALE_CACHE_CLEAN_BLOCK \
125 orr r0, r4, #(I32_bit | F32_bit) ; \
128 #define XSCALE_CACHE_CLEAN_UNBLOCK \
132 #define XSCALE_CACHE_CLEAN_BLOCK \
134 ldr r4, .Lblock_userspace_access ; \
139 #define XSCALE_CACHE_CLEAN_UNBLOCK \
142 #endif /* CACHE_CLEAN_BLOCK_INTR */
/*
 * L1 cache maintenance entry points.  The labels deliberately fall through
 * into one shared set/way loop: purgeID first invalidates the whole
 * I-cache, then all variants clean-and-invalidate the entire L1 D-cache
 * by set/way and drain the write buffer.
 * NOTE(review): excerpt — the loop-control lines (r1/r2 initialization,
 * decrement/branch, critical-section bodies, and final return) are elided
 * between these lines; confirm against the full source.
 */
145 ENTRY_NP(xscalec3_cache_syncI)
146 ENTRY_NP(xscalec3_cache_purgeID)
147 mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
148 ENTRY_NP(xscalec3_cache_cleanID)
149 ENTRY_NP(xscalec3_cache_purgeD)
150 ENTRY(xscalec3_cache_cleanD)
/* Enter the cache-clean critical section (interrupts or user access). */
152 XSCALE_CACHE_CLEAN_BLOCK
/* Build the set/way operand: presumably r1 = way bits, r2 = set index
 * shifted into bits [9:5] — setup lines elided; verify. */
158 orr r3, r1, r2, asl #5
159 mcr p15, 0, r3, c7, c14, 2 /* clean and invalidate */
/* Loop over all L1_DCACHE_SETS sets (branch back elided here). */
161 cmp r2, #L1_DCACHE_SETS
167 XSCALE_CACHE_CLEAN_UNBLOCK
168 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
/*
 * xscalec3_cache_purgeID_rng(va, len):
 *   Clean+invalidate the L1 D-cache and invalidate the I-cache over a
 *   virtual address range, then drain the write buffer.
 *   For large ranges (the size compare that sets the carry is elided
 *   here) it falls back to a full xscalec3_cache_cleanID instead.
 * NOTE(review): excerpt — range alignment, address advance and the loop
 * branch are elided between these lines.
 */
172 ENTRY(xscalec3_cache_purgeID_rng)
175 bcs _C_LABEL(xscalec3_cache_cleanID)
180 1: mcr p15, 0, r0, c7, c14, 1 /* clean/invalidate L1 D cache entry */
182 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
189 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
191 CPWAIT_AND_RETURN(r0)
/*
 * xscalec3_cache_syncI_rng(va, len):
 *   Synchronize I-cache with D-cache over a VA range: clean each D-cache
 *   line (write back, no invalidate) and invalidate the matching I-cache
 *   line, then drain the write buffer.  Large ranges fall back to the
 *   whole-cache xscalec3_cache_syncI.
 * NOTE(review): excerpt — loop setup/advance lines elided.
 */
193 ENTRY(xscalec3_cache_syncI_rng)
195 bcs _C_LABEL(xscalec3_cache_syncI)
201 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
202 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
209 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
211 CPWAIT_AND_RETURN(r0)
/*
 * xscalec3_cache_purgeD_rng(va, len):
 *   Clean+invalidate L1 D-cache lines over a VA range (I-cache untouched),
 *   then drain the write buffer.  Large ranges fall back to the full
 *   xscalec3_cache_cleanID.
 * NOTE(review): excerpt — loop setup/advance lines elided.
 */
213 ENTRY(xscalec3_cache_purgeD_rng)
216 bcs _C_LABEL(xscalec3_cache_cleanID)
221 1: mcr p15, 0, r0, c7, c14, 1 /* Clean and invalidate D cache entry */
228 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
230 CPWAIT_AND_RETURN(r0)
/*
 * xscalec3_cache_cleanID_rng / xscalec3_cache_cleanD_rng(va, len):
 *   Clean (write back only, no invalidate) L1 D-cache lines over a VA
 *   range, then drain the write buffer.  Large ranges fall back to the
 *   full xscalec3_cache_cleanID.
 * NOTE(review): excerpt — loop setup/advance lines elided.
 */
231 ENTRY(xscalec3_cache_cleanID_rng)
232 ENTRY(xscalec3_cache_cleanD_rng)
235 bcs _C_LABEL(xscalec3_cache_cleanID)
240 1: mcr p15, 0, r0, c7, c10, 1 /* clean L1 D cache entry */
248 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
250 CPWAIT_AND_RETURN(r0)
/*
 * xscalec3_l2cache_purge():
 *   Walk the entire L2 cache by set/way (opcode-2 L2 op via p15 opc1=1
 *   c7,c15,2 — per the function name this cleans and invalidates; verify
 *   opcode against the XScale3 manual), fenced by data memory barriers
 *   and a data write barrier.
 * NOTE(review): excerpt — set/way loop initialization and branch lines
 * are elided between these lines.
 */
253 ENTRY(xscalec3_l2cache_purge)
254 /* Clean-up the L2 cache */
255 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
/* Set/way operand: presumably r1 = way bits, r2 = set index << 5. */
261 orr r3, r1, r2, asl #5
262 mcr p15, 1, r3, c7, c15, 2
264 cmp r2, #L2_CACHE_SETS
269 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
272 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
/*
 * xscalec3_l2cache_clean_rng(va, len):
 *   Clean (write back) L2 cache lines over a VA range, bracketed by data
 *   memory barriers and a data write barrier.
 * NOTE(review): excerpt — range alignment and loop advance/branch lines
 * are elided between these lines.
 */
275 ENTRY(xscalec3_l2cache_clean_rng)
276 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
282 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
290 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
291 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier */
293 CPWAIT_AND_RETURN(r0)
/*
 * xscalec3_l2cache_purge_rng(va, len):
 *   Clean then invalidate each L2 cache line in a VA range (two separate
 *   CP15 ops per line), bracketed by data memory barriers and a data
 *   write barrier.
 * NOTE(review): excerpt — range alignment and loop advance/branch lines
 * are elided between these lines.
 */
295 ENTRY(xscalec3_l2cache_purge_rng)
297 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
303 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
304 mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 D cache entry */
309 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
310 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier */
312 CPWAIT_AND_RETURN(r0)
/*
 * xscalec3_l2cache_flush_rng(va, len):
 *   Invalidate (no clean — dirty data is discarded) L2 cache lines over
 *   a VA range, bracketed by data memory barriers and a data write
 *   barrier.
 * NOTE(review): excerpt — range alignment and loop advance/branch lines
 * are elided between these lines.
 */
314 ENTRY(xscalec3_l2cache_flush_rng)
315 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
321 1: mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 cache line */
325 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
326 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier */
327 CPWAIT_AND_RETURN(r0)
329 * Functions to set the MMU Translation Table Base register
331 * We need to clean and flush the cache as it uses virtual
332 * addresses that are about to change.
/*
 * xscalec3_setttb(ttb):
 *   Install a new translation table base.  The L1 caches are cleaned and
 *   the I-cache/BTB invalidated first (caches use virtual addresses that
 *   are about to change), then TTBR is written and the unified TLB is
 *   invalidated.
 * NOTE(review): excerpt — CPSR save/restore, the #else arms of both
 * #ifdef blocks, and the final return are elided between these lines.
 */
334 ENTRY(xscalec3_setttb)
335 #ifdef CACHE_CLEAN_BLOCK_INTR
/* Mask IRQ+FIQ during the clean (CPSR read/write lines elided). */
337 orr r1, r3, #(I32_bit | F32_bit)
340 ldr r3, .Lblock_userspace_access
/* Preserve the new TTB value and lr across the cache-clean call. */
345 stmfd sp!, {r0-r3, lr}
346 bl _C_LABEL(xscalec3_cache_cleanID)
347 mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
348 mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
352 ldmfd sp!, {r0-r3, lr}
354 #ifdef ARM_USE_L2_CACHE
/* Set TTBR attribute bits so hardware table walks are L2-cacheable. */
355 orr r0, r0, #0x18 /* cache the page table in L2 */
/* Load the new translation table base (TTBR). */
358 mcr p15, 0, r0, c2, c0, 0
360 /* If we have updated the TTB we must flush the TLB */
361 mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
365 #ifdef CACHE_CLEAN_BLOCK_INTR
375 * These are the CPU-specific parts of the context switcher cpu_switch()
376 * These functions actually perform the TTB reload.
378 * NOTE: Special calling convention
379 * r1, r4-r13 must be preserved
/*
 * xscalec3_context_switch(ttb):
 *   Fast TTB reload for context switch — no cache clean is needed here
 *   because (per the comment below) the caches were already purged.
 *   Writes TTBR, invalidates the unified TLB, and returns via
 *   CPWAIT_AND_RETURN.  Special calling convention: r1, r4-r13 must be
 *   preserved (and are — only r0 and flags are touched here).
 * NOTE(review): excerpt — the comment delimiters around the next four
 * lines and the #endif of the #ifdef block are elided.
 */
381 ENTRY(xscalec3_context_switch)
383 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
384 * Thus the data cache will contain only kernel data and the
385 * instruction cache will contain only kernel code, and all
386 * kernel mappings are shared by all processes.
388 #ifdef ARM_USE_L2_CACHE
389 orr r0, r0, #0x18 /* Cache the page table in L2 */
/* Load the new translation table base (TTBR). */
392 mcr p15, 0, r0, c2, c0, 0
394 /* If we have updated the TTB we must flush the TLB */
395 mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
397 CPWAIT_AND_RETURN(r0)