1 /* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */
4 * Copyright (c) 2007 Olivier Houchard
5 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
8 * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed for the NetBSD Project by
21 * Wasabi Systems, Inc.
22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
41 * Copyright (c) 2001 Matt Thomas.
42 * Copyright (c) 1997,1998 Mark Brinicombe.
43 * Copyright (c) 1997 Causality Limited
44 * All rights reserved.
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. All advertising materials mentioning features or use of this software
55 * must display the following acknowledgement:
56 * This product includes software developed by Causality Limited.
57 * 4. The name of Causality Limited may not be used to endorse or promote
58 * products derived from this software without specific prior written
61 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
62 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
63 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
64 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
65 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
66 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
67 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * XScale core 3 assembly functions for CPU / MMU / TLB specific operations
76 #include <machine/asm.h>
77 __FBSDID("$FreeBSD$");
/* Total size, in bytes, of the XScale core 3 L1 D-cache (32 KB). */
80 * Size of the XScale core D-cache.
82 #define DCACHE_SIZE 0x00008000
/*
 * Literal-pool entry holding the address of the C variable
 * block_userspace_access; loaded (r4) by the cache-clean macros below.
 */
84 .Lblock_userspace_access:
85 .word _C_LABEL(block_userspace_access)
88 * CPWAIT -- Canonical method to wait for CP15 update.
89 * From: Intel 80200 manual, section 2.3.3.
91 * NOTE: Clobbers the specified temp reg.
93 #define CPWAIT_BRANCH \
97 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
98 mov tmp, tmp /* wait for it to complete */ ;\
99 CPWAIT_BRANCH /* branch to next insn */
/*
 * In ARM shifter semantics an immediate "lsr #32" shifts the operand
 * entirely out, yielding 0, so the sub below computes pc = lr - 0:
 * a return that is still serialized on the preceding mrc result.
 */
101 #define CPWAIT_AND_RETURN_SHIFTER lsr #32
103 #define CPWAIT_AND_RETURN(tmp) \
104 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
105 /* Wait for it to complete and branch to the return address */ \
106 sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
/* L2 unified cache geometry: 512 KB, 8 ways, 32-byte lines => sets. */
108 #define ARM_USE_L2_CACHE
110 #define L2_CACHE_SIZE 0x80000
111 #define L2_CACHE_WAYS 8
112 #define L2_CACHE_LINE_SIZE 32
113 #define L2_CACHE_SETS (L2_CACHE_SIZE / \
114 (L2_CACHE_WAYS * L2_CACHE_LINE_SIZE))
/* L1 D-cache geometry: 32 KB, 4 ways, 32-byte lines => sets. */
116 #define L1_DCACHE_SIZE 32 * 1024
117 #define L1_DCACHE_WAYS 4
118 #define L1_DCACHE_LINE_SIZE 32
119 #define L1_DCACHE_SETS (L1_DCACHE_SIZE / \
120 (L1_DCACHE_WAYS * L1_DCACHE_LINE_SIZE))
/*
 * XSCALE_CACHE_CLEAN_BLOCK / _UNBLOCK bracket the whole-cache clean
 * loops.  With CACHE_CLEAN_BLOCK_INTR they mask IRQ+FIQ in the CPSR
 * (I32_bit | F32_bit); otherwise they manipulate block_userspace_access
 * -- presumably to keep userland accesses out during the clean; the
 * macro bodies are partially elided here, so confirm against the full
 * file before relying on the details.
 */
121 #ifdef CACHE_CLEAN_BLOCK_INTR
122 #define XSCALE_CACHE_CLEAN_BLOCK \
125 orr r0, r4, #(I32_bit | F32_bit) ; \
128 #define XSCALE_CACHE_CLEAN_UNBLOCK \
132 #define XSCALE_CACHE_CLEAN_BLOCK \
134 ldr r4, .Lblock_userspace_access ; \
139 #define XSCALE_CACHE_CLEAN_UNBLOCK \
142 #endif /* CACHE_CLEAN_BLOCK_INTR */
/*
 * Whole-L1-D-cache clean (and, via the shared entry points, purge/sync)
 * by set/way.  The syncI/purgeID entries additionally flush the entire
 * I-cache before falling through into the common D-cache loop.
 * NOTE(review): the loop-counter setup and branch lines are not visible
 * in this chunk; r1/r2 appear to be the way/set indices -- confirm
 * against the full file.
 */
145 ENTRY_NP(xscalec3_cache_syncI)
146 ENTRY_NP(xscalec3_cache_purgeID)
147 mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
148 ENTRY_NP(xscalec3_cache_cleanID)
149 ENTRY_NP(xscalec3_cache_purgeD)
150 ENTRY(xscalec3_cache_cleanD)
/* Block interrupts (or userspace access) for the duration of the clean. */
152 XSCALE_CACHE_CLEAN_BLOCK
/* r3 = way | (set << 5): set/way operand for the CP15 line operation. */
158 orr r3, r1, r2, asl #5
159 mcr p15, 0, r3, c7, c14, 2 /* clean and invalidate */
161 cmp r2, #L1_DCACHE_SETS
167 XSCALE_CACHE_CLEAN_UNBLOCK
168 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
171 END(xscalec3_cache_syncI)
172 END(xscalec3_cache_purgeID)
173 END(xscalec3_cache_cleanID)
174 END(xscalec3_cache_purgeD)
175 END(xscalec3_cache_cleanD)
/*
 * xscalec3_cache_purgeID_rng(va, len):
 * clean+invalidate D-cache and flush I-cache entries over a virtual
 * range, line by line.  Large ranges branch (bcs) to the whole-cache
 * clean instead.  NOTE(review): the size comparison that sets carry,
 * and the per-line advance/loop branch, are not visible in this chunk.
 */
177 ENTRY(xscalec3_cache_purgeID_rng)
180 bcs _C_LABEL(xscalec3_cache_cleanID)
185 1: mcr p15, 0, r0, c7, c14, 1 /* clean/invalidate L1 D cache entry */
187 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
194 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
196 CPWAIT_AND_RETURN(r0)
197 END(xscalec3_cache_purgeID_rng)
/*
 * xscalec3_cache_syncI_rng(va, len):
 * synchronize I-cache with D-cache over a virtual range: clean each
 * D-cache line, then flush the corresponding I-cache line.  Large
 * ranges branch (bcs) to the whole-cache xscalec3_cache_syncI.
 * NOTE(review): range-size compare and loop advance not visible here.
 */
199 ENTRY(xscalec3_cache_syncI_rng)
201 bcs _C_LABEL(xscalec3_cache_syncI)
207 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
208 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
215 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
217 CPWAIT_AND_RETURN(r0)
218 END(xscalec3_cache_syncI_rng)
/*
 * xscalec3_cache_purgeD_rng(va, len):
 * clean+invalidate D-cache entries over a virtual range (no I-cache
 * work).  Large ranges branch (bcs) to the whole-cache clean.
 * NOTE(review): range-size compare and loop advance not visible here.
 */
220 ENTRY(xscalec3_cache_purgeD_rng)
223 bcs _C_LABEL(xscalec3_cache_cleanID)
228 1: mcr p15, 0, r0, c7, c14, 1 /* Clean and invalidate D cache entry */
235 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
237 CPWAIT_AND_RETURN(r0)
238 END(xscalec3_cache_purgeD_rng)
/*
 * xscalec3_cache_cleanID_rng / xscalec3_cache_cleanD_rng (shared body):
 * clean (without invalidating) L1 D-cache entries over a virtual range.
 * Large ranges branch (bcs) to the whole-cache clean.
 * NOTE(review): range-size compare and loop advance not visible here.
 */
240 ENTRY(xscalec3_cache_cleanID_rng)
241 ENTRY(xscalec3_cache_cleanD_rng)
244 bcs _C_LABEL(xscalec3_cache_cleanID)
249 1: mcr p15, 0, r0, c7, c10, 1 /* clean L1 D cache entry */
257 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
259 CPWAIT_AND_RETURN(r0)
260 END(xscalec3_cache_cleanID_rng)
261 END(xscalec3_cache_cleanD_rng)
/*
 * Clean+invalidate the entire L2 cache by set/way, bracketed by data
 * memory barriers and a data write barrier.
 * NOTE(review): the way/set loop-counter setup and branches are not
 * visible in this chunk; r1/r2 appear to be way/set indices.
 */
263 ENTRY(xscalec3_l2cache_purge)
264 /* Clean-up the L2 cache */
265 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
/* r3 = way | (set << 5): set/way operand for the L2 line operation. */
271 orr r3, r1, r2, asl #5
272 mcr p15, 1, r3, c7, c15, 2 /* clean and invalidate L2 line by set/way */
274 cmp r2, #L2_CACHE_SETS
279 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
282 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
284 END(xscalec3_l2cache_purge)
/*
 * xscalec3_l2cache_clean_rng(va, len):
 * clean L2 cache entries over a virtual range, line by line, with
 * memory barriers before and after.
 * NOTE(review): per-line advance and loop branch not visible here.
 */
286 ENTRY(xscalec3_l2cache_clean_rng)
287 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
293 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
301 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
302 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier */
304 CPWAIT_AND_RETURN(r0)
305 END(xscalec3_l2cache_clean_rng)
/*
 * xscalec3_l2cache_purge_rng(va, len):
 * clean then invalidate L2 cache entries over a virtual range (purge),
 * with memory barriers before and after.
 * NOTE(review): per-line advance and loop branch not visible here.
 */
307 ENTRY(xscalec3_l2cache_purge_rng)
309 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
315 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
316 mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 D cache entry */
321 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
322 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier */
324 CPWAIT_AND_RETURN(r0)
325 END(xscalec3_l2cache_purge_rng)
/*
 * xscalec3_l2cache_flush_rng(va, len):
 * invalidate (without cleaning) L2 cache entries over a virtual range.
 * Dirty data in the range is discarded, not written back.
 * NOTE(review): per-line advance and loop branch not visible here.
 */
327 ENTRY(xscalec3_l2cache_flush_rng)
328 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
334 1: mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 cache line */
338 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
339 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier */
340 CPWAIT_AND_RETURN(r0)
341 END(xscalec3_l2cache_flush_rng)
344 * Functions to set the MMU Translation Table Base register
346 * We need to clean and flush the cache as it uses virtual
347 * addresses that are about to change.
/*
 * xscalec3_setttb(ttb):
 * clean/invalidate the caches, load the new translation-table base
 * into CP15 c2, then invalidate the TLBs.  r0 holds the new TTB
 * physical address on entry.
 * NOTE(review): several lines (interrupt-state save/restore, the
 * function's END, the closing #endif bodies) are not visible in this
 * chunk -- confirm against the full file.
 */
349 ENTRY(xscalec3_setttb)
350 #ifdef CACHE_CLEAN_BLOCK_INTR
/* Mask IRQ and FIQ while the caches are cleaned. */
352 orr r1, r3, #(I32_bit | F32_bit)
355 ldr r3, .Lblock_userspace_access
/* Preserve r0 (new TTB) and lr across the call into the cache cleaner. */
360 stmfd sp!, {r0-r3, lr}
361 bl _C_LABEL(xscalec3_cache_cleanID)
362 mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
363 mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
367 ldmfd sp!, {r0-r3, lr}
369 #ifdef ARM_USE_L2_CACHE
370 orr r0, r0, #0x18 /* cache the page table in L2 */
/* Write the new translation-table base into CP15 c2 (TTBR). */
373 mcr p15, 0, r0, c2, c0, 0
375 /* If we have updated the TTB we must flush the TLB */
376 mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
380 #ifdef CACHE_CLEAN_BLOCK_INTR
391 * These are the CPU-specific parts of the context switcher cpu_switch().
392 * These functions actually perform the TTB reload.
394 * NOTE: Special calling convention
395 * r1, r4-r13 must be preserved
/*
 * xscalec3_context_switch(ttb):
 * fast-path TTB reload for a context switch -- no cache cleaning,
 * because (per the comment below) the caches were already purged.
 * r0 = new translation-table base.
 */
397 ENTRY(xscalec3_context_switch)
399 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
400 * Thus the data cache will contain only kernel data and the
401 * instruction cache will contain only kernel code, and all
402 * kernel mappings are shared by all processes.
404 #ifdef ARM_USE_L2_CACHE
405 orr r0, r0, #0x18 /* Cache the page table in L2 */
/* Write the new translation-table base into CP15 c2 (TTBR). */
408 mcr p15, 0, r0, c2, c0, 0
410 /* If we have updated the TTB we must flush the TLB */
411 mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
413 CPWAIT_AND_RETURN(r0)
414 END(xscalec3_context_switch)