1 /* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */
4 * Copyright (c) 2007 Olivier Houchard
5 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
8 * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed for the NetBSD Project by
21 * Wasabi Systems, Inc.
22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
41 * Copyright (c) 2001 Matt Thomas.
42 * Copyright (c) 1997,1998 Mark Brinicombe.
43 * Copyright (c) 1997 Causality Limited
44 * All rights reserved.
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. All advertising materials mentioning features or use of this software
55 * must display the following acknowledgement:
56 * This product includes software developed by Causality Limited.
57 * 4. The name of Causality Limited may not be used to endorse or promote
58 * products derived from this software without specific prior written
61 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
62 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
63 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
64 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
65 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
66 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
67 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * XScale core 3 assembly functions for CPU / MMU / TLB specific operations
76 #include <machine/armreg.h>
77 #include <machine/asm.h>
78 __FBSDID("$FreeBSD$");
81 * Size of the XScale core D-cache.
83 #define DCACHE_SIZE 0x00008000
85 .Lblock_userspace_access:
86 .word _C_LABEL(block_userspace_access)
89 * CPWAIT -- Canonical method to wait for CP15 update.
90 * From: Intel 80200 manual, section 2.3.3.
92 * NOTE: Clobbers the specified temp reg.
94 #define CPWAIT_BRANCH \
98 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
99 mov tmp, tmp /* wait for it to complete */ ;\
100 CPWAIT_BRANCH /* branch to next insn */
102 #define CPWAIT_AND_RETURN_SHIFTER lsr #32
104 #define CPWAIT_AND_RETURN(tmp) \
105 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
106 /* Wait for it to complete and branch to the return address */ \
107 sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
109 #define ARM_USE_L2_CACHE
111 #define L2_CACHE_SIZE 0x80000
112 #define L2_CACHE_WAYS 8
113 #define L2_CACHE_LINE_SIZE 32
114 #define L2_CACHE_SETS (L2_CACHE_SIZE / \
115 (L2_CACHE_WAYS * L2_CACHE_LINE_SIZE))
117 #define L1_DCACHE_SIZE 32 * 1024
118 #define L1_DCACHE_WAYS 4
119 #define L1_DCACHE_LINE_SIZE 32
120 #define L1_DCACHE_SETS (L1_DCACHE_SIZE / \
121 (L1_DCACHE_WAYS * L1_DCACHE_LINE_SIZE))
122 #ifdef CACHE_CLEAN_BLOCK_INTR
123 #define XSCALE_CACHE_CLEAN_BLOCK \
126 orr r0, r4, #(PSR_I | PSR_F) ; \
129 #define XSCALE_CACHE_CLEAN_UNBLOCK \
130 msr cpsr_fsxc, r4 ; \
133 #define XSCALE_CACHE_CLEAN_BLOCK \
135 ldr r4, .Lblock_userspace_access ; \
140 #define XSCALE_CACHE_CLEAN_UNBLOCK \
143 #endif /* CACHE_CLEAN_BLOCK_INTR */
146 ENTRY_NP(xscalec3_cache_syncI)
147 EENTRY_NP(xscalec3_cache_purgeID)
148 mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
149 EENTRY_NP(xscalec3_cache_cleanID)
150 EENTRY_NP(xscalec3_cache_purgeD)
151 EENTRY(xscalec3_cache_cleanD)
153 XSCALE_CACHE_CLEAN_BLOCK
159 orr r3, r1, r2, asl #5
160 mcr p15, 0, r3, c7, c14, 2 /* clean and invalidate */
162 cmp r2, #L1_DCACHE_SETS
168 XSCALE_CACHE_CLEAN_UNBLOCK
169 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
172 EEND(xscalec3_cache_purgeID)
173 EEND(xscalec3_cache_cleanID)
174 EEND(xscalec3_cache_purgeD)
175 EEND(xscalec3_cache_cleanD)
176 END(xscalec3_cache_syncI)
178 ENTRY(xscalec3_cache_purgeID_rng)
181 bcs _C_LABEL(xscalec3_cache_cleanID)
186 1: mcr p15, 0, r0, c7, c14, 1 /* clean/invalidate L1 D cache entry */
188 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
195 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
197 CPWAIT_AND_RETURN(r0)
198 END(xscalec3_cache_purgeID_rng)
200 ENTRY(xscalec3_cache_syncI_rng)
202 bcs _C_LABEL(xscalec3_cache_syncI)
208 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
209 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
216 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
218 CPWAIT_AND_RETURN(r0)
219 END(xscalec3_cache_syncI_rng)
221 ENTRY(xscalec3_cache_purgeD_rng)
224 bcs _C_LABEL(xscalec3_cache_cleanID)
229 1: mcr p15, 0, r0, c7, c14, 1 /* Clean and invalidate D cache entry */
236 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
238 CPWAIT_AND_RETURN(r0)
239 END(xscalec3_cache_purgeD_rng)
241 ENTRY(xscalec3_cache_cleanID_rng)
242 EENTRY(xscalec3_cache_cleanD_rng)
245 bcs _C_LABEL(xscalec3_cache_cleanID)
250 1: mcr p15, 0, r0, c7, c10, 1 /* clean L1 D cache entry */
258 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
260 CPWAIT_AND_RETURN(r0)
261 EEND(xscalec3_cache_cleanD_rng)
262 END(xscalec3_cache_cleanID_rng)
264 ENTRY(xscalec3_l2cache_purge)
265 /* Clean-up the L2 cache */
266 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
272 orr r3, r1, r2, asl #5
273 mcr p15, 1, r3, c7, c15, 2
275 cmp r2, #L2_CACHE_SETS
280 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
283 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
285 END(xscalec3_l2cache_purge)
287 ENTRY(xscalec3_l2cache_clean_rng)
288 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
294 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
302 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
303 mcr p15, 0, r0, c7, c10, 5
305 CPWAIT_AND_RETURN(r0)
306 END(xscalec3_l2cache_clean_rng)
308 ENTRY(xscalec3_l2cache_purge_rng)
310 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
316 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
317 mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 D cache entry */
322 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
323 mcr p15, 0, r0, c7, c10, 5
325 CPWAIT_AND_RETURN(r0)
326 END(xscalec3_l2cache_purge_rng)
328 ENTRY(xscalec3_l2cache_flush_rng)
329 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
335 1: mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 cache line */
339 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
340 mcr p15, 0, r0, c7, c10, 5
341 CPWAIT_AND_RETURN(r0)
342 END(xscalec3_l2cache_flush_rng)
345 * Functions to set the MMU Translation Table Base register
347 * We need to clean and flush the cache as it uses virtual
348 * addresses that are about to change.
350 ENTRY(xscalec3_setttb)
351 #ifdef CACHE_CLEAN_BLOCK_INTR
353 orr r1, r3, #(PSR_I | PSR_F)
356 ldr r3, .Lblock_userspace_access
361 stmfd sp!, {r0-r3, lr}
362 bl _C_LABEL(xscalec3_cache_cleanID)
363 mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
364 mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
368 ldmfd sp!, {r0-r3, lr}
370 #ifdef ARM_USE_L2_CACHE
371 orr r0, r0, #0x18 /* cache the page table in L2 */
374 mcr p15, 0, r0, c2, c0, 0
376 /* If we have updated the TTB we must flush the TLB */
377 mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
381 #ifdef CACHE_CLEAN_BLOCK_INTR
392 * These is the CPU-specific parts of the context switcher cpu_switch()
393 * These functions actually perform the TTB reload.
395 * NOTE: Special calling convention
396 * r1, r4-r13 must be preserved
398 ENTRY(xscalec3_context_switch)
400 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
401 * Thus the data cache will contain only kernel data and the
402 * instruction cache will contain only kernel code, and all
403 * kernel mappings are shared by all processes.
405 #ifdef ARM_USE_L2_CACHE
406 orr r0, r0, #0x18 /* Cache the page table in L2 */
409 mcr p15, 0, r0, c2, c0, 0
411 /* If we have updated the TTB we must flush the TLB */
412 mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
414 CPWAIT_AND_RETURN(r0)
415 END(xscalec3_context_switch)