1 /* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */
4 * Copyright (c) 2007 Olivier Houchard
5 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
8 * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed for the NetBSD Project by
21 * Wasabi Systems, Inc.
22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
41 * Copyright (c) 2001 Matt Thomas.
42 * Copyright (c) 1997,1998 Mark Brinicombe.
43 * Copyright (c) 1997 Causality Limited
44 * All rights reserved.
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. All advertising materials mentioning features or use of this software
55 * must display the following acknowledgement:
56 * This product includes software developed by Causality Limited.
57 * 4. The name of Causality Limited may not be used to endorse or promote
58 * products derived from this software without specific prior written
61 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
62 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
63 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
64 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
65 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
66 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
67 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * XScale core 3 assembly functions for CPU / MMU / TLB specific operations
76 #include <machine/asm.h>
77 __FBSDID("$FreeBSD$");
79 #include <machine/armreg.h>
82 * Size of the XScale core D-cache.
84 #define DCACHE_SIZE 0x00008000
87 * CPWAIT -- Canonical method to wait for CP15 update.
88 * From: Intel 80200 manual, section 2.3.3.
90 * NOTE: Clobbers the specified temp reg.
92 #define CPWAIT_BRANCH \
96 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
97 mov tmp, tmp /* wait for it to complete */ ;\
98 CPWAIT_BRANCH /* branch to next insn */
100 #define CPWAIT_AND_RETURN_SHIFTER lsr #32
102 #define CPWAIT_AND_RETURN(tmp) \
103 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
104 /* Wait for it to complete and branch to the return address */ \
105 sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
107 #define ARM_USE_L2_CACHE
109 #define L2_CACHE_SIZE 0x80000
110 #define L2_CACHE_WAYS 8
111 #define L2_CACHE_LINE_SIZE 32
112 #define L2_CACHE_SETS (L2_CACHE_SIZE / \
113 (L2_CACHE_WAYS * L2_CACHE_LINE_SIZE))
115 #define L1_DCACHE_SIZE 32 * 1024
116 #define L1_DCACHE_WAYS 4
117 #define L1_DCACHE_LINE_SIZE 32
118 #define L1_DCACHE_SETS (L1_DCACHE_SIZE / \
119 (L1_DCACHE_WAYS * L1_DCACHE_LINE_SIZE))
120 #ifdef CACHE_CLEAN_BLOCK_INTR
121 #define XSCALE_CACHE_CLEAN_BLOCK \
124 orr r0, r4, #(PSR_I | PSR_F) ; \
127 #define XSCALE_CACHE_CLEAN_UNBLOCK \
128 msr cpsr_fsxc, r4 ; \
131 #define XSCALE_CACHE_CLEAN_BLOCK
132 #define XSCALE_CACHE_CLEAN_UNBLOCK
133 #endif /* CACHE_CLEAN_BLOCK_INTR */
/*
 * Whole-cache clean/purge, with stacked entry points sharing one tail:
 *   cache_syncI / cache_purgeID  -- also invalidate the entire I-cache first
 *   cache_cleanID / cache_purgeD / cache_cleanD -- D-side only
 * The common tail walks the L1 D-cache by set/way, cleaning and
 * invalidating every line, then drains the write buffer.
 *
 * NOTE(review): the set/way loop initialisation and its back-branch are
 * elided from this view of the file; comments below cover only the
 * visible instructions.  Register roles (r1 = way bits, r2 = set index)
 * are inferred from the visible orr/cmp -- confirm against full source.
 */
136 ENTRY_NP(xscalec3_cache_syncI)
137 EENTRY_NP(xscalec3_cache_purgeID)
138 mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
139 EENTRY_NP(xscalec3_cache_cleanID)
140 EENTRY_NP(xscalec3_cache_purgeD)
141 EENTRY(xscalec3_cache_cleanD)
143 XSCALE_CACHE_CLEAN_BLOCK /* optionally mask IRQ+FIQ around the walk */
/* build the set/way operand: set index shifted past the 32-byte line offset */
149 orr r3, r1, r2, asl #5
150 mcr p15, 0, r3, c7, c14, 2 /* clean and invalidate */
152 cmp r2, #L1_DCACHE_SETS /* all sets visited? (loop branch elided) */
158 XSCALE_CACHE_CLEAN_UNBLOCK /* restore saved PSR if interrupts were blocked */
159 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
162 EEND(xscalec3_cache_purgeID)
163 EEND(xscalec3_cache_cleanID)
164 EEND(xscalec3_cache_purgeD)
165 EEND(xscalec3_cache_cleanD)
166 END(xscalec3_cache_syncI)
/*
 * Clean+invalidate an address range in the L1 D-cache and invalidate the
 * matching I-cache lines (MVA-based, one 32-byte line per iteration).
 * NOTE(review): the range/size compare that sets the condition for the
 * bcs, and the loop's address-advance and back-branch, are elided from
 * this view -- presumably r0 = start VA and r1 = length, per the other
 * _rng functions; confirm against full source.
 */
168 ENTRY(xscalec3_cache_purgeID_rng)
171 bcs _C_LABEL(xscalec3_cache_cleanID) /* large range: full clean is cheaper */
176 1: mcr p15, 0, r0, c7, c14, 1 /* clean/invalidate L1 D cache entry */
178 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
185 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
187 CPWAIT_AND_RETURN(r0) /* wait for CP15 side effects, then return */
188 END(xscalec3_cache_purgeID_rng)
/*
 * Synchronise I-cache with D-cache over an address range: clean each L1
 * D-cache line (write dirty data back) and invalidate the matching
 * I-cache line, so newly written code becomes fetchable.
 * NOTE(review): the range compare and loop advance/back-branch are
 * elided from this view; register convention assumed r0 = VA, r1 = len.
 */
190 ENTRY(xscalec3_cache_syncI_rng)
192 bcs _C_LABEL(xscalec3_cache_syncI) /* large range: whole-cache sync instead */
198 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
199 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
206 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
208 CPWAIT_AND_RETURN(r0) /* wait for CP15 side effects, then return */
209 END(xscalec3_cache_syncI_rng)
/*
 * Clean+invalidate an address range in the L1 D-cache only (no I-cache
 * maintenance).  One 32-byte line per iteration, MVA-based.
 * NOTE(review): range compare and loop advance/back-branch elided from
 * this view; assumed r0 = VA, r1 = len per the other _rng functions.
 */
211 ENTRY(xscalec3_cache_purgeD_rng)
214 bcs _C_LABEL(xscalec3_cache_cleanID) /* large range: fall back to full clean */
219 1: mcr p15, 0, r0, c7, c14, 1 /* Clean and invalidate D cache entry */
226 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
228 CPWAIT_AND_RETURN(r0) /* wait for CP15 side effects, then return */
229 END(xscalec3_cache_purgeD_rng)
/*
 * Clean (write back, without invalidating) an address range in the L1
 * D-cache.  cleanID_rng and cleanD_rng share the same body: on this
 * core the visible maintenance op is D-side only.
 * NOTE(review): range compare and loop advance/back-branch elided from
 * this view; assumed r0 = VA, r1 = len per the other _rng functions.
 */
231 ENTRY(xscalec3_cache_cleanID_rng)
232 EENTRY(xscalec3_cache_cleanD_rng)
235 bcs _C_LABEL(xscalec3_cache_cleanID) /* large range: whole-cache clean instead */
240 1: mcr p15, 0, r0, c7, c10, 1 /* clean L1 D cache entry */
248 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
250 CPWAIT_AND_RETURN(r0) /* wait for CP15 side effects, then return */
251 EEND(xscalec3_cache_cleanD_rng)
252 END(xscalec3_cache_cleanID_rng)
/*
 * Clean+invalidate the entire unified L2 cache by set/way, bracketed by
 * data memory barriers.  opc1 = 1 on the mcr selects the L2 maintenance
 * operations on XScale core 3.
 * NOTE(review): the set/way loop initialisation and back-branch are
 * elided from this view; r1/r2 roles (way bits / set index) inferred
 * from the visible orr/cmp -- confirm against full source.
 */
254 ENTRY(xscalec3_l2cache_purge)
255 /* Clean-up the L2 cache */
256 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
/* build set/way operand: set index shifted past the 32-byte line offset */
262 orr r3, r1, r2, asl #5
263 mcr p15, 1, r3, c7, c15, 2 /* clean+invalidate L2 line by set/way */
265 cmp r2, #L2_CACHE_SETS /* all sets visited? (loop branch elided) */
270 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
273 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
275 END(xscalec3_l2cache_purge)
/*
 * Clean (write back) an address range in the L2 cache, MVA-based, one
 * line per iteration, bracketed by data memory barriers.
 * NOTE(review): loop set-up, address advance and back-branch are elided
 * from this view; assumed r0 = VA, r1 = len per the other _rng helpers.
 */
277 ENTRY(xscalec3_l2cache_clean_rng)
278 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
284 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
292 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
293 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier before returning */
295 CPWAIT_AND_RETURN(r0) /* wait for CP15 side effects, then return */
296 END(xscalec3_l2cache_clean_rng)
/*
 * Purge (clean then invalidate) an address range in the L2 cache.
 * XScale core 3 has no single clean+invalidate-by-MVA L2 op, so each
 * line is cleaned (c7,c11,1) and then invalidated (c7,c7,1) separately.
 * NOTE(review): loop set-up, address advance and back-branch are elided
 * from this view; assumed r0 = VA, r1 = len per the other _rng helpers.
 */
298 ENTRY(xscalec3_l2cache_purge_rng)
300 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
306 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
307 mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 D cache entry */
312 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
313 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier before returning */
315 CPWAIT_AND_RETURN(r0) /* wait for CP15 side effects, then return */
316 END(xscalec3_l2cache_purge_rng)
/*
 * Invalidate (discard without writing back) an address range in the L2
 * cache, MVA-based, one line per iteration.
 * NOTE(review): loop set-up, address advance and back-branch are elided
 * from this view; assumed r0 = VA, r1 = len per the other _rng helpers.
 */
318 ENTRY(xscalec3_l2cache_flush_rng)
319 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
325 1: mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 cache line */
329 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
330 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier before returning */
331 CPWAIT_AND_RETURN(r0) /* wait for CP15 side effects, then return */
332 END(xscalec3_l2cache_flush_rng)
335 * Functions to set the MMU Translation Table Base register
337 * We need to clean and flush the cache as it uses virtual
338 * addresses that are about to change.
/*
 * Set the MMU Translation Table Base register (CP15 c2) to r0.
 * The entire L1 D-cache must be cleaned and the I-cache/BTB and TLBs
 * invalidated first, because cached entries are tagged with virtual
 * addresses that are about to be remapped.
 * In: r0 = physical address of the new L1 translation table.
 * NOTE(review): several lines (msr to apply the masked PSR, #endif
 * lines, the function's END, the trailing CPWAIT/return) are elided
 * from this view; comments cover only the visible instructions.
 */
340 ENTRY(xscalec3_setttb)
341 #ifdef CACHE_CLEAN_BLOCK_INTR
/* block IRQ+FIQ while the cache is in a partially-cleaned state */
343 orr r1, r3, #(PSR_I | PSR_F)
/* preserve r0 (new TTB) and lr across the cache-clean call */
346 stmfd sp!, {r0-r3, lr}
347 bl _C_LABEL(xscalec3_cache_cleanID)
348 mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
349 mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
353 ldmfd sp!, {r0-r3, lr} /* restore the saved TTB value and lr */
355 #ifdef ARM_USE_L2_CACHE
356 orr r0, r0, #0x18 /* cache the page table in L2 */
/* load the new translation table base */
359 mcr p15, 0, r0, c2, c0, 0
361 /* If we have updated the TTB we must flush the TLB */
362 mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
366 #ifdef CACHE_CLEAN_BLOCK_INTR
375 * These are the CPU-specific parts of the context switcher cpu_switch()
376 * These functions actually perform the TTB reload.
378 * NOTE: Special calling convention
379 * r1, r4-r13 must be preserved
/*
 * Perform the TTB reload for a context switch: load the new translation
 * table base from r0 and invalidate the TLBs.  Unlike setttb, no cache
 * clean is done here -- see the comment below on CF_CACHE_PURGE_ID.
 * In: r0 = physical address of the new process's L1 translation table.
 * NOTE(review): the opening of the block comment (line 382) and the
 * #endif for ARM_USE_L2_CACHE are elided from this view.
 */
381 ENTRY(xscalec3_context_switch)
383 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
384 * Thus the data cache will contain only kernel data and the
385 * instruction cache will contain only kernel code, and all
386 * kernel mappings are shared by all processes.
388 #ifdef ARM_USE_L2_CACHE
389 orr r0, r0, #0x18 /* Cache the page table in L2 */
/* load the new translation table base */
392 mcr p15, 0, r0, c2, c0, 0
394 /* If we have updated the TTB we must flush the TLB */
395 mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
397 CPWAIT_AND_RETURN(r0) /* wait for CP15 side effects, then return */
398 END(xscalec3_context_switch)