1 /* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */
4 * Copyright (c) 2007 Olivier Houchard
5 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
8 * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed for the NetBSD Project by
21 * Wasabi Systems, Inc.
22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
41 * Copyright (c) 2001 Matt Thomas.
42 * Copyright (c) 1997,1998 Mark Brinicombe.
43 * Copyright (c) 1997 Causality Limited
44 * All rights reserved.
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. All advertising materials mentioning features or use of this software
55 * must display the following acknowledgement:
56 * This product includes software developed by Causality Limited.
57 * 4. The name of Causality Limited may not be used to endorse or promote
58 * products derived from this software without specific prior written
61 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
62 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
63 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
64 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
65 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
66 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
67 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * XScale core 3 assembly functions for CPU / MMU / TLB specific operations
76 #include <machine/armreg.h>
77 #include <machine/asm.h>
78 __FBSDID("$FreeBSD$");
/*
 * Cache geometry constants and CP15 helper macros.
 *
 * NOTE(review): this excerpt elides a number of original source lines
 * (several macro bodies below are visibly incomplete); confirm all of
 * these against the full file before editing.
 */
81 * Size of the XScale core D-cache.
83 #define DCACHE_SIZE 0x00008000
/* Literal pool entry holding the address of the global
   block_userspace_access flag (used by the non-INTR clean guard). */
85 .Lblock_userspace_access:
86 .word _C_LABEL(block_userspace_access)
89 * CPWAIT -- Canonical method to wait for CP15 update.
90 * From: Intel 80200 manual, section 2.3.3.
92 * NOTE: Clobbers the specified temp reg.
94 #define CPWAIT_BRANCH \
98 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
99 mov tmp, tmp /* wait for it to complete */ ;\
100 CPWAIT_BRANCH /* branch to next insn */
/*
 * CPWAIT folded into the return: "tmp, lsr #32" evaluates to 0 on ARM,
 * so "sub pc, lr, tmp lsr #32" returns to lr while still forcing the
 * CP15 read to complete (tmp is a data dependency of the pc write).
 */
102 #define CPWAIT_AND_RETURN_SHIFTER lsr #32
104 #define CPWAIT_AND_RETURN(tmp) \
105 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
106 /* Wait for it to complete and branch to the return address */ \
107 sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
/* L2 cache geometry: 512 KB (0x80000), 8-way, 32-byte lines. */
109 #define ARM_USE_L2_CACHE
111 #define L2_CACHE_SIZE 0x80000
112 #define L2_CACHE_WAYS 8
113 #define L2_CACHE_LINE_SIZE 32
114 #define L2_CACHE_SETS (L2_CACHE_SIZE / \
115 (L2_CACHE_WAYS * L2_CACHE_LINE_SIZE))
/* L1 D-cache geometry: 32 KB, 4-way, 32-byte lines. */
117 #define L1_DCACHE_SIZE 32 * 1024
118 #define L1_DCACHE_WAYS 4
119 #define L1_DCACHE_LINE_SIZE 32
120 #define L1_DCACHE_SETS (L1_DCACHE_SIZE / \
121 (L1_DCACHE_WAYS * L1_DCACHE_LINE_SIZE))
/*
 * Guard for the whole-cache clean loop: with CACHE_CLEAN_BLOCK_INTR the
 * BLOCK macro masks IRQ+FIQ in CPSR (saved copy apparently in r4, restored
 * by UNBLOCK via "msr cpsr_fsxc, r4"); otherwise it toggles the
 * block_userspace_access flag instead.  Both macro bodies are partially
 * elided in this excerpt -- TODO confirm against the full source.
 */
122 #ifdef CACHE_CLEAN_BLOCK_INTR
123 #define XSCALE_CACHE_CLEAN_BLOCK \
126 orr r0, r4, #(PSR_I | PSR_F) ; \
129 #define XSCALE_CACHE_CLEAN_UNBLOCK \
130 msr cpsr_fsxc, r4 ; \
133 #define XSCALE_CACHE_CLEAN_BLOCK \
135 ldr r4, .Lblock_userspace_access ; \
140 #define XSCALE_CACHE_CLEAN_UNBLOCK \
143 #endif /* CACHE_CLEAN_BLOCK_INTR */
/*
 * xscalec3_cache_syncI / _cache_purgeID / _cache_cleanID /
 * _cache_purgeD / _cache_cleanD
 *
 * Whole-L1-D-cache clean-and-invalidate by set/way (CP15 c7, c14, 2),
 * finished with a write-buffer drain.  The syncI/purgeID entry points
 * additionally flush the entire I-cache first; all five names share the
 * same set/way loop via the stacked EENTRY points.
 *
 * NOTE(review): the set/way loop initialisation and branch-back lines
 * are elided from this excerpt -- only the loop kernel and the
 * set-count compare are visible.  r2 is the set counter (compared with
 * L1_DCACHE_SETS); r1 presumably carries the way bits -- confirm.
 */
146 ENTRY_NP(xscalec3_cache_syncI)
147 xscalec3_cache_purgeID:
148 EENTRY_NP(xscalec3_cache_purgeID)
149 mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
150 EENTRY_NP(xscalec3_cache_cleanID)
151 EENTRY_NP(xscalec3_cache_purgeD)
152 EENTRY(xscalec3_cache_cleanD)
154 XSCALE_CACHE_CLEAN_BLOCK
/* Build set/way selector: set index (r2) shifted to the line-size
   boundary (32-byte lines => asl #5), OR'd with the way bits in r1. */
160 orr r3, r1, r2, asl #5
161 mcr p15, 0, r3, c7, c14, 2 /* clean and invalidate */
163 cmp r2, #L1_DCACHE_SETS
169 XSCALE_CACHE_CLEAN_UNBLOCK
170 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
173 EEND(xscalec3_cache_purgeID)
174 EEND(xscalec3_cache_cleanID)
175 EEND(xscalec3_cache_purgeD)
176 EEND(xscalec3_cache_cleanD)
177 END(xscalec3_cache_syncI)
/*
 * xscalec3_cache_purgeID_rng
 *
 * Clean+invalidate an address range in the L1 D-cache and flush the
 * matching I-cache lines, one line at a time, by MVA (c7,c14,1 and
 * c7,c5,1).  Sufficiently large ranges bail out to the whole-cache
 * xscalec3_cache_cleanID instead.
 *
 * NOTE(review): the size compare that sets C for the "bcs", the
 * line-address advance, and the loop back-branch are elided in this
 * excerpt.  Presumably r0 = start VA and another arg carries the
 * length, as is conventional for these _rng routines -- confirm.
 */
179 ENTRY(xscalec3_cache_purgeID_rng)
182 bcs _C_LABEL(xscalec3_cache_cleanID)
187 1: mcr p15, 0, r0, c7, c14, 1 /* clean/invalidate L1 D cache entry */
189 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
196 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
198 CPWAIT_AND_RETURN(r0)
199 END(xscalec3_cache_purgeID_rng)
/*
 * xscalec3_cache_syncI_rng
 *
 * Synchronise I-cache with D-cache for an address range: clean each L1
 * D-cache line (c7,c10,1) and flush the corresponding I-cache line
 * (c7,c5,1) by MVA, then drain the write buffer.  Large ranges branch
 * to the whole-cache xscalec3_cache_syncI.
 *
 * NOTE(review): the compare feeding "bcs" and the loop advance /
 * back-branch are elided in this excerpt.
 */
201 ENTRY(xscalec3_cache_syncI_rng)
203 bcs _C_LABEL(xscalec3_cache_syncI)
209 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
210 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
217 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
219 CPWAIT_AND_RETURN(r0)
220 END(xscalec3_cache_syncI_rng)
/*
 * xscalec3_cache_purgeD_rng
 *
 * Clean+invalidate an address range in the L1 D-cache by MVA
 * (c7,c14,1), then drain the write buffer.  Large ranges branch to the
 * whole-cache xscalec3_cache_cleanID instead.
 *
 * NOTE(review): the compare feeding "bcs" and the loop advance /
 * back-branch are elided in this excerpt.
 */
222 ENTRY(xscalec3_cache_purgeD_rng)
225 bcs _C_LABEL(xscalec3_cache_cleanID)
230 1: mcr p15, 0, r0, c7, c14, 1 /* Clean and invalidate D cache entry */
237 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
239 CPWAIT_AND_RETURN(r0)
240 END(xscalec3_cache_purgeD_rng)
/*
 * xscalec3_cache_cleanID_rng / xscalec3_cache_cleanD_rng
 *
 * Clean (write back without invalidating) an address range in the L1
 * D-cache by MVA (c7,c10,1), then drain the write buffer.  Large ranges
 * branch to the whole-cache xscalec3_cache_cleanID.  Both names resolve
 * to the same code since no I-cache work is needed for a clean.
 *
 * NOTE(review): the compare feeding "bcs" and the loop advance /
 * back-branch are elided in this excerpt.
 */
242 ENTRY(xscalec3_cache_cleanID_rng)
243 EENTRY(xscalec3_cache_cleanD_rng)
246 bcs _C_LABEL(xscalec3_cache_cleanID)
251 1: mcr p15, 0, r0, c7, c10, 1 /* clean L1 D cache entry */
259 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
261 CPWAIT_AND_RETURN(r0)
262 EEND(xscalec3_cache_cleanD_rng)
263 END(xscalec3_cache_cleanID_rng)
/*
 * xscalec3_l2cache_purge
 *
 * Whole-L2-cache purge by set/way using the coprocessor-opcode-1 CP15
 * op (p15, 1, c7, c15, 2), bracketed by a data memory barrier before
 * and a data write barrier + DMB after.
 *
 * NOTE(review): the set/way loop initialisation and back-branch are
 * elided in this excerpt; r2 is the set counter (compared with
 * L2_CACHE_SETS), r1 presumably the way bits -- confirm against the
 * XScale core 3 manual that c7,c15,2 is the L2 clean+invalidate op.
 */
265 ENTRY(xscalec3_l2cache_purge)
266 /* Clean-up the L2 cache */
267 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
/* set index shifted past the 32-byte line offset, OR'd with way bits */
273 orr r3, r1, r2, asl #5
274 mcr p15, 1, r3, c7, c15, 2
276 cmp r2, #L2_CACHE_SETS
281 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
284 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
286 END(xscalec3_l2cache_purge)
/*
 * xscalec3_l2cache_clean_rng
 *
 * Clean (write back) an address range in the L2 cache, one line at a
 * time by MVA (p15, 1, c7, c11, 1), between data memory barriers, then
 * drain the write buffer.
 *
 * NOTE(review): the loop advance and back-branch are elided in this
 * excerpt; presumably r0 = start VA walked in L2_CACHE_LINE_SIZE steps.
 */
288 ENTRY(xscalec3_l2cache_clean_rng)
289 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
295 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
303 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
304 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier */
306 CPWAIT_AND_RETURN(r0)
307 END(xscalec3_l2cache_clean_rng)
/*
 * xscalec3_l2cache_purge_rng
 *
 * Purge (clean then invalidate) an address range in the L2 cache by
 * MVA: c7,c11,1 cleans the line, c7,c7,1 invalidates it.  Bracketed by
 * data memory barriers with a write-buffer drain at the end.
 *
 * NOTE(review): the loop advance and back-branch are elided in this
 * excerpt; presumably r0 = start VA walked in L2_CACHE_LINE_SIZE steps.
 */
309 ENTRY(xscalec3_l2cache_purge_rng)
311 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
317 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
318 mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 D cache entry */
323 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
324 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier */
326 CPWAIT_AND_RETURN(r0)
327 END(xscalec3_l2cache_purge_rng)
/*
 * xscalec3_l2cache_flush_rng
 *
 * Invalidate (without cleaning -- dirty data is discarded) an address
 * range in the L2 cache by MVA (p15, 1, c7, c7, 1), between data
 * memory barriers, with a write-buffer drain at the end.
 *
 * NOTE(review): the loop advance and back-branch are elided in this
 * excerpt; presumably r0 = start VA walked in L2_CACHE_LINE_SIZE steps.
 */
329 ENTRY(xscalec3_l2cache_flush_rng)
330 mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
336 1: mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 cache line */
340 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
341 mcr p15, 0, r0, c7, c10, 5 /* data memory barrier */
342 CPWAIT_AND_RETURN(r0)
343 END(xscalec3_l2cache_flush_rng)
346 * Functions to set the MMU Translation Table Base register
348 * We need to clean and flush the cache as it uses virtual
349 * addresses that are about to change.
/*
 * xscalec3_setttb(u_int ttb -- in r0)
 *
 * Blocks interrupts (or userspace access, depending on
 * CACHE_CLEAN_BLOCK_INTR), cleans the whole L1 D-cache, invalidates
 * the I-cache and BTB, then loads the new translation table base
 * (marked L2-cacheable via the 0x18 attribute bits when
 * ARM_USE_L2_CACHE) and invalidates the unified TLB.
 *
 * NOTE(review): several lines are elided in this excerpt, including
 * the #else/#endif partners of both #ifdef blocks and the
 * interrupt-state save/restore -- the trailing #ifdef at the bottom
 * presumably guards the matching unblock sequence.  Confirm against
 * the full source.
 */
351 ENTRY(xscalec3_setttb)
352 #ifdef CACHE_CLEAN_BLOCK_INTR
354 orr r1, r3, #(PSR_I | PSR_F)
357 ldr r3, .Lblock_userspace_access
/* Preserve the new TTB value and caller state across the clean call. */
362 stmfd sp!, {r0-r3, lr}
363 bl _C_LABEL(xscalec3_cache_cleanID)
364 mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
365 mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
369 ldmfd sp!, {r0-r3, lr}
371 #ifdef ARM_USE_L2_CACHE
372 orr r0, r0, #0x18 /* cache the page table in L2 */
/* Load the new translation table base. */
375 mcr p15, 0, r0, c2, c0, 0
377 /* If we have updated the TTB we must flush the TLB */
378 mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
382 #ifdef CACHE_CLEAN_BLOCK_INTR
393 * These are the CPU-specific parts of the context switcher cpu_switch()
394 * These functions actually perform the TTB reload.
396 * NOTE: Special calling convention
397 * r1, r4-r13 must be preserved
/*
 * xscalec3_context_switch(new ttb in r0)
 *
 * Fast TTB reload for context switch: no cache clean is done here
 * because (per the comment below) the caches are guaranteed to hold
 * only kernel data/code at this point.  Loads the new TTB (marked
 * L2-cacheable when ARM_USE_L2_CACHE), invalidates the I+D TLB, and
 * returns via CPWAIT_AND_RETURN (clobbering only r0 and flags, per the
 * calling convention above).
 *
 * NOTE(review): the #endif matching the ARM_USE_L2_CACHE #ifdef is
 * elided from this excerpt.
 */
399 ENTRY(xscalec3_context_switch)
401 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
402 * Thus the data cache will contain only kernel data and the
403 * instruction cache will contain only kernel code, and all
404 * kernel mappings are shared by all processes.
406 #ifdef ARM_USE_L2_CACHE
407 orr r0, r0, #0x18 /* Cache the page table in L2 */
/* Load the new translation table base. */
410 mcr p15, 0, r0, c2, c0, 0
412 /* If we have updated the TTB we must flush the TLB */
413 mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
415 CPWAIT_AND_RETURN(r0)
416 END(xscalec3_context_switch)