1 /* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
7 * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
40 * Copyright (c) 2001 Matt Thomas.
41 * Copyright (c) 1997,1998 Mark Brinicombe.
42 * Copyright (c) 1997 Causality Limited
43 * All rights reserved.
45 * Redistribution and use in source and binary forms, with or without
46 * modification, are permitted provided that the following conditions
48 * 1. Redistributions of source code must retain the above copyright
49 * notice, this list of conditions and the following disclaimer.
50 * 2. Redistributions in binary form must reproduce the above copyright
51 * notice, this list of conditions and the following disclaimer in the
52 * documentation and/or other materials provided with the distribution.
53 * 3. All advertising materials mentioning features or use of this software
54 * must display the following acknowledgement:
55 * This product includes software developed by Causality Limited.
56 * 4. The name of Causality Limited may not be used to endorse or promote
57 * products derived from this software without specific prior written
60 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
61 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
62 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
63 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
64 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
65 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
66 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
67 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
68 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
69 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72 * XScale assembly functions for CPU / MMU / TLB specific operations
74 #include <machine/armreg.h>
75 #include <machine/asm.h>
76 __FBSDID("$FreeBSD$");
79 * Size of the XScale core D-cache.
/* 32 KB; this also sizes the cache-clean bounce areas used below. */
81 #define DCACHE_SIZE 0x00008000
84 * CPWAIT -- Canonical method to wait for CP15 update.
85 * From: Intel 80200 manual, section 2.3.3.
87 * NOTE: Clobbers the specified temp reg.
/* NOTE(review): the "#define CPWAIT(tmp)" line and the CPWAIT_BRANCH
 * body appear to be elided from this chunk; the three continuation
 * lines below belong to those macros -- confirm against the full file. */
89 #define CPWAIT_BRANCH \
93 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
94 mov tmp, tmp /* wait for it to complete */ ;\
95 CPWAIT_BRANCH /* branch to next insn */
/*
 * "lsr #32" of a register yields 0 on ARM, so the sub below computes
 * pc = lr - 0: a return whose address calculation has a data dependency
 * on the mrc result, serializing the pipeline on the CP15 update.
 */
97 #define CPWAIT_AND_RETURN_SHIFTER lsr #32
99 #define CPWAIT_AND_RETURN(tmp) \
100 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
101 /* Wait for it to complete and branch to the return address */ \
102 sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
/* NOTE(review): orphaned function tail -- the ENTRY() line for this
 * routine (presumably xscale_cpwait) is elided from this chunk. */
105 CPWAIT_AND_RETURN(r0)
109 * We need a separate cpu_control() entry point, since we have to
110 * invalidate the Branch Target Buffer in the event the BPRD bit
111 * changes in the control register.
/*
 * xscale_control(clear, set):
 *   r0 = bits to clear in the CP15 c1 control register
 *   r1 = bits to toggle (eor) in
 *   returns the previous control register value in r0
 * NOTE(review): C prototype inferred from the bic/eor sequence below --
 * confirm against the cpufunc prototypes.
 */
113 ENTRY(xscale_control)
114 mrc p15, 0, r3, c1, c0, 0 /* Read the control register */
115 bic r2, r3, r0 /* Clear bits */
116 eor r2, r2, r1 /* XOR bits */
118 teq r2, r3 /* Only write if there was a change */
119 mcrne p15, 0, r0, c7, c5, 6 /* Invalidate the BTB */
120 mcrne p15, 0, r2, c1, c0, 0 /* Write new control register */
121 mov r0, r3 /* Return old value */
/* r1 is clobbered as the CPWAIT scratch register. */
123 CPWAIT_AND_RETURN(r1)
127 * Functions to set the MMU Translation Table Base register
129 * We need to clean and flush the cache as it uses virtual
130 * addresses that are about to change.
/* NOTE(review): the ENTRY() line for this routine and several
 * instructions (the mrs/msr interrupt-mask save/restore, closing
 * #else/#endif) are elided from this chunk; only surviving lines
 * are annotated below. */
133 #ifdef CACHE_CLEAN_BLOCK_INTR
/* Mask IRQ and FIQ while the caches are cleaned; r3 presumably holds
 * the saved CPSR (the mrs that loads it is not visible here). */
135 orr r1, r3, #(PSR_I | PSR_F)
/* Preserve the new TTB value (r0) and call-clobbered regs across the call. */
138 stmfd sp!, {r0-r3, lr}
139 bl _C_LABEL(xscale_cache_cleanID)
140 mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
141 mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
145 ldmfd sp!, {r0-r3, lr}
/* Load the new translation table base (r0) into CP15 c2. */
148 mcr p15, 0, r0, c2, c0, 0
150 /* If we have updated the TTB we must flush the TLB */
151 mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
153 /* The cleanID above means we only need to flush the I cache here */
154 mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
158 #ifdef CACHE_CLEAN_BLOCK_INTR
/*
 * Invalidate the single I-TLB and D-TLB entries translating the
 * virtual address in r0.
 */
168 ENTRY(xscale_tlb_flushID_SE)
169 mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
170 mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
171 CPWAIT_AND_RETURN(r0)
172 END(xscale_tlb_flushID_SE)
/* Invalidate (no write-back) the entire I- and D-caches. */
177 ENTRY(xscale_cache_flushID)
178 mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
179 CPWAIT_AND_RETURN(r0)
180 END(xscale_cache_flushID)
/* Invalidate the entire I-cache. */
182 ENTRY(xscale_cache_flushI)
183 mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
184 CPWAIT_AND_RETURN(r0)
185 END(xscale_cache_flushI)
/* Invalidate (no write-back) the entire D-cache. */
187 ENTRY(xscale_cache_flushD)
188 mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
189 CPWAIT_AND_RETURN(r0)
190 END(xscale_cache_flushD)
/* Invalidate the I-cache line containing the virtual address in r0. */
192 ENTRY(xscale_cache_flushI_SE)
193 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
194 CPWAIT_AND_RETURN(r0)
195 END(xscale_cache_flushI_SE)
/* Invalidate the D-cache line containing the virtual address in r0. */
197 ENTRY(xscale_cache_flushD_SE)
199 * Errata (rev < 2): Must clean-dcache-line to an address
200 * before invalidate-dcache-line to an address, or dirty
201 * bits will not be cleared in the dcache array.
/* Erratum workaround: clean the line before invalidating it. */
203 mcr p15, 0, r0, c7, c10, 1
204 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
205 CPWAIT_AND_RETURN(r0)
206 END(xscale_cache_flushD_SE)
/* Clean (write back, without invalidating) the D-cache line for the
 * virtual address in r0. */
208 ENTRY(xscale_cache_cleanD_E)
209 mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
210 CPWAIT_AND_RETURN(r0)
211 END(xscale_cache_cleanD_E)
214 * Information for the XScale cache clean/purge functions:
216 * * Virtual address of the memory region to use
217 * * Size of memory region
219 * Note the virtual address for the Data cache clean operation
220 * does not need to be backed by physical memory, since no loads
221 * will actually be performed by the allocate-line operation.
223 * Note that the Mini-Data cache MUST be cleaned by executing
224 * loads from memory mapped into a region reserved exclusively
225 * for cleaning of the Mini-Data cache.
/* NOTE(review): the .word initializers that should follow each of the
 * four labels below (and any section/alignment directives) are elided
 * from this chunk -- confirm against the full file before editing. */
229 .global _C_LABEL(xscale_cache_clean_addr)
230 _C_LABEL(xscale_cache_clean_addr):
233 .global _C_LABEL(xscale_cache_clean_size)
234 _C_LABEL(xscale_cache_clean_size):
237 .global _C_LABEL(xscale_minidata_clean_addr)
238 _C_LABEL(xscale_minidata_clean_addr):
241 .global _C_LABEL(xscale_minidata_clean_size)
242 _C_LABEL(xscale_minidata_clean_size):
/*
 * PC-relative literal-pool entries so the code above/below can load
 * the addresses of the variables without absolute addressing.
 */
247 .Lxscale_cache_clean_addr:
248 .word _C_LABEL(xscale_cache_clean_addr)
249 .Lxscale_cache_clean_size:
250 .word _C_LABEL(xscale_cache_clean_size)
252 .Lxscale_minidata_clean_addr:
253 .word _C_LABEL(xscale_minidata_clean_addr)
254 .Lxscale_minidata_clean_size:
255 .word _C_LABEL(xscale_minidata_clean_size)
/*
 * Prologue/epilogue shared by the cache clean/purge routines:
 * optionally mask IRQ+FIQ (CACHE_CLEAN_BLOCK_INTR), load the current
 * bounce-area address/size from .Lxscale_cache_clean_addr, and toggle
 * between the two DCACHE_SIZE bounce areas on every clean.
 * NOTE(review): several macro-body continuation lines (mrs/msr, the
 * str that writes the toggled address back) are elided from this
 * chunk; do not modify these continuations without the full file.
 */
257 #ifdef CACHE_CLEAN_BLOCK_INTR
258 #define XSCALE_CACHE_CLEAN_BLOCK \
260 orr r0, r3, #(PSR_I | PSR_F) ; \
263 #define XSCALE_CACHE_CLEAN_UNBLOCK \
266 #define XSCALE_CACHE_CLEAN_BLOCK
268 #define XSCALE_CACHE_CLEAN_UNBLOCK
269 #endif /* CACHE_CLEAN_BLOCK_INTR */
271 #define XSCALE_CACHE_CLEAN_PROLOGUE \
272 XSCALE_CACHE_CLEAN_BLOCK ; \
273 ldr r2, .Lxscale_cache_clean_addr ; \
274 ldmia r2, {r0, r1} ; \
278 * The XScale core has a strange cache eviction bug, which \
279 * requires us to use 2x the cache size for the cache clean \
280 * and for that area to be aligned to 2 * cache size. \
282 * The work-around is to use 2 areas for cache clean, and to \
283 * alternate between them whenever this is done. No one knows \
284 * why the work-around works (mmm!). \
286 eor r0, r0, #(DCACHE_SIZE) ; \
290 #define XSCALE_CACHE_CLEAN_EPILOGUE \
291 XSCALE_CACHE_CLEAN_UNBLOCK
/*
 * xscale_cache_syncI / purgeID / cleanID / purgeD / cleanD:
 * one shared body with multiple entry points.  The D-cache is cleaned
 * by allocate-line operations over the bounce area; entry points at or
 * above purgeID also invalidate the I-cache and BTB first.
 * NOTE(review): the allocate-line loop's counter update and branch
 * instructions are elided from this chunk.
 */
293 ENTRY_NP(xscale_cache_syncI)
295 EENTRY_NP(xscale_cache_purgeID)
296 mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
297 EENTRY_NP(xscale_cache_cleanID)
298 EENTRY_NP(xscale_cache_purgeD)
299 EENTRY(xscale_cache_cleanD)
300 XSCALE_CACHE_CLEAN_PROLOGUE
303 mcr p15, 0, r0, c7, c2, 5 /* allocate cache line */
309 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
313 XSCALE_CACHE_CLEAN_EPILOGUE
315 EEND(xscale_cache_cleanD)
316 EEND(xscale_cache_purgeD)
317 EEND(xscale_cache_cleanID)
318 EEND(xscale_cache_purgeID)
319 END(xscale_cache_syncI)
322 * Clean the mini-data cache.
324 * It's expected that we only use the mini-data cache for
325 * kernel addresses, so there is no need to purge it on
326 * context switch, and no need to prevent userspace access
/* NOTE(review): the load loop that walks the minidata clean region is
 * elided from this chunk, as is the end of the comment above. */
329 ENTRY(xscale_cache_clean_minidata)
330 ldr r2, .Lxscale_minidata_clean_addr
336 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
338 CPWAIT_AND_RETURN(r1)
339 END(xscale_cache_clean_minidata)
/*
 * Purge (clean, then invalidate) the I- and D-cache lines for the
 * virtual address in r0.
 */
341 ENTRY(xscale_cache_purgeID_E)
342 mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
/* Drain before invalidating so the cleaned line reaches memory. */
344 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
345 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
346 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
347 CPWAIT_AND_RETURN(r1)
348 END(xscale_cache_purgeID_E)
/*
 * Purge (clean, then invalidate) the D-cache line for the virtual
 * address in r0.
 */
350 ENTRY(xscale_cache_purgeD_E)
351 mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
/* Drain before invalidating so the cleaned line reaches memory. */
353 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
354 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
355 CPWAIT_AND_RETURN(r1)
356 END(xscale_cache_purgeD_E)
361 /* xscale_cache_syncI is identical to xscale_cache_purgeID */
/*
 * Clean a virtual-address range of the D-cache (presumably r0 = start
 * VA, r1 = length -- confirm against the cpufunc prototypes).
 * NOTE(review): the size comparison that sets carry for the bcs (fall
 * back to a whole-cache clean for large ranges) and most of the loop
 * (line rounding, address increment, bound test) are elided.
 */
363 EENTRY(xscale_cache_cleanID_rng)
364 ENTRY(xscale_cache_cleanD_rng)
366 bcs _C_LABEL(xscale_cache_cleanID)
372 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
379 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
381 CPWAIT_AND_RETURN(r0)
382 /*END(xscale_cache_cleanID_rng)*/
383 END(xscale_cache_cleanD_rng)
/*
 * Purge (clean + invalidate D, invalidate I) a virtual-address range
 * (presumably r0 = start VA, r1 = length -- confirm).
 * NOTE(review): the size comparison feeding the bcs and the loop's
 * increment/bound-test instructions are elided from this chunk.
 */
385 ENTRY(xscale_cache_purgeID_rng)
387 bcs _C_LABEL(xscale_cache_purgeID)
393 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
394 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
395 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
402 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
404 CPWAIT_AND_RETURN(r0)
405 END(xscale_cache_purgeID_rng)
/*
 * Purge (clean + invalidate) a virtual-address range of the D-cache
 * (presumably r0 = start VA, r1 = length -- confirm).
 * NOTE(review): the size comparison feeding the bcs and the loop's
 * increment/bound-test instructions are elided from this chunk.
 */
407 ENTRY(xscale_cache_purgeD_rng)
409 bcs _C_LABEL(xscale_cache_purgeD)
415 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
416 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
423 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
425 CPWAIT_AND_RETURN(r0)
426 END(xscale_cache_purgeD_rng)
/*
 * Synchronize the I-cache with a virtual-address range: clean D-cache
 * lines, then invalidate the corresponding I-cache lines (presumably
 * r0 = start VA, r1 = length -- confirm).
 * NOTE(review): the size comparison feeding the bcs and the loop's
 * increment/bound-test instructions are elided from this chunk.
 */
428 ENTRY(xscale_cache_syncI_rng)
430 bcs _C_LABEL(xscale_cache_syncI)
436 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
437 mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
444 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
446 CPWAIT_AND_RETURN(r0)
447 END(xscale_cache_syncI_rng)
/*
 * Invalidate (no clean) a virtual-address range of the D-cache
 * (presumably r0 = start VA, r1 = length -- confirm).
 * NOTE(review): the line-rounding and loop increment/bound-test
 * instructions are elided from this chunk.
 */
449 ENTRY(xscale_cache_flushD_rng)
454 1: mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
459 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
461 CPWAIT_AND_RETURN(r0)
462 END(xscale_cache_flushD_rng)
467 * These are the CPU-specific parts of the context switcher cpu_switch()
468 * These functions actually perform the TTB reload.
470 * NOTE: Special calling convention
471 * r1, r4-r13 must be preserved
473 ENTRY(xscale_context_switch)
475 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
476 * Thus the data cache will contain only kernel data and the
477 * instruction cache will contain only kernel code, and all
478 * kernel mappings are shared by all processes.
/* r0 = new translation table base; load it into CP15 c2. */
482 mcr p15, 0, r0, c2, c0, 0
484 /* If we have updated the TTB we must flush the TLB */
485 mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
487 CPWAIT_AND_RETURN(r0)
488 END(xscale_context_switch)
493 * This is called when there is nothing on any of the run queues.
494 * We go into IDLE mode so that any IRQ or FIQ will awaken us.
496 * If this is called with anything other than ARM_SLEEP_MODE_IDLE,
/* NOTE(review): the sleep-mode check implied by the comment above and
 * any post-wakeup instructions are elided from this chunk. */
499 ENTRY(xscale_cpu_sleep)
/* CP14 c7 write: enter XScale IDLE mode; an IRQ or FIQ resumes us. */
503 mcr p14, 0, r0, c7, c0, 0
507 END(xscale_cpu_sleep)