 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
32 #include <machine/asm.h>
33 __FBSDID("$FreeBSD$");
35 #include <machine/armreg.h>
36 #include <machine/param.h>
38 #ifndef ELF_TRAMPOLINE
/*
 * PC-relative literal pool used by the range routines below.
 * Each .word holds the ADDRESS of a kernel global (via _C_LABEL name
 * mangling); the loaded pointer is presumably dereferenced by a second
 * ldr that is not visible in this chunk — TODO confirm against full file.
 */
39 .Lsheeva_cache_line_size:
40 .word _C_LABEL(arm_pdcache_line_size)
41 .Lsheeva_asm_page_mask:
42 .word _C_LABEL(PAGE_MASK)
/*
 * NOTE(review): orphaned body fragment — its ENTRY line falls in a gap of
 * this chunk. The final "load new TTB" / "invalidate I+D TLBs" ops suggest
 * this is the translation-table-base switch routine (r0 = new TTB physical
 * address) — TODO confirm against the full file. Sequence: mask interrupts,
 * write back and invalidate L1 I/D caches and the L2, drain, then switch
 * the TTB and flush the TLBs so no stale translations survive.
 */
47 orr r3, r2, #PSR_I | PSR_F /* r3 = saved PSR with IRQ+FIQ masked */
51 mcr p15, 0, r1, c7, c5, 0 /* Invalidate ICache */
52 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 /* Test, clean and invalidate DCache */
53 bne 1b /* More to do? */
/* L2 ops use the Feroceon-specific CP15 op1=1 encodings. */
55 mcr p15, 1, r1, c15, c9, 0 /* Clean L2 */
56 mcr p15, 1, r1, c15, c11, 0 /* Invalidate L2 */
61 mcr p15, 0, r1, c7, c10, 4 /* drain the write buffer */
63 mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
65 mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
/*
 * void sheeva_dcache_wbinv_range(vm_offset_t va, vm_size_t size)
 * Write back and invalidate the L1 D-cache over [va, va+size).
 * Presumably r0 = va, r1 = size (FreeBSD arm cache-op convention) —
 * TODO confirm. Uses the Sheeva CP15 op2=5 "zone" ops, which take a
 * start and an end address; the chunking/loop and the interrupt
 * save/restore around the zone ops are in lines missing from this chunk.
 */
69 ENTRY(sheeva_dcache_wbinv_range)
72 /* Start with cache line aligned address */
73 ldr ip, .Lsheeva_cache_line_size /* ip = &arm_pdcache_line_size */
/* Work page by page: r2 = bytes from va to the end of its page. */
82 ldr ip, .Lsheeva_asm_page_mask /* ip = &PAGE_MASK value */
84 rsb r2, r2, #PAGE_SIZE /* r2 = PAGE_SIZE - (va & mask) */
92 orr r3, lr, #PSR_I | PSR_F /* r3 = PSR copy, IRQ+FIQ masked */
94 mcr p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */
95 mcr p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */
107 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
110 END(sheeva_dcache_wbinv_range)
/*
 * void sheeva_idcache_wbinv_range(vm_offset_t va, vm_size_t size)
 * Write back and invalidate the D-cache over the range, then invalidate
 * the I-cache line by line over the same range (e.g. after writing code).
 * Presumably r0 = va, r1 = size — TODO confirm. Chunking loops and
 * interrupt save/restore live in lines missing from this chunk.
 */
112 ENTRY(sheeva_idcache_wbinv_range)
115 /* Start with cache line aligned address */
116 ldr ip, .Lsheeva_cache_line_size /* ip = &arm_pdcache_line_size */
/* Bound the first zone op to the end of the current page. */
125 ldr ip, .Lsheeva_asm_page_mask /* ip = &PAGE_MASK value */
127 rsb r2, r2, #PAGE_SIZE /* r2 = bytes left in this page */
135 orr r3, lr, #PSR_I | PSR_F /* r3 = PSR copy, IRQ+FIQ masked */
/* D-side: Sheeva zone clean+invalidate between start/end addresses. */
137 mcr p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */
138 mcr p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */
142 /* Invalidate and clean icache line by line */
143 ldr r3, .Lsheeva_cache_line_size /* r3 = &arm_pdcache_line_size */
146 mcr p15, 0, r0, c7, c5, 1 /* invalidate I-cache line at r0 */
159 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
162 END(sheeva_idcache_wbinv_range)
/*
 * void sheeva_dcache_inv_range(vm_offset_t va, vm_size_t size)
 * Invalidate (discard, no write back) the L1 D-cache over [va, va+size).
 * Presumably r0 = va, r1 = size — TODO confirm. Caller must ensure no
 * dirty data in the range is still needed. Loop/IRQ-restore lines are
 * missing from this chunk.
 */
164 ENTRY(sheeva_dcache_inv_range)
167 /* Start with cache line aligned address */
168 ldr ip, .Lsheeva_cache_line_size /* ip = &arm_pdcache_line_size */
/* Bound the zone op to the end of the current page. */
177 ldr ip, .Lsheeva_asm_page_mask /* ip = &PAGE_MASK value */
179 rsb r2, r2, #PAGE_SIZE /* r2 = bytes left in this page */
187 orr r3, lr, #PSR_I | PSR_F /* r3 = PSR copy, IRQ+FIQ masked */
/* op2=0/1 on c15,c14: invalidate-only zone (contrast c15 = clean+inv). */
189 mcr p15, 5, r0, c15, c14, 0 /* Inv zone start address */
190 mcr p15, 5, r2, c15, c14, 1 /* Inv zone end address */
202 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
205 END(sheeva_dcache_inv_range)
/*
 * void sheeva_dcache_wb_range(vm_offset_t va, vm_size_t size)
 * Write back (clean, without invalidating) the L1 D-cache over the range,
 * pushing dirty lines to memory while keeping them cached.
 * Presumably r0 = va, r1 = size — TODO confirm. Loop/IRQ-restore lines
 * are missing from this chunk.
 */
207 ENTRY(sheeva_dcache_wb_range)
210 /* Start with cache line aligned address */
211 ldr ip, .Lsheeva_cache_line_size /* ip = &arm_pdcache_line_size */
/* Bound the zone op to the end of the current page. */
220 ldr ip, .Lsheeva_asm_page_mask /* ip = &PAGE_MASK value */
222 rsb r2, r2, #PAGE_SIZE /* r2 = bytes left in this page */
230 orr r3, lr, #PSR_I | PSR_F /* r3 = PSR copy, IRQ+FIQ masked */
/* c15,c13 zone op: clean only. */
232 mcr p15, 5, r0, c15, c13, 0 /* Clean zone start address */
233 mcr p15, 5, r2, c15, c13, 1 /* Clean zone end address */
245 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
248 END(sheeva_dcache_wb_range)
/*
 * void sheeva_l2cache_wbinv_range(vm_offset_t va, vm_size_t size)
 * Write back and invalidate the L2 cache over [va, va+size), done as a
 * clean pass followed by an invalidate pass (the Feroceon L2 has no
 * combined clean+invalidate range op visible here).
 * Presumably r0 = va, r1 = size — TODO confirm. Loop/IRQ-restore lines
 * are missing from this chunk.
 */
250 ENTRY(sheeva_l2cache_wbinv_range)
253 /* Start with cache line aligned address */
254 ldr ip, .Lsheeva_cache_line_size /* ip = &arm_pdcache_line_size */
/* Bound the range ops to the end of the current page. */
263 ldr ip, .Lsheeva_asm_page_mask /* ip = &PAGE_MASK value */
265 rsb r2, r2, #PAGE_SIZE /* r2 = bytes left in this page */
273 orr r3, lr, #PSR_I | PSR_F /* r3 = PSR copy, IRQ+FIQ masked */
/* L2 range ops: op1=1, start address then end address. */
275 mcr p15, 1, r0, c15, c9, 4 /* Clean L2 zone start address */
276 mcr p15, 1, r2, c15, c9, 5 /* Clean L2 zone end address */
277 mcr p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */
278 mcr p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */
290 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
293 END(sheeva_l2cache_wbinv_range)
/*
 * void sheeva_l2cache_inv_range(vm_offset_t va, vm_size_t size)
 * Invalidate (discard) the L2 cache over [va, va+size) without writing
 * back. Presumably r0 = va, r1 = size — TODO confirm. Loop/IRQ-restore
 * lines are missing from this chunk.
 */
295 ENTRY(sheeva_l2cache_inv_range)
298 /* Start with cache line aligned address */
299 ldr ip, .Lsheeva_cache_line_size /* ip = &arm_pdcache_line_size */
/* Bound the range op to the end of the current page. */
308 ldr ip, .Lsheeva_asm_page_mask /* ip = &PAGE_MASK value */
310 rsb r2, r2, #PAGE_SIZE /* r2 = bytes left in this page */
318 orr r3, lr, #PSR_I | PSR_F /* r3 = PSR copy, IRQ+FIQ masked */
320 mcr p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */
321 mcr p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */
333 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
336 END(sheeva_l2cache_inv_range)
/*
 * void sheeva_l2cache_wb_range(vm_offset_t va, vm_size_t size)
 * Write back (clean only) the L2 cache over [va, va+size); lines stay
 * valid. Presumably r0 = va, r1 = size — TODO confirm. Loop/IRQ-restore
 * lines are missing from this chunk.
 */
338 ENTRY(sheeva_l2cache_wb_range)
341 /* Start with cache line aligned address */
342 ldr ip, .Lsheeva_cache_line_size /* ip = &arm_pdcache_line_size */
/* Bound the range op to the end of the current page. */
351 ldr ip, .Lsheeva_asm_page_mask /* ip = &PAGE_MASK value */
353 rsb r2, r2, #PAGE_SIZE /* r2 = bytes left in this page */
361 orr r3, lr, #PSR_I | PSR_F /* r3 = PSR copy, IRQ+FIQ masked */
363 mcr p15, 1, r0, c15, c9, 4 /* Clean L2 zone start address */
364 mcr p15, 1, r2, c15, c9, 5 /* Clean L2 zone end address */
376 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
379 END(sheeva_l2cache_wb_range)
380 #endif /* !ELF_TRAMPOLINE */
/*
 * void sheeva_l2cache_wbinv_all(void)
 * Write back and invalidate the ENTIRE L2 cache with interrupts masked,
 * then restore the caller's PSR and drain the write buffer.
 * r1 presumably holds the saved CPSR (the mrs that fills it is in a line
 * missing from this chunk) — TODO confirm.
 */
382 ENTRY(sheeva_l2cache_wbinv_all)
385 orr r2, r1, #PSR_I | PSR_F /* r2 = PSR copy, IRQ+FIQ masked */
/* Whole-cache L2 ops: op1=1, op2=0 forms. */
389 mcr p15, 1, r0, c15, c9, 0 /* Clean L2 */
390 mcr p15, 1, r0, c15, c11, 0 /* Invalidate L2 */
392 msr cpsr_c, r1 /* Reenable irqs */
394 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
396 END(sheeva_l2cache_wbinv_all)
398 #ifndef ELF_TRAMPOLINE
399 /* This function modifies register value as follows:
401 * arg1 arg EFFECT (bit value saved into register)
/*
 * u_int sheeva_control_ext(u_int clear_mask, u_int xor_mask)
 * Read-modify-write the Feroceon Extra Features register (CP15 op1=1,
 * c15 c1): new = (old & ~r0) ^ r1. Writes back only when the value
 * actually changes; returns the OLD register value in r0.
 * (The original multi-line header comment is partially lost to gaps in
 * this chunk.)
 */
407 ENTRY(sheeva_control_ext)
408 mrc p15, 1, r3, c15, c1, 0 /* Read the control register */
409 bic r2, r3, r0 /* Clear bits */
410 eor r2, r2, r1 /* XOR bits */
412 teq r2, r3 /* Only write if there is a change */
413 mcrne p15, 1, r2, c15, c1, 0 /* Write new control register */
414 mov r0, r3 /* Return old value */
416 END(sheeva_control_ext)
/*
 * void sheeva_cpu_sleep(int mode)
 * Idle the core: drain the write buffer so all pending stores are
 * visible, then execute the CP15 wait-for-interrupt op. Execution
 * resumes after the next interrupt (the return path is in a line
 * missing from this chunk).
 */
418 ENTRY(sheeva_cpu_sleep)
420 mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
421 mcr p15, 0, r0, c7, c0, 4 /* Wait for interrupt */
423 END(sheeva_cpu_sleep)
424 #endif /* !ELF_TRAMPOLINE */