2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Developed by Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <machine/armreg.h>
33 #include <machine/asm.h>
34 __FBSDID("$FreeBSD$");
36 #include <machine/param.h>
/*
 * Literal pool shared by the cache-range routines below.
 * .Lsheeva_cache_line_size holds the address of the kernel variable
 * arm_pdcache_line_size (primary D-cache line size); the routines
 * load from it at run time.  .Lsheeva_asm_page_mask holds the
 * PAGE_MASK literal used to compute per-page spans.
 */
38 .Lsheeva_cache_line_size:
39 .word _C_LABEL(arm_pdcache_line_size)
40 .Lsheeva_asm_page_mask:
41 .word _C_LABEL(PAGE_MASK)
/*
 * NOTE(review): the ENTRY() line for this routine lies outside the
 * visible portion of this file.  From its shape -- mask interrupts,
 * clean+invalidate L1 I/D caches, clean+invalidate L2, drain the
 * write buffer, write the translation table base, flush the TLBs --
 * this appears to be the body of a set-TTB routine; confirm against
 * the full source.  The mrs/msr cpsr handling and the return
 * instruction are also elided from this view.
 */
46 orr r3, r2, #PSR_I | PSR_F /* r3 = cpsr with IRQ and FIQ masked */
50 mcr p15, 0, r1, c7, c5, 0 /* Invalidate ICache */
51 1: mrc p15, 0, r15, c7, c14, 3 /* Test, clean and invalidate DCache */
52 bne 1b /* More to do? */
54 mcr p15, 1, r1, c15, c9, 0 /* Clean L2 */
55 mcr p15, 1, r1, c15, c11, 0 /* Invalidate L2 */
60 mcr p15, 0, r1, c7, c10, 4 /* drain the write buffer */
62 mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
64 mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
/*
 * sheeva_dcache_wbinv_range: write back and invalidate the L1 D-cache
 * over a virtual address range (start in r0, length in r1, per the
 * other range routines in this file).  Uses the Sheeva CP15 c15
 * "clean and invalidate zone" start/end operations, with IRQ+FIQ
 * masked around each zone operation and the range walked at most one
 * page at a time.
 * NOTE(review): interior lines (alignment arithmetic, per-page loop,
 * cpsr save/restore, return) are elided from this view of the file;
 * only representative lines are visible.
 */
68 ENTRY(sheeva_dcache_wbinv_range)
71 /* Start with cache line aligned address */
72 ldr ip, .Lsheeva_cache_line_size /* literal: &arm_pdcache_line_size */
81 ldr ip, .Lsheeva_asm_page_mask /* literal: page-mask value */
83 rsb r2, r2, #PAGE_SIZE /* r2 = bytes remaining in this page */
91 orr r3, lr, #PSR_I | PSR_F /* mask IRQ+FIQ around the zone op */
93 mcr p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */
94 mcr p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */
106 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
109 END(sheeva_dcache_wbinv_range)
/*
 * sheeva_idcache_wbinv_range: write back and invalidate both the L1
 * D-cache (via the CP15 c15 clean+invalidate zone operations, one
 * page at a time with IRQ+FIQ masked) and the I-cache (invalidated
 * line by line with the c7,c5,1 single-entry operation) over a
 * virtual address range.
 * NOTE(review): interior lines (alignment math, loop control, cpsr
 * save/restore, return) are elided from this view of the file.
 */
111 ENTRY(sheeva_idcache_wbinv_range)
114 /* Start with cache line aligned address */
115 ldr ip, .Lsheeva_cache_line_size /* literal: &arm_pdcache_line_size */
124 ldr ip, .Lsheeva_asm_page_mask /* literal: page-mask value */
126 rsb r2, r2, #PAGE_SIZE /* r2 = bytes remaining in this page */
134 orr r3, lr, #PSR_I | PSR_F /* mask IRQ+FIQ around the zone op */
136 mcr p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */
137 mcr p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */
141 /* Invalidate and clean icache line by line */
142 ldr r3, .Lsheeva_cache_line_size /* reload line size for I-cache loop */
145 mcr p15, 0, r0, c7, c5, 1 /* Invalidate ICache single entry (MVA) */
158 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
161 END(sheeva_idcache_wbinv_range)
/*
 * sheeva_dcache_inv_range: invalidate (without writing back) the L1
 * D-cache over a virtual address range, using the CP15 c15
 * "invalidate zone" start/end operations, one page at a time with
 * IRQ+FIQ masked around each zone operation.
 * NOTE(review): interior lines (alignment math, loop, cpsr
 * save/restore, return) are elided from this view of the file.
 */
163 ENTRY(sheeva_dcache_inv_range)
166 /* Start with cache line aligned address */
167 ldr ip, .Lsheeva_cache_line_size /* literal: &arm_pdcache_line_size */
176 ldr ip, .Lsheeva_asm_page_mask /* literal: page-mask value */
178 rsb r2, r2, #PAGE_SIZE /* r2 = bytes remaining in this page */
186 orr r3, lr, #PSR_I | PSR_F /* mask IRQ+FIQ around the zone op */
188 mcr p15, 5, r0, c15, c14, 0 /* Inv zone start address */
189 mcr p15, 5, r2, c15, c14, 1 /* Inv zone end address */
201 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
204 END(sheeva_dcache_inv_range)
/*
 * sheeva_dcache_wb_range: write back (clean, without invalidating)
 * the L1 D-cache over a virtual address range, using the CP15 c15
 * "clean zone" start/end operations, one page at a time with IRQ+FIQ
 * masked around each zone operation.
 * NOTE(review): interior lines (alignment math, loop, cpsr
 * save/restore, return) are elided from this view of the file.
 */
206 ENTRY(sheeva_dcache_wb_range)
209 /* Start with cache line aligned address */
210 ldr ip, .Lsheeva_cache_line_size /* literal: &arm_pdcache_line_size */
219 ldr ip, .Lsheeva_asm_page_mask /* literal: page-mask value */
221 rsb r2, r2, #PAGE_SIZE /* r2 = bytes remaining in this page */
229 orr r3, lr, #PSR_I | PSR_F /* mask IRQ+FIQ around the zone op */
231 mcr p15, 5, r0, c15, c13, 0 /* Clean zone start address */
232 mcr p15, 5, r2, c15, c13, 1 /* Clean zone end address */
244 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
247 END(sheeva_dcache_wb_range)
/*
 * sheeva_l2cache_wbinv_range: write back and invalidate the L2 cache
 * over a virtual address range.  The Sheeva L2 has no combined
 * clean+invalidate zone operation, so this issues a clean-zone pair
 * (c15,c9 opc2 4/5) followed by an invalidate-zone pair (c15,c11
 * opc2 4/5), one page at a time with IRQ+FIQ masked.
 * NOTE(review): interior lines (alignment math, loop, cpsr
 * save/restore, return) are elided from this view of the file.
 */
249 ENTRY(sheeva_l2cache_wbinv_range)
252 /* Start with cache line aligned address */
253 ldr ip, .Lsheeva_cache_line_size /* literal: &arm_pdcache_line_size */
262 ldr ip, .Lsheeva_asm_page_mask /* literal: page-mask value */
264 rsb r2, r2, #PAGE_SIZE /* r2 = bytes remaining in this page */
272 orr r3, lr, #PSR_I | PSR_F /* mask IRQ+FIQ around the zone ops */
274 mcr p15, 1, r0, c15, c9, 4 /* Clean L2 zone start address */
275 mcr p15, 1, r2, c15, c9, 5 /* Clean L2 zone end address */
276 mcr p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */
277 mcr p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */
289 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
292 END(sheeva_l2cache_wbinv_range)
/*
 * sheeva_l2cache_inv_range: invalidate (without writing back) the L2
 * cache over a virtual address range, using the L2 invalidate-zone
 * start/end operations (c15,c11 opc2 4/5), one page at a time with
 * IRQ+FIQ masked around each zone operation.
 * NOTE(review): interior lines (alignment math, loop, cpsr
 * save/restore, return) are elided from this view of the file.
 */
294 ENTRY(sheeva_l2cache_inv_range)
297 /* Start with cache line aligned address */
298 ldr ip, .Lsheeva_cache_line_size /* literal: &arm_pdcache_line_size */
307 ldr ip, .Lsheeva_asm_page_mask /* literal: page-mask value */
309 rsb r2, r2, #PAGE_SIZE /* r2 = bytes remaining in this page */
317 orr r3, lr, #PSR_I | PSR_F /* mask IRQ+FIQ around the zone op */
319 mcr p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */
320 mcr p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */
332 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
335 END(sheeva_l2cache_inv_range)
/*
 * sheeva_l2cache_wb_range: write back (clean, without invalidating)
 * the L2 cache over a virtual address range, using the L2 clean-zone
 * start/end operations (c15,c9 opc2 4/5), one page at a time with
 * IRQ+FIQ masked around each zone operation.
 * NOTE(review): interior lines (alignment math, loop, cpsr
 * save/restore, return) are elided from this view of the file.
 */
337 ENTRY(sheeva_l2cache_wb_range)
340 /* Start with cache line aligned address */
341 ldr ip, .Lsheeva_cache_line_size /* literal: &arm_pdcache_line_size */
350 ldr ip, .Lsheeva_asm_page_mask /* literal: page-mask value */
352 rsb r2, r2, #PAGE_SIZE /* r2 = bytes remaining in this page */
360 orr r3, lr, #PSR_I | PSR_F /* mask IRQ+FIQ around the zone op */
362 mcr p15, 1, r0, c15, c9, 4 /* Clean L2 zone start address */
363 mcr p15, 1, r2, c15, c9, 5 /* Clean L2 zone end address */
375 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
378 END(sheeva_l2cache_wb_range)
/*
 * sheeva_l2cache_wbinv_all: clean and then invalidate the entire L2
 * cache (whole-cache c15,c9/c15,c11 operations with opc2 0), with
 * IRQ+FIQ masked for the duration, then re-enable interrupts and
 * drain the write buffer.
 * NOTE(review): the mrs that reads the original cpsr into r1, the
 * msr that applies the masked value in r2, and the return are elided
 * from this view of the file.
 */
380 ENTRY(sheeva_l2cache_wbinv_all)
383 orr r2, r1, #PSR_I | PSR_F /* r2 = cpsr with IRQ and FIQ masked */
387 mcr p15, 1, r0, c15, c9, 0 /* Clean L2 */
388 mcr p15, 1, r0, c15, c11, 0 /* Invalidate L2 */
390 msr cpsr_c, r1 /* Reenable irqs */
392 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
394 END(sheeva_l2cache_wbinv_all)
/*
 * sheeva_control_ext: read-modify-write the Sheeva extra-features
 * control register (CP15 opc1=1, c15, c1).  r0 = mask of bits to
 * clear, r1 = mask applied by XOR after the clear (sets bits that
 * were cleared, toggles bits that were not).  The register is only
 * written when the resulting value differs from the current one.
 * Returns the previous register value in r0.
 * NOTE(review): the original header comment's arg-effect table and
 * the return instruction are truncated in this view of the file.
 */
396 /* This function modifies register value as follows:
398 * arg1 arg EFFECT (bit value saved into register)
404 ENTRY(sheeva_control_ext)
405 mrc p15, 1, r3, c15, c1, 0 /* Read the control register */
406 bic r2, r3, r0 /* Clear bits */
407 eor r2, r2, r1 /* XOR bits */
409 teq r2, r3 /* Only write if there is a change */
410 mcrne p15, 1, r2, c15, c1, 0 /* Write new control register */
411 mov r0, r3 /* Return old value */
413 END(sheeva_control_ext)
/*
 * sheeva_cpu_sleep: idle the CPU until an interrupt arrives.  Drains
 * the write buffer, then executes the CP15 wait-for-interrupt
 * operation (c7,c0,4).  The data register (r0) is ignored by both
 * operations.
 * NOTE(review): any surrounding loop and the return instruction are
 * elided from this view of the file.
 */
415 ENTRY(sheeva_cpu_sleep)
417 mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
418 mcr p15, 0, r0, c7, c0, 4 /* Wait for interrupt */
420 END(sheeva_cpu_sleep)