/*-
2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Developed by Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
32 #include <machine/asm.h>
33 __FBSDID("$FreeBSD$");
35 #include <machine/param.h>
37 .Lsheeva_cache_line_size:
38 .word _C_LABEL(arm_pdcache_line_size)
39 .Lsheeva_asm_page_mask:
40 .word _C_LABEL(PAGE_MASK)
45 orr r3, r2, #I32_bit | F32_bit
49 mcr p15, 0, r1, c7, c5, 0 /* Invalidate ICache */
50 1: mrc p15, 0, r15, c7, c14, 3 /* Test, clean and invalidate DCache */
51 bne 1b /* More to do? */
53 mcr p15, 1, r1, c15, c9, 0 /* Clean L2 */
54 mcr p15, 1, r1, c15, c11, 0 /* Invalidate L2 */
59 mcr p15, 0, r1, c7, c10, 4 /* drain the write buffer */
61 mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
63 mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
67 ENTRY(sheeva_dcache_wbinv_range)
70 /* Start with cache line aligned address */
71 ldr ip, .Lsheeva_cache_line_size
80 ldr ip, .Lsheeva_asm_page_mask
82 rsb r2, r2, #PAGE_SIZE
90 orr r3, lr, #I32_bit | F32_bit
92 mcr p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */
93 mcr p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */
105 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
108 END(sheeva_dcache_wbinv_range)
110 ENTRY(sheeva_idcache_wbinv_range)
113 /* Start with cache line aligned address */
114 ldr ip, .Lsheeva_cache_line_size
123 ldr ip, .Lsheeva_asm_page_mask
125 rsb r2, r2, #PAGE_SIZE
133 orr r3, lr, #I32_bit | F32_bit
135 mcr p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */
136 mcr p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */
140 /* Invalidate and clean icache line by line */
141 ldr r3, .Lsheeva_cache_line_size
144 mcr p15, 0, r0, c7, c5, 1
157 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
160 END(sheeva_idcache_wbinv_range)
162 ENTRY(sheeva_dcache_inv_range)
165 /* Start with cache line aligned address */
166 ldr ip, .Lsheeva_cache_line_size
175 ldr ip, .Lsheeva_asm_page_mask
177 rsb r2, r2, #PAGE_SIZE
185 orr r3, lr, #I32_bit | F32_bit
187 mcr p15, 5, r0, c15, c14, 0 /* Inv zone start address */
188 mcr p15, 5, r2, c15, c14, 1 /* Inv zone end address */
200 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
203 END(sheeva_dcache_inv_range)
205 ENTRY(sheeva_dcache_wb_range)
208 /* Start with cache line aligned address */
209 ldr ip, .Lsheeva_cache_line_size
218 ldr ip, .Lsheeva_asm_page_mask
220 rsb r2, r2, #PAGE_SIZE
228 orr r3, lr, #I32_bit | F32_bit
230 mcr p15, 5, r0, c15, c13, 0 /* Clean zone start address */
231 mcr p15, 5, r2, c15, c13, 1 /* Clean zone end address */
243 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
246 END(sheeva_dcache_wb_range)
248 ENTRY(sheeva_l2cache_wbinv_range)
251 /* Start with cache line aligned address */
252 ldr ip, .Lsheeva_cache_line_size
261 ldr ip, .Lsheeva_asm_page_mask
263 rsb r2, r2, #PAGE_SIZE
271 orr r3, lr, #I32_bit | F32_bit
273 mcr p15, 1, r0, c15, c9, 4 /* Clean L2 zone start address */
274 mcr p15, 1, r2, c15, c9, 5 /* Clean L2 zone end address */
275 mcr p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */
276 mcr p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */
288 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
291 END(sheeva_l2cache_wbinv_range)
293 ENTRY(sheeva_l2cache_inv_range)
296 /* Start with cache line aligned address */
297 ldr ip, .Lsheeva_cache_line_size
306 ldr ip, .Lsheeva_asm_page_mask
308 rsb r2, r2, #PAGE_SIZE
316 orr r3, lr, #I32_bit | F32_bit
318 mcr p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */
319 mcr p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */
331 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
334 END(sheeva_l2cache_inv_range)
336 ENTRY(sheeva_l2cache_wb_range)
339 /* Start with cache line aligned address */
340 ldr ip, .Lsheeva_cache_line_size
349 ldr ip, .Lsheeva_asm_page_mask
351 rsb r2, r2, #PAGE_SIZE
359 orr r3, lr, #I32_bit | F32_bit
361 mcr p15, 1, r0, c15, c9, 4 /* Clean L2 zone start address */
362 mcr p15, 1, r2, c15, c9, 5 /* Clean L2 zone end address */
374 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
377 END(sheeva_l2cache_wb_range)
379 ENTRY(sheeva_l2cache_wbinv_all)
382 orr r2, r1, #I32_bit | F32_bit
386 mcr p15, 1, r0, c15, c9, 0 /* Clean L2 */
387 mcr p15, 1, r0, c15, c11, 0 /* Invalidate L2 */
389 msr cpsr_c, r1 /* Reenable irqs */
391 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
393 END(sheeva_l2cache_wbinv_all)
395 /* This function modifies register value as follows:
397 * arg1 arg EFFECT (bit value saved into register)
403 ENTRY(sheeva_control_ext)
404 mrc p15, 1, r3, c15, c1, 0 /* Read the control register */
405 bic r2, r3, r0 /* Clear bits */
406 eor r2, r2, r1 /* XOR bits */
408 teq r2, r3 /* Only write if there is a change */
409 mcrne p15, 1, r2, c15, c1, 0 /* Write new control register */
410 mov r0, r3 /* Return old value */
412 END(sheeva_control_ext)
414 ENTRY(sheeva_cpu_sleep)
416 mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
417 mcr p15, 0, r0, c7, c0, 4 /* Wait for interrupt */
419 END(sheeva_cpu_sleep)