/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/errno.h>
#include <machine/asm.h>
#include <machine/param.h>

/*
 * FIXME:
 * Need big.LITTLE awareness at some point.
 * Using arm64_p[id]cache_line_size may not be the best option.
 * Need better SMP awareness.
 */

/*
 * Macro to handle the cache. This takes the start address in x0, length
 * in x1. It will corrupt x0, x1, x2, x3, and x4.
 */
.macro cache_handle_range dcop = 0, ic = 0, icop = 0
.if \ic == 0
	adrp	x3, dcache_line_size	/* Load the D cache line size */
	ldr	x3, [x3, :lo12:dcache_line_size]
.else
	adrp	x3, idcache_line_size	/* Load the I & D cache line size */
	ldr	x3, [x3, :lo12:idcache_line_size]
.endif
	sub	x4, x3, #1		/* Get the address mask */
	and	x2, x0, x4		/* Get the low bits of the address */
	add	x1, x1, x2		/* Add these to the size */
	bic	x0, x0, x4		/* Clear the low bits of the address */
.if \ic != 0
	mov	x2, x0			/* Save the address */
	mov	x4, x1			/* Save the size */
.endif
1:
	dc	\dcop, x0
	add	x0, x0, x3		/* Move to the next line */
	subs	x1, x1, x3		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
	dsb	ish
.if \ic != 0
2:
	ic	\icop, x2
	add	x2, x2, x3		/* Move to the next line */
	subs	x4, x4, x3		/* Reduce the size */
	b.hi	2b			/* Check if we are done */
	dsb	ish
	isb
.endif
.endm
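
/*
 * Worked example of the alignment math above (a sketch, assuming
 * 64-byte cache lines): with x0 = 0x1003 and x1 = 0x10, the mask in x4
 * is 0x3f, so x2 = 3, the size in x1 grows to 0x13 and x0 is rounded
 * down to 0x1000.  The "1:" loop then issues one dc per line (0x1000
 * here; 0x1040, 0x1080, ... for larger ranges) until subs exhausts the
 * remaining size.
 */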

/*
 * Generic functions to read/modify/write the internal coprocessor registers
 */

ENTRY(arm64_tlb_flushID)
	dsb	ishst			/* Complete page table stores first */
#ifdef SMP
	tlbi	vmalle1is		/* Invalidate EL1 TLBs, Inner Shareable broadcast */
#else
	tlbi	vmalle1
#endif
	dsb	ish
	isb
	ret
END(arm64_tlb_flushID)

/*
 * void arm64_dcache_wb_range(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_dcache_wb_range)
	cache_handle_range	dcop = cvac
	ret
END(arm64_dcache_wb_range)
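
/*
 * Typical use (a sketch, not mandated here): clean a buffer to the
 * point of coherency before a device reads it by DMA, so the device
 * observes the CPU's latest stores.  cvac writes lines back without
 * invalidating them, so later CPU reads still hit in the cache.
 */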

/*
 * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
	ret
END(arm64_dcache_wbinv_range)

/*
 * void arm64_dcache_inv_range(vm_offset_t, vm_size_t)
 *
 * Note, we must not invalidate everything.  If the range is too big we
 * must use wb-inv of the entire cache.
 */
ENTRY(arm64_dcache_inv_range)
	cache_handle_range	dcop = ivac
	ret
END(arm64_dcache_inv_range)
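
/*
 * A sketch of the policy the note above implies for callers (the
 * threshold is hypothetical; nothing in this file defines it):
 *
 *	if the range is larger than some threshold
 *		write back and invalidate (civac, or the whole cache)
 *	else
 *		arm64_dcache_inv_range(base, size)
 *
 * Pure invalidation discards data, so applying it too broadly could
 * throw away dirty lines outside the caller's buffer; wb-inv is the
 * safe fallback for large ranges.
 */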

/*
 * void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t)
 * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb.
 * When the CTR_EL0.DIC bit is set icache invalidation becomes an isb.
 */
ENTRY(arm64_dic_idc_icache_sync_range)
	dsb	ishst			/* IDC: this stands in for the clean to PoU */
	isb				/* DIC: this stands in for the I-cache invalidate */
	ret
END(arm64_dic_idc_icache_sync_range)
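
/*
 * A sketch (not part of this file) of how the gating bits can be
 * tested before selecting this routine; in CTR_EL0, IDC is bit 28 and
 * DIC is bit 29:
 *
 *	mrs	x0, ctr_el0
 *	tbz	x0, #28, 1f	// IDC clear: must clean D-cache to PoU
 *	tbz	x0, #29, 1f	// DIC clear: must invalidate I-cache
 *	// Both set: the dsb; isb sequence above is sufficient.
 * 1:
 */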

/*
 * void arm64_idc_aliasing_icache_sync_range(vm_offset_t, vm_size_t)
 * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb.
 */
ENTRY(arm64_idc_aliasing_icache_sync_range)
	dsb	ishst			/* IDC: this stands in for the clean to PoU */
	ic	ialluis			/* DIC is clear: invalidate the whole I-cache */
	dsb	ish
	isb
	ret
END(arm64_idc_aliasing_icache_sync_range)
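
/*
 * Here IDC is set but DIC is clear: the clean collapses to a dsb, yet
 * the I-cache must still be invalidated explicitly, hence the ic
 * ialluis above.
 */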

/*
 * void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_aliasing_icache_sync_range)
	/*
	 * XXX Temporary solution - I-cache flush should be range based for
	 * PIPT cache or IALLUIS for VIVT or VIPT caches
	 */
/*	cache_handle_range	dcop = cvau, ic = 1, icop = ivau */
	cache_handle_range	dcop = cvau
	ic	ialluis			/* Invalidate the whole I-cache instead */
	dsb	ish
	isb
	ret
END(arm64_aliasing_icache_sync_range)
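
/*
 * Background for the XXX above: on a PIPT I-cache the commented-out
 * range-based "ic ivau" loop would suffice, but a VIVT or VIPT I-cache
 * can hold a physical line under more than one virtual index, so only
 * a full "ic ialluis" is guaranteed to remove every alias.
 */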

/*
 * int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_icache_sync_range_checked)
	adr	x5, cache_maint_fault
	SET_FAULT_HANDLER(x5, x6)
	/* XXX: See comment in arm64_aliasing_icache_sync_range */
	cache_handle_range	dcop = cvau
	ic	ialluis
	dsb	ish
	isb
	SET_FAULT_HANDLER(xzr, x6)
	mov	x0, #0			/* Success */
	ret
END(arm64_icache_sync_range_checked)
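
/*
 * Unlike the unchecked variants this returns a value: 0 on success, or
 * EFAULT via cache_maint_fault below if the maintenance sequence takes
 * a fault, so C callers can back out gracefully.
 */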

ENTRY(cache_maint_fault)
	SET_FAULT_HANDLER(xzr, x1)	/* Clear the fault handler */
	mov	x0, #EFAULT		/* Report the fault to the caller */
	ret
END(cache_maint_fault)