/* $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $ */

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 */
#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_

#include <sys/types.h>
#include <machine/armreg.h>
struct cpu_functions {

	/* CPU functions */

	void	(*cf_cpwait)		(void);	/* wait for CP15 ops to complete */

	/* MMU functions */

	u_int	(*cf_control)		(u_int bic, u_int eor);
	void	(*cf_setttb)		(u_int ttb);

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);		/* flush unified TLB */
	void	(*cf_tlb_flushID_SE)	(u_int va);	/* flush single entry */
	void	(*cf_tlb_flushD)	(void);		/* flush D-TLB */
	void	(*cf_tlb_flushD_SE)	(u_int va);
	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	ID-cache Invalidate All:
	 *		Unlike other functions, this one must never write back.
	 *		It is used to initialize the MMU when it is in an unknown
	 *		state (such as when it may have lines tagged as valid
	 *		that belong to a previous set of mappings).
	 *
	 *	I-cache Sync range:
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write-back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
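	/*
	 * Example (sketch): after copying new instructions into memory,
	 * e.g. when loading a module, the instruction stream must be
	 * synchronized before the code is executed.  "dst", "insns" and
	 * "size" are hypothetical names:
	 *
	 *	memcpy(dst, insns, size);
	 *	cpu_icache_sync_range((vm_offset_t)dst, size);
	 *
	 * Per the I-cache Sync rule above, the implementation performs
	 * any D-cache write-back the synchronization requires.
	 */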
	void	(*cf_icache_sync_range)	(vm_offset_t, vm_size_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range) (vm_offset_t, vm_size_t);
	void	(*cf_dcache_inv_range)	(vm_offset_t, vm_size_t);
	void	(*cf_dcache_wb_range)	(vm_offset_t, vm_size_t);

	void	(*cf_idcache_inv_all)	(void);
	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range) (vm_offset_t, vm_size_t);

	void	(*cf_l2cache_wbinv_all)	(void);
	void	(*cf_l2cache_wbinv_range) (vm_offset_t, vm_size_t);
	void	(*cf_l2cache_inv_range)	(vm_offset_t, vm_size_t);
	void	(*cf_l2cache_wb_range)	(vm_offset_t, vm_size_t);
	void	(*cf_l2cache_drain_writebuf) (void);
	/* Other functions */

	void	(*cf_drain_writebuf)	(void);		/* drain write buffer */

	void	(*cf_sleep)		(int mode);

	/* Soft functions */

	void	(*cf_context_switch)	(void);

	void	(*cf_setup)		(void);		/* CPU-specific setup */
};
extern struct cpu_functions cpufuncs;
extern u_int cputype;
#define	cpu_cpwait()		cpufuncs.cf_cpwait()

#define	cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define	cpu_setttb(t)		cpufuncs.cf_setttb(t)

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_idcache_inv_all()	cpufuncs.cf_idcache_inv_all()
#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))

#define	cpu_l2cache_wbinv_all()	cpufuncs.cf_l2cache_wbinv_all()
#define	cpu_l2cache_wb_range(a, s) cpufuncs.cf_l2cache_wb_range((a), (s))
#define	cpu_l2cache_inv_range(a, s) cpufuncs.cf_l2cache_inv_range((a), (s))
#define	cpu_l2cache_wbinv_range(a, s) cpufuncs.cf_l2cache_wbinv_range((a), (s))
#define	cpu_l2cache_drain_writebuf() cpufuncs.cf_l2cache_drain_writebuf()

#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define	cpu_setup()		cpufuncs.cf_setup()
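/*
 * Example (sketch): keeping a DMA buffer coherent with the macros above.
 * "buf" and "len" are hypothetical; the exact sequence a driver needs is
 * platform-dependent and is normally hidden behind bus_dmamap_sync().
 *
 *	cpu_dcache_wb_range((vm_offset_t)buf, len);	CPU wrote the buffer,
 *	cpu_l2cache_wb_range((vm_offset_t)buf, len);	device is about to read it
 *
 *	(device DMA runs to completion)
 *
 *	cpu_l2cache_inv_range((vm_offset_t)buf, len);	device wrote the buffer,
 *	cpu_dcache_inv_range((vm_offset_t)buf, len);	CPU is about to read it
 */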
int	set_cpufuncs		(void);
#define	ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define	ARCHITECTURE_NOT_SUPPORTED	2	/* not known */
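/*
 * Typical early-boot use (sketch; the panic message is illustrative).
 * set_cpufuncs() fills in the cpufuncs vector for the CPU it identifies
 * and returns nonzero on failure:
 *
 *	if (set_cpufuncs() != 0)
 *		panic("unsupported CPU type");
 *	cpu_setup();		(dispatches to the cf_setup hook selected above)
 */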
void	cpufunc_nullop		(void);
u_int	cpu_ident		(void);
u_int	cpufunc_control		(u_int clear, u_int bic);
void	cpu_domains		(u_int domains);
u_int	cpu_faultstatus		(void);
u_int	cpu_faultaddress	(void);
u_int	cpu_get_control		(void);
#if defined(CPU_FA526)
void	fa526_setup		(void);
void	fa526_setttb		(u_int ttb);
void	fa526_context_switch	(void);
void	fa526_cpu_sleep		(int);
void	fa526_tlb_flushID_SE	(u_int);

void	fa526_icache_sync_range	(vm_offset_t start, vm_size_t end);
void	fa526_dcache_wbinv_all	(void);
void	fa526_dcache_wbinv_range (vm_offset_t start, vm_size_t end);
void	fa526_dcache_inv_range	(vm_offset_t start, vm_size_t end);
void	fa526_dcache_wb_range	(vm_offset_t start, vm_size_t end);
void	fa526_idcache_wbinv_all	(void);
void	fa526_idcache_wbinv_range (vm_offset_t start, vm_size_t end);
#endif
#if defined(CPU_ARM9) || defined(CPU_ARM9E)
void	arm9_setttb		(u_int);
void	arm9_tlb_flushID_SE	(u_int va);
void	arm9_context_switch	(void);
#endif
#if defined(CPU_ARM9)
void	arm9_icache_sync_range	(vm_offset_t, vm_size_t);

void	arm9_dcache_wbinv_all	(void);
void	arm9_dcache_wbinv_range	(vm_offset_t, vm_size_t);
void	arm9_dcache_inv_range	(vm_offset_t, vm_size_t);
void	arm9_dcache_wb_range	(vm_offset_t, vm_size_t);

void	arm9_idcache_wbinv_all	(void);
void	arm9_idcache_wbinv_range (vm_offset_t, vm_size_t);

void	arm9_setup		(void);

extern unsigned arm9_dcache_sets_max;
extern unsigned arm9_dcache_sets_inc;
extern unsigned arm9_dcache_index_max;
extern unsigned arm9_dcache_index_inc;
#endif
#if defined(CPU_ARM9E)
void	arm10_setup		(void);

u_int	sheeva_control_ext	(u_int, u_int);
void	sheeva_cpu_sleep	(int);
void	sheeva_setttb		(u_int);
void	sheeva_dcache_wbinv_range (vm_offset_t, vm_size_t);
void	sheeva_dcache_inv_range	(vm_offset_t, vm_size_t);
void	sheeva_dcache_wb_range	(vm_offset_t, vm_size_t);
void	sheeva_idcache_wbinv_range (vm_offset_t, vm_size_t);

void	sheeva_l2cache_wbinv_range (vm_offset_t, vm_size_t);
void	sheeva_l2cache_inv_range (vm_offset_t, vm_size_t);
void	sheeva_l2cache_wb_range	(vm_offset_t, vm_size_t);
void	sheeva_l2cache_wbinv_all (void);
#endif
#if defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
void	armv7_cpu_sleep		(int);
void	armv7_setup		(void);

void	cortexa_setup		(void);
#endif

#if defined(CPU_MV_PJ4B)
void	pj4b_config		(void);
void	pj4bv7_setup		(void);
#endif
#if defined(CPU_ARM1176)
void	arm11x6_setup		(void);
void	arm11x6_sleep		(int);	/* no ref. for errata */
#endif
#if defined(CPU_ARM9E)
void	armv5_ec_setttb			(u_int);

void	armv5_ec_icache_sync_range	(vm_offset_t, vm_size_t);

void	armv5_ec_dcache_wbinv_all	(void);
void	armv5_ec_dcache_wbinv_range	(vm_offset_t, vm_size_t);
void	armv5_ec_dcache_inv_range	(vm_offset_t, vm_size_t);
void	armv5_ec_dcache_wb_range	(vm_offset_t, vm_size_t);

void	armv5_ec_idcache_wbinv_all	(void);
void	armv5_ec_idcache_wbinv_range	(vm_offset_t, vm_size_t);
#endif
#if defined(CPU_ARM9) || defined(CPU_ARM9E) ||	\
  defined(CPU_FA526) ||				\
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342)

void	armv4_tlb_flushID	(void);
void	armv4_tlb_flushD	(void);
void	armv4_tlb_flushD_SE	(u_int va);

void	armv4_drain_writebuf	(void);
void	armv4_idcache_inv_all	(void);
#endif
#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342)
void	xscale_cpwait		(void);

void	xscale_cpu_sleep	(int mode);

u_int	xscale_control		(u_int clear, u_int bic);

void	xscale_setttb		(u_int ttb);

void	xscale_tlb_flushID_SE	(u_int va);

void	xscale_cache_flushID	(void);
void	xscale_cache_flushI	(void);
void	xscale_cache_flushD	(void);
void	xscale_cache_flushD_SE	(u_int entry);

void	xscale_cache_cleanID	(void);
void	xscale_cache_cleanD	(void);
void	xscale_cache_cleanD_E	(u_int entry);

void	xscale_cache_clean_minidata (void);

void	xscale_cache_purgeID	(void);
void	xscale_cache_purgeID_E	(u_int entry);
void	xscale_cache_purgeD	(void);
void	xscale_cache_purgeD_E	(u_int entry);

void	xscale_cache_syncI	(void);
void	xscale_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
void	xscale_cache_cleanD_rng	(vm_offset_t start, vm_size_t end);
void	xscale_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
void	xscale_cache_purgeD_rng	(vm_offset_t start, vm_size_t end);
void	xscale_cache_syncI_rng	(vm_offset_t start, vm_size_t end);
void	xscale_cache_flushD_rng	(vm_offset_t start, vm_size_t end);

void	xscale_context_switch	(void);

void	xscale_setup		(void);
#endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_81342

void	xscalec3_l2cache_purge	(void);
void	xscalec3_cache_purgeID	(void);
void	xscalec3_cache_purgeD	(void);
void	xscalec3_cache_cleanID	(void);
void	xscalec3_cache_cleanD	(void);
void	xscalec3_cache_syncI	(void);

void	xscalec3_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_syncI_rng (vm_offset_t start, vm_size_t end);

void	xscalec3_l2cache_flush_rng (vm_offset_t, vm_size_t);
void	xscalec3_l2cache_clean_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_l2cache_purge_rng (vm_offset_t start, vm_size_t end);

void	xscalec3_setttb		(u_int ttb);
void	xscalec3_context_switch	(void);

#endif /* CPU_XSCALE_81342 */
/*
 * Macros for manipulating CPU interrupts.
 */
#if __ARM_ARCH < 6
#define	__ARM_INTR_BITS		(PSR_I | PSR_F)
#else
/* The asynchronous-abort mask bit (PSR_A) exists only on ARMv6 and later. */
#define	__ARM_INTR_BITS		(PSR_I | PSR_F | PSR_A)
#endif
static __inline uint32_t
__set_cpsr(uint32_t bic, uint32_t eor)
{
	uint32_t ret, tmp;

	__asm __volatile(
		"mrs %0, cpsr\n"	/* Get the CPSR */
		"bic %1, %0, %2\n"	/* Clear bits */
		"eor %1, %1, %3\n"	/* XOR bits */
		"msr cpsr_xc, %1\n"	/* Set the CPSR */
		: "=&r" (ret), "=&r" (tmp)
		: "r" (bic), "r" (eor) : "memory");

	/* Return the previous CPSR so callers can restore it later. */
	return (ret);
}
static __inline uint32_t
disable_interrupts(uint32_t mask)
{

	return (__set_cpsr(mask & __ARM_INTR_BITS, mask & __ARM_INTR_BITS));
}

static __inline uint32_t
enable_interrupts(uint32_t mask)
{

	return (__set_cpsr(mask & __ARM_INTR_BITS, 0));
}

static __inline uint32_t
restore_interrupts(uint32_t old_cpsr)
{

	return (__set_cpsr(__ARM_INTR_BITS, old_cpsr & __ARM_INTR_BITS));
}

static __inline register_t
intr_disable(void)
{

	return (disable_interrupts(PSR_I | PSR_F));
}

static __inline void
intr_restore(register_t s)
{

	restore_interrupts(s);
}
#undef __ARM_INTR_BITS
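/*
 * Example: the usual save/disable/restore pattern for a short critical
 * section.  Saving the old state, rather than unconditionally calling
 * enable_interrupts(), preserves the caller's interrupt mask:
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	(code that must not be interrupted)
 *	intr_restore(s);
 */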
/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */
void	set_stackptr	(u_int mode, u_int address);
u_int	get_stackptr	(u_int mode);
/*
 * CPU functions from locore.S
 */
void	cpu_reset	(void) __attribute__((__noreturn__));
/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
extern int	arm_picache_size;
extern int	arm_picache_line_size;
extern int	arm_picache_ways;

extern int	arm_pdcache_size;	/* and unified */
extern int	arm_pdcache_line_size;
extern int	arm_pdcache_ways;

extern int	arm_pcache_type;
extern int	arm_pcache_unified;

extern int	arm_dcache_align;
extern int	arm_dcache_align_mask;

extern u_int	arm_cache_level;
extern u_int	arm_cache_loc;
extern u_int	arm_cache_type[14];
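/*
 * Example (sketch): line-based cache operations act on whole cache lines,
 * so callers often round an arbitrary range out to line boundaries before
 * operating on it.  "va" and "len" are hypothetical:
 *
 *	vm_offset_t sva = va & ~arm_dcache_align_mask;
 *	vm_size_t adj = (va - sva) + len;
 *
 *	cpu_dcache_wb_range(sva,
 *	    (adj + arm_dcache_align_mask) & ~arm_dcache_align_mask);
 */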
/*
 * This matches the instruction used by GDB for software
 * breakpoints.
 */
#endif /* _MACHINE_CPUFUNC_H_ */

/* End of cpufunc.h */