/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef MACHINE_CPU_V6_H
#define MACHINE_CPU_V6_H

/* There are no user-serviceable parts here; they may change without notice. */
#ifndef _KERNEL
#error Only include this file in the kernel
#else
#include <machine/acle-compat.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>

#define CPU_ASID_KERNEL 0

vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s

#define _RF0(fname, aname...)                                           \
static __inline register_t                                              \
fname(void)                                                             \
{                                                                       \
        register_t reg;                                                 \
        __asm __volatile("mrc\t" _FX(aname): "=r" (reg));               \
        return (reg);                                                   \
}

#define _R64F0(fname, aname)                                            \
static __inline uint64_t                                                \
fname(void)                                                             \
{                                                                       \
        uint64_t reg;                                                   \
        __asm __volatile("mrrc\t" _FX(aname): "=r" (reg));              \
        return (reg);                                                   \
}

#define _WF0(fname, aname...)                                           \
static __inline void                                                    \
fname(void)                                                             \
{                                                                       \
        __asm __volatile("mcr\t" _FX(aname));                           \
}

#define _WF1(fname, aname...)                                           \
static __inline void                                                    \
fname(register_t reg)                                                   \
{                                                                       \
        __asm __volatile("mcr\t" _FX(aname):: "r" (reg));               \
}

#define _W64F1(fname, aname...)                                         \
static __inline void                                                    \
fname(uint64_t reg)                                                     \
{                                                                       \
        __asm __volatile("mcrr\t" _FX(aname):: "r" (reg));              \
}
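
/*
 * For illustration only (not compiled): assuming the usual definition of
 * CP15_MIDR(rr) in <machine/sysreg.h>, an invocation such as
 *
 *	_RF0(cp15_midr_get, CP15_MIDR(%0))
 *
 * generates roughly the accessor below.  The variadic _FX() macro
 * stringifies the already-expanded CP15 operand list, which the compiler
 * then concatenates with the "mrc\t" literal inside the inline asm:
 *
 *	static __inline register_t
 *	cp15_midr_get(void)
 *	{
 *		register_t reg;
 *		__asm __volatile("mrc\t" "p15, 0, %0, c0, c0, 0" : "=r" (reg));
 *		return (reg);
 *	}
 */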

/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)               /* Invalidate entire unified TLB */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)           /* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))         /* Invalidate unified TLB by ASID */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))     /* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))         /* Invalidate unified TLB by MVA, all ASID */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))     /* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))           /* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)                 /* Branch predictor invalidate all */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)             /* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))             /* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))         /* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))             /* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))           /* Data cache clean by MVA PoC */
#if __ARM_ARCH >= 7
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))           /* Data cache clean by MVA PoU */
#endif
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))               /* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))           /* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))               /* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)               /* Instruction cache invalidate all PoU */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)           /* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))           /* Instruction cache invalidate by MVA PoU */

/*
 * Publicly accessible functions
 */

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
#if __ARM_ARCH >= 7
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
#endif
/* ARMv6+ and XScale */
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
#if __ARM_ARCH >= 6
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))
#endif

/* CPU ID registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
_RF0(cp15_cbar_get, CP15_CBAR(%0))

/* Performance Monitor registers */

#if __ARM_ARCH == 6 && defined(CPU_ARM1176)
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
#elif __ARM_ARCH > 6
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))
#endif
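
/*
 * Illustrative sketch only: with the armv7 accessors above, a minimal
 * cycle-count measurement could look like the fragment below.  It assumes
 * nothing else (e.g. hwpmc) owns the PMU.  PMCR bit 0 enables the counters,
 * PMCR bit 2 resets the cycle counter, and PMCNTENSET bit 31 starts it:
 *
 *	uint32_t cycles;
 *
 *	cp15_pmcr_set(cp15_pmcr_get() | 0x5);	// enable PMU, reset CCNT
 *	cp15_pmcnten_set(0x80000000u);		// start the cycle counter
 *	// ... code being measured ...
 *	cycles = cp15_pmccntr_get();		// read elapsed cycles
 */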

_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
_RF0(cp15_tpidrprw_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrprw_set, CP15_TPIDRPRW(%0))

/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0))
_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0))
_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0))
_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0))
_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0))
_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0))
_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0))
_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0))
_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0))
_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0))
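
/*
 * Illustrative sketch only: converting the physical counter to elapsed time
 * assumes firmware programmed CNTFRQ and that the counter is accessible at
 * the current exception level:
 *
 *	uint64_t ticks = cp15_cntpct_get();
 *	uint32_t freq = cp15_cntfrq_get();		// ticks per second
 *	uint64_t us = (ticks * 1000000ULL) / freq;	// counter value in usecs
 */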

#undef  _FX
#undef  _RF0
#undef  _R64F0
#undef  _WF0
#undef  _WF1
#undef  _W64F1

#if __ARM_ARCH >= 6
/*
 * Cache and TLB maintenance operations for armv6+ code.  The #else block
 * provides armv4/v5 implementations for a few of these used in common code.
 */

/*
 * TLB maintenance operations.
 */

/* Local (i.e., not broadcasting) operations. */

/* Flush all TLB entries (even global). */
static __inline void
tlb_flush_all_local(void)
{

        dsb();
        _CP15_TLBIALL();
        dsb();
}

/* Flush all non-global TLB entries. */
static __inline void
tlb_flush_all_ng_local(void)
{

        dsb();
        _CP15_TLBIASID(CPU_ASID_KERNEL);
        dsb();
}

/* Flush a single TLB entry (even global). */
static __inline void
tlb_flush_local(vm_offset_t va)
{

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

        dsb();
        _CP15_TLBIMVA(va | CPU_ASID_KERNEL);
        dsb();
}

/* Flush a range of TLB entries (even global). */
static __inline void
tlb_flush_range_local(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
        KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
            size));

        dsb();
        for (; va < eva; va += PAGE_SIZE)
                _CP15_TLBIMVA(va | CPU_ASID_KERNEL);
        dsb();
}

/* Broadcasting operations. */
#if __ARM_ARCH >= 7 && defined(SMP)

static __inline void
tlb_flush_all(void)
{

        dsb();
        _CP15_TLBIALLIS();
        dsb();
}

static __inline void
tlb_flush_all_ng(void)
{

        dsb();
        _CP15_TLBIASIDIS(CPU_ASID_KERNEL);
        dsb();
}

static __inline void
tlb_flush(vm_offset_t va)
{

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

        dsb();
        _CP15_TLBIMVAAIS(va);
        dsb();
}

static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
        KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
            size));

        dsb();
        for (; va < eva; va += PAGE_SIZE)
                _CP15_TLBIMVAAIS(va);
        dsb();
}
#else /* SMP */

#define tlb_flush_all()                 tlb_flush_all_local()
#define tlb_flush_all_ng()              tlb_flush_all_ng_local()
#define tlb_flush(va)                   tlb_flush_local(va)
#define tlb_flush_range(va, size)       tlb_flush_range_local(va, size)

#endif /* SMP */
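
/*
 * Illustrative sketch only: a typical caller updates a page table entry and
 * then flushes the stale translation; the leading dsb() inside the flush
 * routines makes the PTE store visible to the hardware table walk (pte and
 * npte are hypothetical names here):
 *
 *	*pte = npte;		// store the new page table entry
 *	tlb_flush(va);		// discard any stale TLB entry for va
 */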

/*
 * Cache maintenance operations.
 */

/* Sync I and D caches to PoU */
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined(SMP)
                _CP15_DCCMVAU(va);
#else
                _CP15_DCCMVAC(va);
#endif
        }
        dsb();
#if __ARM_ARCH >= 7 && defined(SMP)
        _CP15_ICIALLUIS();
#else
        _CP15_ICIALLU();
#endif
        dsb();
        isb();
}
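
/*
 * Illustrative sketch only: after storing newly generated instructions (e.g.
 * when a kernel module is loaded), the new code must be written back to the
 * point of unification and stale I-cache contents discarded before it is
 * executed:
 *
 *	memcpy((void *)va, insns, size);	// write the new instructions
 *	icache_sync(va, size);			// D-cache to PoU, I-cache inv
 */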

/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{
#if __ARM_ARCH >= 7 && defined(SMP)
        _CP15_ICIALLUIS();
#else
        _CP15_ICIALLU();
#endif
        dsb();
        isb();
}

/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{
#if __ARM_ARCH >= 7 && defined(SMP)
        _CP15_BPIALLIS();
#else
        _CP15_BPIALL();
#endif
        dsb();
        isb();
}

/* Write back D-cache to PoU */
static __inline void
dcache_wb_pou(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined(SMP)
                _CP15_DCCMVAU(va);
#else
                _CP15_DCCMVAC(va);
#endif
        }
        dsb();
}

/*
 * Invalidate D-cache to PoC
 *
 * Caches are invalidated from outermost to innermost because fresh cachelines
 * flow in this direction.  Within the given range, if no cache held a dirty
 * cacheline beforehand, no stale cacheline should remain in any cache after
 * this operation finishes.
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        /* invalidate L2 first */
        cpu_l2cache_inv_range(pa, size);

        /* then L1 */
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCIMVAC(va);
        }
        dsb();
}

/*
 * Discard D-cache lines to PoC, prior to overwrite by DMA engine.
 *
 * Normal invalidation does L2 then L1 to ensure that stale data from L2 doesn't
 * flow into L1 while invalidating.  This routine is intended to be used only
 * when invalidating a buffer before a DMA operation loads new data into memory.
 * The concern in this case is that a dirty line could be evicted to main
 * memory, overwriting the DMA data.  For that reason, L1 is invalidated first,
 * so that a line evicted from L1 cannot flow into L2 after L2 has already been
 * invalidated.
 */
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t eva = va + size;

        /* invalidate L1 first */
        dsb();
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCIMVAC(va);
        }
        dsb();

        /* then L2 */
        cpu_l2cache_inv_range(pa, size);
}

/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost because dirty cachelines
 * flow in this direction.  Within the given range, no dirty cacheline should
 * remain in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCCMVAC(va);
        }
        dsb();

        cpu_l2cache_wb_range(pa, size);
}

/* Write back and invalidate D-cache to PoC */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t va;
        vm_offset_t eva = sva + size;

        dsb();
        /* write back L1 first */
        va = sva & ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCCMVAC(va);
        }
        dsb();

        /* then write back and invalidate L2 */
        cpu_l2cache_wbinv_range(pa, size);

        /* then invalidate L1 */
        va = sva & ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCIMVAC(va);
        }
        dsb();
}

/* Set TTB0 register */
static __inline void
cp15_ttbr_set(uint32_t reg)
{
        dsb();
        _CP15_TTB_SET(reg);
        dsb();
        _CP15_BPIALL();
        dsb();
        isb();
        tlb_flush_all_ng_local();
}

#else /* ! __ARM_ARCH >= 6 */

/*
 * armv4/5 compatibility shims.
 *
 * These functions provide armv4 cache maintenance using the new armv6 names.
 * Included here are just the functions actually used now in common code; it may
 * be necessary to add things here over time.
 *
 * The callers of the dcache functions expect these routines to handle address
 * and size values which are not aligned to cacheline boundaries; the armv4 and
 * armv5 asm code handles that.
 */

static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

        cpu_dcache_inv_range(va, size);
        cpu_l2cache_inv_range(va, size);
}

static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

        /* See the armv6 code above for why we do L2 before L1 in this case. */
        cpu_l2cache_inv_range(va, size);
        cpu_dcache_inv_range(va, size);
}

static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

        cpu_dcache_wb_range(va, size);
        cpu_l2cache_wb_range(va, size);
}

#endif /* __ARM_ARCH >= 6 */

#endif /* _KERNEL */

#endif /* !MACHINE_CPU_V6_H */