/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/* $NetBSD: cpu.h,v 1.2 2001/02/23 21:23:52 reinoud Exp $ */

#ifndef MACHINE_CPU_H
#define MACHINE_CPU_H

#include <machine/armreg.h>
#include <machine/frame.h>

#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>
/*
 * Some kernel modules (the dtrace modules, for example) are compiled
 * unconditionally with -DSMP. Although it looks like a bug,
 * handle this case here and in the #elif condition in the ARM_SMP_UP macro.
 */
#if __ARM_ARCH <= 6 && defined(SMP) && !defined(KLD_MODULE)
#error SMP option is not supported on ARMv6
#endif

#if __ARM_ARCH <= 6 && defined(SMP_ON_UP)
#error SMP_ON_UP option is only supported on ARMv7+ CPUs
#endif

#if !defined(SMP) && defined(SMP_ON_UP)
#error SMP option must be defined for SMP_ON_UP option
#endif

#define CPU_ASID_KERNEL 0

#if defined(SMP_ON_UP)
#define ARM_SMP_UP(smp_code, up_code)				\
do {								\
	if (cpuinfo.mp_ext != 0) {				\
		smp_code;					\
	} else {						\
		up_code;					\
	}							\
} while (0)
#elif defined(SMP) && __ARM_ARCH > 6
#define ARM_SMP_UP(smp_code, up_code)				\
do {								\
	smp_code;						\
} while (0)
#else
#define ARM_SMP_UP(smp_code, up_code)				\
do {								\
	up_code;						\
} while (0)
#endif
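
/*
 * Usage sketch (illustrative, not part of the original header): callers pass
 * the broadcasting (inner-shareable) form of an operation as smp_code and the
 * purely local form as up_code; the macro picks one at run time for
 * SMP_ON_UP kernels and at compile time otherwise, e.g.:
 *
 *	dsb();
 *	ARM_SMP_UP(
 *	    _CP15_TLBIALLIS(),	// SMP: broadcast to the inner-shareable domain
 *	    _CP15_TLBIALL()	// UP: this core only
 *	);
 *	dsb();
 */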

void dcache_wbinv_poc_all(void); /* !!! NOT SMP coherent function !!! */
vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

#ifdef DEV_PMU
#include <sys/pcpu.h>
#define	PMU_OVSR_C	0x80000000	/* Cycle Counter */
extern uint32_t ccnt_hi[MAXCPU];
extern int pmu_attched;
#endif /* DEV_PMU */

#define sev()	__asm __volatile("sev" : : : "memory")
#define wfe()	__asm __volatile("wfe" : : : "memory")
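
/*
 * Usage sketch (illustrative): the classic event-wait pairing. A waiter parks
 * in WFE until the releaser publishes a store and signals the event; the
 * "flag" variable here is hypothetical:
 *
 *	while (atomic_load_acq_32(&flag) == 0)	// waiter
 *		wfe();
 *
 *	atomic_store_rel_32(&flag, 1);		// releaser
 *	sev();
 */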

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s

#define _RF0(fname, aname...)						\
static __inline uint32_t						\
fname(void)								\
{									\
	uint32_t reg;							\
	__asm __volatile("mrc\t" _FX(aname): "=r" (reg));		\
	return (reg);							\
}

#define _R64F0(fname, aname)						\
static __inline uint64_t						\
fname(void)								\
{									\
	uint64_t reg;							\
	__asm __volatile("mrrc\t" _FX(aname): "=r" (reg));		\
	return (reg);							\
}

#define _WF0(fname, aname...)						\
static __inline void							\
fname(void)								\
{									\
	__asm __volatile("mcr\t" _FX(aname));				\
}

#define _WF1(fname, aname...)						\
static __inline void							\
fname(uint32_t reg)							\
{									\
	__asm __volatile("mcr\t" _FX(aname):: "r" (reg));		\
}

#define _W64F1(fname, aname...)						\
static __inline void							\
fname(uint64_t reg)							\
{									\
	__asm __volatile("mcrr\t" _FX(aname):: "r" (reg));		\
}
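
/*
 * Expansion sketch (illustrative): _RF0(cp15_midr_get, CP15_MIDR(%0)) below
 * generates roughly
 *
 *	static __inline uint32_t
 *	cp15_midr_get(void)
 *	{
 *		uint32_t reg;
 *		__asm __volatile("mrc\tp15, 0, %0, c0, c0, 0" : "=r" (reg));
 *		return (reg);
 *	}
 *
 * where the CP15_MIDR() operand string comes from <machine/sysreg.h> and
 * encodes the coprocessor number, opcodes and coprocessor registers of the
 * MRC instruction.
 */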

/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)		/* Invalidate entire unified TLB */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)		/* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))		/* Invalidate unified TLB by ASID */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))	/* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))		/* Invalidate unified TLB by MVA, all ASID */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))	/* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))		/* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)			/* Branch predictor invalidate all */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)		/* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))		/* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))		/* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))		/* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))		/* Data cache clean by MVA PoC */
#if __ARM_ARCH >= 7
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))		/* Data cache clean by MVA PoU */
#endif
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))		/* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))		/* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))		/* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)		/* Instruction cache invalidate all PoU */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)		/* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))		/* Instruction cache invalidate */

/*
 * Publicly accessible functions
 */

/* CP14 Debug Registers */
_RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0))
_RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0))
_RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0))
_RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0))
_RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0))

_WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0))
_WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0))
_WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0))
_WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0))

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
#if __ARM_ARCH >= 7
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
#endif
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
_WF1(cp15_ats1cur_set, CP15_ATS1CUR(%0))
_WF1(cp15_ats1cuw_set, CP15_ATS1CUW(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))

/* CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
_RF0(cp15_cbar_get, CP15_CBAR(%0))

/* Performance Monitor registers */

#if __ARM_ARCH == 6 && defined(CPU_ARM1176)
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
#elif __ARM_ARCH > 6
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))
#endif

_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
_RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0))

/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0))
_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0))
_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0))
_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0))
_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0))
_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0))
_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0))
_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0))
_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0))
_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0))
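
/*
 * Usage sketch (illustrative): a polling microsecond delay built on the
 * physical counter, assuming the Generic Timer is present and firmware has
 * programmed CNTFRQ:
 *
 *	uint64_t ticks = (uint64_t)usec * cp15_cntfrq_get() / 1000000;
 *	uint64_t start = cp15_cntpct_get();
 *
 *	while (cp15_cntpct_get() - start < ticks)
 *		cpu_spinwait();
 */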

/*
 * TLB maintenance operations.
 */

/* Local (i.e. not broadcasting) operations. */

/* Flush all TLB entries (even global). */
static __inline void
tlb_flush_all_local(void)
{

	dsb();
	_CP15_TLBIALL();
	dsb();
}

/* Flush all not global TLB entries. */
static __inline void
tlb_flush_all_ng_local(void)
{

	dsb();
	_CP15_TLBIASID(CPU_ASID_KERNEL);
	dsb();
}

/* Flush single TLB entry (even global). */
static __inline void
tlb_flush_local(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Flush range of TLB entries (even global). */
static __inline void
tlb_flush_range_local(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Broadcasting operations. */
#if __ARM_ARCH >= 7 && defined(SMP)

static __inline void
tlb_flush_all(void)
{

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIALLIS(),
	    _CP15_TLBIALL()
	);
	dsb();
}

static __inline void
tlb_flush_all_ng(void)
{

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIASIDIS(CPU_ASID_KERNEL),
	    _CP15_TLBIASID(CPU_ASID_KERNEL)
	);
	dsb();
}

static __inline void
tlb_flush(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIMVAAIS(va),
	    _CP15_TLBIMVA(va | CPU_ASID_KERNEL)
	);
	dsb();
}

static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	ARM_SMP_UP(
	    {
		for (; va < eva; va += PAGE_SIZE)
			_CP15_TLBIMVAAIS(va);
	    },
	    {
		for (; va < eva; va += PAGE_SIZE)
			_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	    }
	);
	dsb();
}
#else /* __ARM_ARCH < 7 */

#define tlb_flush_all()			tlb_flush_all_local()
#define tlb_flush_all_ng()		tlb_flush_all_ng_local()
#define tlb_flush(va)			tlb_flush_local(va)
#define tlb_flush_range(va, size)	tlb_flush_range_local(va, size)

#endif /* __ARM_ARCH < 7 */
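
/*
 * Usage sketch (illustrative): after the kernel modifies a PTE, the stale
 * translation must be flushed before the new mapping is relied upon; the
 * pte_store() helper is hypothetical:
 *
 *	pte_store(ptep, npte);
 *	tlb_flush(va);		// one page; broadcast on SMP kernels
 */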

/*
 * Cache maintenance operations.
 */

/* Sync I and D caches to PoU */
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;

	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7
		_CP15_DCCMVAU(va);
#else
		_CP15_DCCMVAC(va);
#endif
	}
	dsb();
	ARM_SMP_UP(
	    _CP15_ICIALLUIS(),
	    _CP15_ICIALLU()
	);
	dsb();
	isb();
}

/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{

	ARM_SMP_UP(
	    _CP15_ICIALLUIS(),
	    _CP15_ICIALLU()
	);
	dsb();
	isb();
}

/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{

	ARM_SMP_UP(
	    _CP15_BPIALLIS(),
	    _CP15_BPIALL()
	);
	dsb();
	isb();
}

/* Write back D-cache to PoU */
static __inline void
dcache_wb_pou(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7
		_CP15_DCCMVAU(va);
#else
		_CP15_DCCMVAC(va);
#endif
	}
	dsb();
}

/*
 * Invalidate D-cache to PoC
 *
 * Caches are invalidated from outermost to innermost as fresh cachelines
 * flow in this direction. In the given range, if there was no dirty cacheline
 * in any cache before, no stale cacheline should remain in them after this
 * operation finishes.
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	/* invalidate L2 first */
	cpu_l2cache_inv_range(pa, size);

	/* then L1 */
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/*
 * Discard D-cache lines to PoC, prior to overwrite by a DMA engine.
 *
 * Normal invalidation does L2 then L1 to ensure that stale data from L2 doesn't
 * flow into L1 while invalidating. This routine is intended to be used only
 * when invalidating a buffer before a DMA operation loads new data into memory.
 * The concern in this case is that dirty lines could be evicted to main memory,
 * overwriting the DMA data. For that reason, the L1 is done first to ensure
 * that an evicted L1 line doesn't flow to L2 after the L2 has been cleaned.
 */
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	/* invalidate L1 first */
	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();

	/* then L2 */
	cpu_l2cache_inv_range(pa, size);
}

/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost as dirty cachelines
 * flow in this direction. In the given range, no dirty cacheline should remain
 * in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	cpu_l2cache_wb_range(pa, size);
}
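
/*
 * Usage sketch (illustrative): a typical DMA buffer handoff on a system with
 * non-snooping DMA. The buf_va/buf_pa/buf_size names are hypothetical; real
 * drivers obtain them from busdma:
 *
 *	// CPU -> device: push dirty lines to memory before the device reads.
 *	dcache_wb_poc(buf_va, buf_pa, buf_size);
 *
 *	// device -> CPU: discard our lines before the device writes memory,
 *	dcache_inv_poc_dma(buf_va, buf_pa, buf_size);
 *	// ...and invalidate again after DMA completion, before the CPU reads.
 *	dcache_inv_poc(buf_va, buf_pa, buf_size);
 */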

/* Write back and invalidate D-cache to PoC */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va;
	vm_offset_t eva = sva + size;

	dsb();
	/* write back L1 first */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	/* then write back and invalidate L2 */
	cpu_l2cache_wbinv_range(pa, size);

	/* then invalidate L1 */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/* Set TTB0 register */
static __inline void
cp15_ttbr_set(uint32_t reg)
{
	dsb();
	_CP15_TTB_SET(reg);
	dsb();
	_CP15_BPIALL();
	dsb();
	isb();
	tlb_flush_all_ng_local();
}

/*
 * Functions for address checking:
 *
 *  cp15_ats1cpr_check() ... check stage 1 privileged (PL1) read access
 *  cp15_ats1cpw_check() ... check stage 1 privileged (PL1) write access
 *  cp15_ats1cur_check() ... check stage 1 unprivileged (PL0) read access
 *  cp15_ats1cuw_check() ... check stage 1 unprivileged (PL0) write access
 *
 * They must be called while interrupts are disabled to get a consistent result.
 */
static __inline int
cp15_ats1cpr_check(vm_offset_t addr)
{

	cp15_ats1cpr_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cpw_check(vm_offset_t addr)
{

	cp15_ats1cpw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cur_check(vm_offset_t addr)
{

	cp15_ats1cur_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cuw_check(vm_offset_t addr)
{

	cp15_ats1cuw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}
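
/*
 * Usage sketch (illustrative): probe whether the current address space allows
 * an unprivileged write to uva before touching it from the kernel; interrupts
 * stay disabled so the PAR read matches the translation we requested:
 *
 *	register_t s;
 *	int error;
 *
 *	s = intr_disable();
 *	error = cp15_ats1cuw_check(uva);
 *	intr_restore(s);
 *	if (error != 0)
 *		return (EFAULT);
 */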

static __inline uint64_t
get_cyclecount(void)
{
#if __ARM_ARCH > 6 || (__ARM_ARCH == 6 && defined(CPU_ARM1176))
#if (__ARM_ARCH > 6) && defined(DEV_PMU)
	if (pmu_attched) {
		u_int cpu;
		uint64_t h, h2;
		uint32_t l, r;

		cpu = PCPU_GET(cpuid);
		h = (uint64_t)atomic_load_acq_32(&ccnt_hi[cpu]);
		l = cp15_pmccntr_get();
		/* In case interrupts are disabled we need to check for overflow. */
		r = cp15_pmovsr_get();
		if (r & PMU_OVSR_C) {
			atomic_add_32(&ccnt_hi[cpu], 1);
			/* Clear the event. */
			cp15_pmovsr_set(PMU_OVSR_C);
		}
		/* Make sure there was no wrap-around while we read the lo half. */
		h2 = (uint64_t)atomic_load_acq_32(&ccnt_hi[cpu]);
		if (h != h2)
			l = cp15_pmccntr_get();
		return (h2 << 32 | l);
	} else
#endif
		return (cp15_pmccntr_get());
#else /* No performance counters, so use nanotime(9). */
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (uint64_t)1000000000ull + tv.tv_nsec);
#endif
}
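
/*
 * Usage sketch (illustrative): cycle-level timing of a short code sequence.
 * The delta is only meaningful on the same core at a fixed clock frequency:
 *
 *	uint64_t c0, delta;
 *
 *	c0 = get_cyclecount();
 *	do_work();		// hypothetical workload
 *	delta = get_cyclecount() - c0;
 */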

#define TRAPF_USERMODE(frame)	((frame->tf_spsr & PSR_MODE) == PSR_USR32_MODE)

#define TRAPF_PC(tfp)		((tfp)->tf_pc)

#define cpu_getstack(td)	((td)->td_frame->tf_usr_sp)
#define cpu_setstack(td, sp)	((td)->td_frame->tf_usr_sp = (sp))
#define cpu_spinwait()		/* nothing */
#define cpu_lock_delay()	DELAY(1)

#define ARM_VEC_ALL	0xffffffff

extern vm_offset_t vector_page;

/*
 * Params passed into initarm. If you change the size of this you will
 * need to update locore.S to allocate more memory on the stack before
 * it calls initarm.
 */
struct arm_boot_params {
	register_t	abp_size;	/* Size of this structure */
	register_t	abp_r0;		/* r0 from the boot loader */
	register_t	abp_r1;		/* r1 from the boot loader */
	register_t	abp_r2;		/* r2 from the boot loader */
	register_t	abp_r3;		/* r3 from the boot loader */
	vm_offset_t	abp_physaddr;	/* The kernel physical address */
	vm_offset_t	abp_pagetable;	/* The early page table */
};

void	arm_vector_init(vm_offset_t, int);
void	fork_trampoline(void);
void	identify_arm_cpu(void);
void	*initarm(struct arm_boot_params *);

int	badaddr_read(void *, size_t, void *);

#endif /* !MACHINE_CPU_H */