2 * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org>
3 * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org>
4 * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org>
5 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
6 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/syscall.h>
33 #include <machine/asm.h>
34 #include <machine/asmacros.h>
35 #include <machine/armreg.h>
36 #include <machine/sysreg.h>
37 #include <machine/cpuconf.h>
38 #include <machine/pte.h>
40 __FBSDID("$FreeBSD$");
/*
 * PTE1_* (1 MiB super-page) names alias the classic L1 "section" macros.
 * NOTE(review): the #if selecting this branch (presumably the pmap variant)
 * is not visible in this chunk — confirm against the full file.
 */
43 #define PTE1_OFFSET L1_S_OFFSET
44 #define PTE1_SHIFT L1_S_SHIFT
45 #define PTE1_SIZE L1_S_SIZE
49 #if defined(__ARM_ARCH_7VE__) || defined(__clang__)
51 * HYP support is in binutils >= 2.21 and gcc >= 4.9 defines __ARM_ARCH_7VE__
52 * when enabled. llvm >= 3.6 supports it too.
55 #define MSR_ELR_HYP(regnum) msr elr_hyp, lr
/*
 * Fallback for toolchains without virtualization-extension mnemonics:
 * hand-encode "msr elr_hyp, <regnum>" and ERET as raw instruction words.
 * NOTE(review): the #else separating the two MSR_ELR_HYP definitions is
 * elided in this chunk.
 */
58 #define MSR_ELR_HYP(regnum) .word (0xe12ef300 | regnum)
59 #define ERET .word 0xe160006e
61 #endif /* __ARM_ARCH_7VE__ || __clang__ */
63 /* A small statically-allocated stack used only during initarm() and AP startup. */
64 #define INIT_ARM_STACK_SIZE 2048
/*
 * Fragment of a multi-line macro (note the ";\" continuations) that checks
 * whether the CPU was entered in HYP (virtualization) mode and, if so,
 * drops to SVC mode with IRQ/FIQ/Aborts masked before the exception return.
 * NOTE(review): the enclosing #define line, the cpsr read, and the
 * MSR_ELR_HYP/ERET sequence are elided in this chunk.
 */
71 /* Leave HYP mode */ ;\
73 and r0, r0, #(PSR_MODE) /* Mode is in the low 5 bits of CPSR */ ;\
74 teq r0, #(PSR_HYP32_MODE) /* Hyp Mode? */ ;\
76 /* Ensure that IRQ, FIQ and Aborts will be disabled after eret */ ;\
78 bic r0, r0, #(PSR_MODE) ;\
79 orr r0, r0, #(PSR_SVC32_MODE) ;\
80 orr r0, r0, #(PSR_I | PSR_F | PSR_A) ;\
82 /* Exit hypervisor mode */ ;\
89 #endif /* __ARM_ARCH >= 7 */
92 * On entry for FreeBSD boot ABI:
93 * r0 - metadata pointer or 0 (boothowto on AT91's boot2)
94 * r1 - if (r0 == 0) then metadata pointer
95 * On entry for Linux boot ABI:
97 * r1 - machine type (passed as arg2 to initarm)
98 * r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
100 * For both types of boot we gather up the args, put them in a struct arm_boot_params
101 * structure and pass that to initarm.
/* NOTE(review): the ENTRY(_start) label and many interior lines (cpsid,
 * sctlr read into r7, label targets, loop bodies) are elided in this chunk. */
106 STOP_UNWINDING /* Can't unwind into the bootloader! */
108 /* Make sure interrupts are disabled. */
/* Stash the four bootloader-provided registers in callee-saved regs so they
 * survive the cache/MMU setup calls below. */
111 mov r8, r0 /* 0 or boot mode from boot2 */
112 mov r9, r1 /* Save Machine type */
113 mov r10, r2 /* Save meta data */
114 mov r11, r3 /* Future expansion */
119 * Check whether data cache is enabled. If it is, then we know
120 * current tags are valid (not power-on garbage values) and there
121 * might be dirty lines that need cleaning. Disable cache to prevent
122 * new lines being allocated, then call wbinv_poc_all to clean it.
125 tst r7, #CPU_CONTROL_DC_ENABLE
126 blne dcache_wbinv_poc_all
128 /* ! Do not write to memory between wbinv and disabling cache ! */
131 * Now there are no dirty lines, but there may still be lines marked
132 * valid. Disable all caches and the MMU, and invalidate everything
133 * before setting up new page tables and re-enabling the mmu.
136 bic r7, #CPU_CONTROL_DC_ENABLE
137 bic r7, #CPU_CONTROL_MMU_ENABLE
138 bic r7, #CPU_CONTROL_IC_ENABLE
139 bic r7, #CPU_CONTROL_UNAL_ENABLE
140 bic r7, #CPU_CONTROL_BPRD_ENABLE
141 bic r7, #CPU_CONTROL_SW_ENABLE
142 orr r7, #CPU_CONTROL_AFLT_ENABLE
143 orr r7, #CPU_CONTROL_VECRELOC
147 bl dcache_inv_poc_all
153 * Build page table from scratch.
157 * Figure out the physical address we're loaded at by assuming this
158 * entry point code is in the first L1 section and so if we clear the
159 * offset bits of the pc that will give us the section-aligned load
160 * address, which remains in r5 throughout all the following code.
162 ldr r2, =(L1_S_OFFSET)
165 /* Find the delta between VA and PA, result stays in r0 throughout. */
167 bl translate_va_to_pa
170 * First map the entire 4GB address space as VA=PA. It's mapped as
171 * normal (cached) memory because it's for things like accessing the
172 * parameters passed in from the bootloader, which might be at any
173 * physical address, different for every platform.
181 * Next we do 64MiB starting at the physical load address, mapped to
182 * the VA the kernel is linked for.
185 ldr r2, =(KERNVIRTADDR)
189 /* Create a device mapping for early_printf if specified. */
190 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
194 bl build_device_pagetables
198 /* Transition the PC from physical to virtual addressing. */
202 /* Setup stack, clear BSS */
/* The three words loaded here are _edata, _ebss and svcstk (see the
 * .word triple further down, which must stay in this order). */
204 ldmia r1, {r1, r2, sp} /* Set initial stack and */
205 add sp, sp, #INIT_ARM_STACK_SIZE
207 sub r2, r2, r1 /* r2 = _ebss - _edata = bytes of bss to zero */
209 str r3, [r1], #0x0004 /* store one zero word, post-increment (r3 assumed 0; setup elided) */
213 mov r1, #28 /* loader info size is 28 bytes also second arg */
214 subs sp, sp, r1 /* allocate arm_boot_params struct on stack */
215 mov r0, sp /* loader info pointer is first arg */
216 bic sp, sp, #7 /* align stack to 8 bytes */
217 str r1, [r0] /* Store length of loader info */
218 str r8, [r0, #4] /* Store r0 from boot loader */
219 str r9, [r0, #8] /* Store r1 from boot loader */
220 str r10, [r0, #12] /* store r2 from boot loader */
221 str r11, [r0, #16] /* store r3 from boot loader */
222 str r5, [r0, #20] /* store the physical address */
223 adr r4, Lpagetable /* load the pagetable address */
225 str r5, [r0, #24] /* store the pagetable address */
226 mov fp, #0 /* trace back starts here */
227 bl _C_LABEL(initarm) /* Off we go */
229 /* init arm will return the new stack pointer. */
232 bl _C_LABEL(mi_startup) /* call mi_startup()! */
/* mi_startup() must never return; fall through to panic with this message. */
234 ldr r0, =.Lmainreturned
/* NOTE(review): the VA_TO_PA_POINTER macro body and most of
 * translate_va_to_pa's instructions are elided in this chunk; only the
 * descriptive comments and the #define header survive. */
239 #define VA_TO_PA_POINTER(name, table) \
245 * Returns the physical address of a magic va to pa pointer.
246 * r0 - The pagetable data pointer. This must be built using the
247 * VA_TO_PA_POINTER macro.
249 * VA_TO_PA_POINTER(Lpagetable, pagetable)
252 * bl translate_va_to_pa
253 * r0 will now contain the physical address of pagetable
259 /* At this point: r2 = VA - PA */
262 * Find the physical address of the table. After these two
266 * r0 = va(pagetable) - (VA - PA)
267 * = va(pagetable) - VA + PA
276 * r0 - the table base address
/* NOTE(review): the routine's entry label, the domain-register write, the
 * TEX remap mcr's, and the final SCTLR write/RET are elided in this chunk. */
281 /* Setup TLB and MMU registers */
282 mcr CP15_TTBR0(r0) /* Set TTB */
284 mcr CP15_CONTEXTIDR(r0) /* Set ASID to 0 */
286 /* Set the Domain Access register */
287 mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
292 * Set TEX remap registers
293 * - All is set to uncacheable memory
300 mcr CP15_TLBIALL /* Flush TLB */
/* Build up the SCTLR bits: enable MMU, extended (v6) page tables, TEX
 * remap and hardware access-flag management. */
306 orr r0, r0, #CPU_CONTROL_MMU_ENABLE
307 orr r0, r0, #CPU_CONTROL_V6_EXTPAGE
309 orr r0, r0, #CPU_CONTROL_TR_ENABLE
311 orr r0, r0, #CPU_CONTROL_AF_ENABLE
315 mcr CP15_TLBIALL /* Flush TLB */
316 mcr CP15_BPIALL /* Flush Branch predictor */
325 * Init SMP coherent mode, enable caching and switch to final MMU table.
326 * Called with disabled caches
327 * r0 - The table base address
328 * r1 - clear bits for aux register
329 * r2 - set bits for aux register
331 ASENTRY_NP(reinit_mmu)
/* NOTE(review): the prologue (register saves moving r0-r2 into r4-r6),
 * several mcr/isb/dsb barriers, the #endif's for the #if 0 regions, and the
 * epilogue/RET are elided in this chunk. */
337 /* !! Be very paranoid here !! */
338 /* !! We cannot write single bit here !! */
340 #if 0 /* XXX writeback shouldn't be necessary */
341 /* Write back and invalidate all integrated caches */
342 bl dcache_wbinv_poc_all
344 bl dcache_inv_pou_all
350 /* Set auxiliary register */
352 bic r8, r7, r5 /* Mask bits */
353 eor r8, r8, r6 /* Set bits */
/* Re-enable D-cache, I-cache and branch prediction in the staged SCTLR. */
361 orr r7, #CPU_CONTROL_DC_ENABLE
362 orr r7, #CPU_CONTROL_IC_ENABLE
363 orr r7, #CPU_CONTROL_BPRD_ENABLE
367 mcr CP15_TTBR0(r4) /* Set new TTB */
371 mcr CP15_TLBIALL /* Flush TLB */
372 mcr CP15_BPIALL /* Flush Branch predictor */
376 #if 0 /* XXX writeback shouldn't be necessary */
377 /* Write back and invalidate all integrated caches */
378 bl dcache_wbinv_poc_all
380 bl dcache_inv_pou_all
391 * Builds the page table
392 * r0 - The table base address
393 * r1 - The physical address (trashed)
394 * r2 - The virtual address (trashed)
395 * r3 - The number of 1MiB sections
398 * Addresses must be 1MiB aligned
/* NOTE(review): the #else/#endif lines pairing the three attribute variants,
 * the store loop body, and the RET are elided in this chunk.  Each visible
 * pair of ldr's loads r4 with L1-section attribute bits: the first group is
 * the uncached (device) variant, the second adds cacheability (L1_S_C /
 * TEX1_CLASS_0). */
400 build_device_pagetables:
401 #if defined(ARM_NEW_PMAP)
402 ldr r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
404 ldr r4, =(L1_TYPE_S|L1_S_AP(AP_KRW)|L1_SHARED)
406 ldr r4, =(L1_TYPE_S|L1_S_AP(AP_KRW))
410 /* Set the required page attributes */
411 #if defined(ARM_NEW_PMAP)
412 ldr r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
414 ldr r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)|L1_SHARED)
416 ldr r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
421 /* Move the virtual address to the correct bit location */
422 lsr r2, #(PTE1_SHIFT - 2) /* VA>>20 then <<2: word index into the L1 table */
428 add r1, r1, #(PTE1_SIZE) /* advance PA by one 1MiB section */
434 VA_TO_PA_POINTER(Lpagetable, boot_pt1)
/* Startup data consumed by _start's single ldmia: end of initialized data,
 * end of bss, and the base of the initial per-CPU stacks. */
438 .word _edata /* Note that these three items are */
439 .word _ebss /* loaded with a single ldmia and */
440 .word svcstk /* must remain in order together. */
443 .asciz "main() returned"
/* One INIT_ARM_STACK_SIZE-byte bootstrap stack per CPU (svcstk). */
448 .space INIT_ARM_STACK_SIZE * MAXCPU
451 * Memory for the initial pagetable. We are unable to place this in
452 * the bss as this will be cleared after the table is loaded.
454 .section ".init_pagetable"
455 .align 14 /* 16KiB aligned */
464 .word _C_LABEL(cpufuncs)
/* Secondary-CPU (AP) startup: mirrors _start's cache/MMU disable sequence,
 * then picks a per-CPU bootstrap stack and calls init_secondary().
 * NOTE(review): the entry label, cpsid, SCTLR read into r0, the page-table
 * install, and the MPIDR masking lines are elided in this chunk. */
469 /* Make sure interrupts are disabled. */
474 /* Setup core, disable all caches. */
476 bic r0, #CPU_CONTROL_MMU_ENABLE
477 bic r0, #CPU_CONTROL_DC_ENABLE
478 bic r0, #CPU_CONTROL_IC_ENABLE
479 bic r0, #CPU_CONTROL_UNAL_ENABLE
480 bic r0, #CPU_CONTROL_BPRD_ENABLE
481 bic r0, #CPU_CONTROL_SW_ENABLE
482 orr r0, #CPU_CONTROL_AFLT_ENABLE
483 orr r0, #CPU_CONTROL_VECRELOC
488 /* Invalidate L1 cache I+D cache */
489 bl dcache_inv_pou_all
494 /* Find the delta between VA and PA */
496 bl translate_va_to_pa
500 adr r1, .Lstart+8 /* Get initstack pointer from */
501 ldr sp, [r1] /* startup data. */
502 mrc CP15_MPIDR(r0) /* Get processor id number. */
504 mov r1, #INIT_ARM_STACK_SIZE
505 mul r2, r1, r0 /* Point sp to initstack */
506 add sp, sp, r2 /* area for this processor. */
508 /* Switch to virtual addresses. */
511 mov fp, #0 /* trace back starts here */
512 bl _C_LABEL(init_secondary)/* Off we go, cpu id in r0. */
/* init_secondary() must never return; panic message used if it does. */
520 .asciz "init_secondary() returned"
/* cpu_reset fragment: flush caches to PoC, then jump to the board-supplied
 * reset address with the MMU off.  NOTE(review): the entry label, the
 * MMU-disable sequence, and the branch through r4 are elided in this chunk. */
526 /* XXX re-implement !!! */
528 bl dcache_wbinv_poc_all
530 ldr r4, .Lcpu_reset_address
539 * _cpu_reset_address contains the address to branch to, to complete
540 * the cpu reset after turning the MMU off
541 * This variable is provided by the hardware specific code
544 .word _C_LABEL(cpu_reset_address)
/* esym: end-of-symbols pointer, initialized to the kernel's end address;
 * exported for the debugger/loader. */
564 .global _C_LABEL(esym)
565 _C_LABEL(esym): .word _C_LABEL(end)
576 * Call the sigreturn system call.
578 * We have to load r7 manually rather than using
579 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
580 * correct. Using the alternative places esigcode at the address
581 * of the data rather than the address one past the data.
/* NOTE(review): the swi instructions and the .word SYS_sigreturn /
 * SYS_exit data that these pc-relative loads target are elided in this
 * chunk; the #12/#8 offsets assume that exact layout. */
584 ldr r7, [pc, #12] /* Load SYS_sigreturn */
587 /* Well if that failed we better exit quick ! */
589 ldr r7, [pc, #8] /* Load SYS_exit */
592 /* Branch back to retry SYS_sigreturn */
599 .global _C_LABEL(esigcode)
/* szsigcode: size of the trampoline copied onto the user stack. */
605 .long esigcode-sigcode
607 /* End of locore.S */