1 /* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */
4 * Copyright 2011 Semihalf
5 * Copyright (C) 1994-1997 Mark Brinicombe
6 * Copyright (C) 1994 Brini
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Brini.
20 * 4. The name of Brini may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <sys/syscall.h>
38 #include <machine/asm.h>
39 #include <machine/armreg.h>
40 #include <machine/cpuconf.h>
41 #include <machine/pte.h>
43 __FBSDID("$FreeBSD$");
45 /* What size should this really be ? It is only used by initarm() */
46 #define INIT_ARM_STACK_SIZE (2048 * 4)
/*
 * NOTE(review): fragment of the CPWAIT()/CPWAIT_BRANCH macro pair; the
 * CPWAIT_BRANCH body and the enclosing "#define CPWAIT(tmp)" line are
 * not visible in this chunk -- confirm against the full file.  The idiom
 * drains a preceding CP15 write: perform an arbitrary CP15 read, then a
 * dependent mov, so execution cannot proceed until the write completed.
 */
48 #define CPWAIT_BRANCH \
52 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
53 mov tmp, tmp /* wait for it to complete */ ;\
54 CPWAIT_BRANCH /* branch to next insn */
/* (interior of a block comment; its delimiters fall outside this chunk) */
57 * This is for kvm_mkdb, and should be the address of the beginning
58 * of the kernel text segment (not necessarily the same as kernbase).
/* Assembler-level aliases for the configured kernel VA/PA base constants. */
63 .set kernbase,KERNBASE
65 .set physaddr,PHYSADDR
68 * On entry for FreeBSD boot ABI:
69 * r0 - metadata pointer or 0 (boothowto on AT91's boot2)
70 * r1 - if (r0 == 0) then metadata pointer
71 * On entry for Linux boot ABI:
73 * r1 - machine type (passed as arg2 to initarm)
74 * r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
76 * For both types of boot we gather up the arguments, place them in a
77 * struct arm_boot_params, and pass that to initarm.
/*
 * Kernel entry-point fragment.  NOTE(review): the ASENTRY(_start) label
 * itself is not visible in this chunk.  The four bootloader argument
 * registers are stashed in call-preserved/high registers so they survive
 * until the arm_boot_params structure is built later on.
 */
82 STOP_UNWINDING /* Can't unwind into the bootloader! */
84 mov r9, r0 /* 0 or boot mode from boot2 */
85 mov r8, r1 /* Save Machine type */
86 mov ip, r2 /* Save meta data */
87 mov fp, r3 /* Future expansion */
89 /* Make sure interrupts are disabled. */
/* NOTE(review): r7 presumably holds the current CPSR here (the mrs and the
 * write-back msr are not visible in this chunk) -- TODO confirm. */
91 orr r7, r7, #(I32_bit|F32_bit)
94 #if defined (FLASHADDR) && defined(LOADERRAMADDR)
95 /* Check if we're running from flash. */
/* (interior of a block comment; delimiters outside this chunk) */
98 * If we're running with MMU disabled, test against the
99 * physical address instead.
/* Read the CP15 control register (SCTLR) and test the MMU-enable bit. */
101 mrc p15, 0, r2, c1, c0, 0
102 ands r2, r2, #CPU_CONTROL_MMU_ENABLE
/* MMU enabled (Z clear): compare against the loader's RAM address. */
104 ldrne r6, =LOADERRAMADDR
/* Offset of from_ram relative to _start, for the flash-to-RAM relocation. */
126 Lram_offset: .word from_ram-_C_LABEL(_start)
/* Rebase the address held in r7 onto the physical RAM window. */
131 bic r7, r7, #0xf0000000
132 orr r7, r7, #PHYSADDR
136 /* Disable MMU for a while */
/* Read-modify-write SCTLR: clear MMU, D-cache, write buffer, I-cache and
 * branch prediction so the pagetables can be rebuilt from a known state. */
137 mrc p15, 0, r2, c1, c0, 0
138 bic r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
139 CPU_CONTROL_WBUF_ENABLE)
140 bic r2, r2, #(CPU_CONTROL_IC_ENABLE)
141 bic r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
142 mcr p15, 0, r2, c1, c0, 0
/* (interior of a block comment; delimiters outside this chunk) */
150 * Build page table from scratch.
153 /* Find the delta between VA and PA */
157 /* At this point: r2 = VA - PA */
/* (comment fragment continues) */
160 * Find the physical address of the table. After these two
164 * r0 = va(pagetable) - (VA - PA)
165 * = va(pagetable) - VA + PA
173 * Some of the older ports (the various XScale, mostly) assume
174 * that the memory before the kernel is mapped, and use it for
175 * the various stacks, page tables, etc. For those CPUs, map the
176 * 64 first MB of RAM, as it used to be.
184 /* Map 64MiB, preserved over calls to build_pagetables */
188 /* Create the kernel map to jump to */
/* r5 = kernel physical load address; also saved for arm_boot_params later. */
192 ldr r5, =(KERNPHYSADDR)
197 /* Find the start of the kernel's load address */
199 ldr r2, =(L1_S_OFFSET)
203 /* Map 64MiB, preserved over calls to build_pagetables */
207 /* Create the kernel map to jump to */
209 ldr r2, =(KERNVIRTADDR)
213 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
/* Optional early device mapping so a SoC UART is usable before initarm. */
214 /* Create the custom map */
221 orr r0, r0, #2 /* Set TTB shared memory flag */
/* Install the translation table base (TTBR0) and invalidate the TLBs. */
223 mcr p15, 0, r0, c2, c0, 0 /* Set TTB */
224 mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */
226 #if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
/* NOTE(review): r0 is presumably zeroed between the visible lines before
 * this CONTEXTIDR write -- TODO confirm against the full file. */
228 mcr p15, 0, r0, c13, c0, 1 /* Set ASID to 0 */
231 /* Set the Domain Access register. Very important! */
232 mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
233 mcr p15, 0, r0, c3, c0, 0
/* (interior of a block comment; delimiters outside this chunk) */
236 * On armv6 enable extended page tables, and set alignment checking
237 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
238 * instructions emitted by clang.
/* Read-modify-write SCTLR, finishing with the MMU-enable bit. */
240 mrc p15, 0, r0, c1, c0, 0
242 orr r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
243 orr r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
244 orr r0, r0, #(CPU_CONTROL_AF_ENABLE)
246 orr r0, r0, #(CPU_CONTROL_MMU_ENABLE)
247 mcr p15, 0, r0, c1, c0, 0
/*
 * Post-MMU fragment: now running at virtual addresses.
 * NOTE(review): the .Lvirt_done label, the literal pool loaded through r1,
 * and the bss-clear loop label are not visible in this chunk.
 * r1/r2 appear to delimit the region to zero (bss); sp gets the initial
 * kernel stack.
 */
256 ldmia r1, {r1, r2, sp} /* Set initial stack and */
257 sub r2, r2, r1 /* get zero init data */
260 str r3, [r1], #0x0004 /* get zero init data */
/*
 * Build a struct arm_boot_params on the stack and call initarm with it.
 * Layout (7 words = 28 bytes): size, r0, r1, r2, r3 from the bootloader,
 * then the kernel physical address and the pagetable address.
 */
266 mov r1, #28 /* loader info size is 28 bytes also second arg */
267 subs sp, sp, r1 /* allocate arm_boot_params struct on stack */
268 mov r0, sp /* loader info pointer is first arg */
/* sp is aligned down AFTER r0 was captured, so the struct itself may sit
 * above the aligned sp -- intentional, r0 still points at it. */
269 bic sp, sp, #7 /* align stack to 8 bytes */
270 str r1, [r0] /* Store length of loader info */
271 str r9, [r0, #4] /* Store r0 from boot loader */
272 str r8, [r0, #8] /* Store r1 from boot loader */
273 str ip, [r0, #12] /* store r2 from boot loader */
274 str fp, [r0, #16] /* store r3 from boot loader */
275 str r5, [r0, #20] /* store the physical address */
276 adr r4, Lpagetable /* load the pagetable address */
/* NOTE(review): a load of r5 from [r4] presumably occurs between these two
 * visible lines -- confirm against the full file. */
278 str r5, [r0, #24] /* store the pagetable address */
279 mov fp, #0 /* trace back starts here */
280 bl _C_LABEL(initarm) /* Off we go */
282 /* init arm will return the new stack pointer. */
285 bl _C_LABEL(mi_startup) /* call mi_startup()! */
/* mi_startup() should never return; fall through to the panic message. */
287 adr r0, .Lmainreturned
/* (interior of the build_pagetables header comment; delimiters outside
 * this chunk) */
293 * Builds the page table
294 * r0 - The table base address
295 * r1 - The physical address (trashed)
296 * r2 - The virtual address (trashed)
297 * r3 - The number of 1MiB sections
300 * Addresses must be 1MiB aligned
303 /* Set the required page attributes */
/* r4 = L1 section descriptor template: section type, cacheable, kernel RW. */
304 ldr r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
310 /* Move the virtual address to the correct bit location */
/* Each 1MiB section maps to one 4-byte L1 entry, hence the "- 2". */
311 lsr r2, #(L1_S_SHIFT - 2)
/* Advance the physical address by one 1MiB section per loop iteration.
 * NOTE(review): the loop label and store are not visible in this chunk. */
317 add r1, r1, #(L1_S_SIZE)
/* Initial kernel stack top (fragment of the .Lstart literal block). */
339 .word svcstk + INIT_ARM_STACK_SIZE
/* Panic string printed if mi_startup()/main ever returns. */
345 .asciz "main() returned"
/* Backing storage for the initial SVC-mode stack. */
350 .space INIT_ARM_STACK_SIZE
/* (interior of a block comment; delimiters outside this chunk) */
353 * Memory for the initial pagetable. We are unable to place this in
354 * the bss as this will be cleared after the table is loaded.
356 .section ".init_pagetable"
/* .align on ARM GAS takes a power of two: 2^14 = 16KiB, the required
 * alignment for an ARM L1 translation table. */
357 .align 14 /* 16KiB aligned */
/* NOTE(review): fragment of a separate literal (presumably Lcpufuncs);
 * its label is not visible in this chunk. */
365 .word _C_LABEL(cpufuncs)
/* Literal holding the secondary-CPU startup pagetable address (SMP path). */
371 Lstartup_pagetable_secondary:
/*
 * Secondary-CPU (SMP) entry fragment.  NOTE(review): the mpentry label is
 * not visible in this chunk.  Mirrors the boot-CPU path: mask interrupts,
 * force the MMU/caches off, install the prebuilt secondary pagetable,
 * enable the MMU, then jump to virtual addresses and init_secondary().
 */
376 /* Make sure interrupts are disabled. */
/* NOTE(review): r7 presumably holds the current CPSR here (mrs/msr not
 * visible) -- TODO confirm. */
378 orr r7, r7, #(I32_bit|F32_bit)
381 /* Disable MMU. It should be disabled already, but make sure. */
382 mrc p15, 0, r2, c1, c0, 0
383 bic r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
384 CPU_CONTROL_WBUF_ENABLE)
385 bic r2, r2, #(CPU_CONTROL_IC_ENABLE)
386 bic r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
387 mcr p15, 0, r2, c1, c0, 0
/* Invalidate I/D caches; the armv6/armv7 variant is selected by #ifdefs
 * not visible in this chunk. */
394 bl armv6_idcache_inv_all /* Modifies r0 only */
396 bl armv7_idcache_inv_all /* Modifies r0-r3, ip */
/* Fetch the secondary pagetable VA and rebase it into physical space. */
399 ldr r0, Lstartup_pagetable_secondary
400 bic r0, r0, #0xf0000000
401 orr r0, r0, #PHYSADDR
403 orr r0, r0, #2 /* Set TTB shared memory flag */
404 mcr p15, 0, r0, c2, c0, 0 /* Set TTB */
405 mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */
408 mcr p15, 0, r0, c13, c0, 1 /* Set ASID to 0 */
410 /* Set the Domain Access register. Very important! */
411 mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
412 mcr p15, 0, r0, c3, c0, 0
/* Enable extended pagetables, access flag, MMU, caches, write buffer and
 * branch prediction in one SCTLR update. */
414 mrc p15, 0, r0, c1, c0, 0
415 orr r0, r0, #CPU_CONTROL_V6_EXTPAGE
416 orr r0, r0, #CPU_CONTROL_AF_ENABLE
417 orr r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
418 CPU_CONTROL_WBUF_ENABLE)
419 orr r0, r0, #(CPU_CONTROL_IC_ENABLE)
420 orr r0, r0, #(CPU_CONTROL_BPRD_ENABLE)
421 mcr p15, 0, r0, c1, c0, 0
/* Pick up this CPU's initial stack; c0,c0,5 reads the MPIDR, presumably
 * used to index a per-CPU stack -- TODO confirm (surrounding lines are
 * not visible in this chunk). */
428 ldmia r1, {r1, r2, sp} /* Set initial stack and */
429 mrc p15, 0, r0, c0, c0, 5
/* Long jump so execution continues at the virtual-address alias. */
435 ldr pc, .Lmpvirt_done
439 mov fp, #0 /* trace back starts here */
440 bl _C_LABEL(init_secondary) /* Off we go */
/* Panic string printed if init_secondary() ever returns. */
447 .asciz "init_secondary() returned"
/*
 * cpu_reset fragment.  NOTE(review): the ENTRY(cpu_reset) label and
 * several intervening instructions are not visible in this chunk.
 * Force SVC32 mode with IRQ/FIQ masked, write back and invalidate the
 * caches via the cpufuncs dispatch table, then turn the MMU off and jump
 * to the board-supplied reset address.
 */
454 bic r2, r2, #(PSR_MODE)
455 orr r2, r2, #(PSR_SVC32_MODE)
456 orr r2, r2, #(I32_bit | F32_bit)
459 ldr r4, .Lcpu_reset_address
/* Indirect calls through the cpufuncs table; lr is presumably set up by
 * instructions not visible here -- TODO confirm. */
464 ldr pc, [r0, #CF_IDCACHE_WBINV_ALL]
466 ldr pc, [r0, #CF_L2CACHE_WBINV_ALL]
/* (interior of a block comment; delimiters outside this chunk) */
469 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
473 ldr r1, .Lcpu_reset_needs_v4_MMU_disable
/* (interior of a block comment; delimiters outside this chunk) */
479 * MMU & IDC off, 32 bit program & data space
480 * Hurl ourselves into the ROM
482 mov r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
483 mcr 15, 0, r0, c1, c0, 0
484 mcrne 15, 0, r2, c8, c7, 0 /* nail I+D TLB on ARMv4 and greater */
/* (interior of a block comment; delimiters outside this chunk) */
488 * _cpu_reset_address contains the address to branch to, to complete
489 * the cpu reset after turning the MMU off
490 * This variable is provided by the hardware specific code
/* NOTE(review): the .Lcpu_reset_address label for this word is not
 * visible in this chunk. */
493 .word _C_LABEL(cpu_reset_address)
/* (interior of a block comment; delimiters outside this chunk) */
496 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
497 * v4 MMU disable instruction needs executing... it is an illegal instruction
498 * on f.e. ARM6/7 that locks up the computer in an endless illegal
499 * instruction / data-abort / reset loop.
501 .Lcpu_reset_needs_v4_MMU_disable:
502 .word _C_LABEL(cpu_reset_needs_v4_MMU_disable)
/* esym: end-of-symbol-table marker, initialized to the kernel's end. */
522 .global _C_LABEL(esym)
523 _C_LABEL(esym): .word _C_LABEL(end)
/*
 * Signal trampoline fragment.  NOTE(review): the sigcode label, the swi
 * instructions, and the trailing syscall-number words are not visible in
 * this chunk.
 */
/* (interior of a block comment; delimiters outside this chunk) */
534 * Call the sigreturn system call.
536 * We have to load r7 manually rather than using
537 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
538 * correct. Using the alternative places esigcode at the address
539 * of the data rather than the address one past the data.
/* PC-relative literal load: in ARM state pc reads as the current
 * instruction address + 8, so #12 addresses a word three instructions
 * ahead (the SYS_sigreturn constant, not visible here). */
542 ldr r7, [pc, #12] /* Load SYS_sigreturn */
545 /* Well if that failed we better exit quick ! */
547 ldr r7, [pc, #8] /* Load SYS_exit */
550 /* Branch back to retry SYS_sigreturn */
557 .global _C_LABEL(esigcode)
/* szsigcode: byte length of the trampoline, exported for the kernel to
 * copy it onto the user stack.  NOTE(review): the szsigcode label itself
 * is not visible in this chunk. */
563 .long esigcode-sigcode
565 /* End of locore.S */