1 /* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */
4 * Copyright 2011 Semihalf
5 * Copyright (C) 1994-1997 Mark Brinicombe
6 * Copyright (C) 1994 Brini
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Brini.
20 * 4. The name of Brini may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <sys/syscall.h>
38 #include <machine/asm.h>
39 #include <machine/armreg.h>
40 #include <machine/pte-v4.h>
42 __FBSDID("$FreeBSD$");
/*
 * NOTE(review): this file is a partial extraction of a FreeBSD ARM locore.S;
 * lines are missing throughout and each surviving line still carries its
 * original line number.  Treat everything below as a fragment.
 *
 * INIT_ARM_STACK_SIZE sizes the temporary bootstrap stack, used only until
 * initarm() returns the real stack pointer.
 */
44 /* 2K initial stack is plenty, it is only used by initarm() */
45 #define INIT_ARM_STACK_SIZE 2048
/*
 * CPWAIT: classic ARMv4/v5 idiom to drain a CP15 operation — read CP15,
 * consume the result, then branch to the next instruction.  The body of
 * CPWAIT_BRANCH itself is not visible in this extraction.
 */
47 #define CPWAIT_BRANCH \
51 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
52 mov tmp, tmp /* wait for it to complete */ ;\
53 CPWAIT_BRANCH /* branch to next insn */
56 * This is for libkvm, and should be the address of the beginning
57 * of the kernel text segment (not necessarily the same as kernbase).
59 * These are being phased out. Newer copies of libkvm don't need these
60 * values as the information is added to the core file by inspecting
/* Exported for libkvm; values come from the kernel config options. */
67 .set kernbase,KERNVIRTADDR
71 .set physaddr,PHYSADDR
/*
 * Kernel entry fragment.  NOTE(review): the ENTRY(_start)/ASENTRY label and
 * several instructions are missing from this extraction.
 */
75 * On entry for FreeBSD boot ABI:
76 * r0 - metadata pointer or 0 (boothowto on AT91's boot2)
77 * r1 - if (r0 == 0) then metadata pointer
78 * On entry for Linux boot ABI:
80 * r1 - machine type (passed as arg2 to initarm)
81 * r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
83 * For both types of boot we gather up the args, put them in a struct arm_boot_params
84 * structure and pass that to initarm.
89 STOP_UNWINDING /* Can't unwind into the bootloader! */
/*
 * Stash the four boot-loader registers in registers that survive the early
 * setup code; they are later stored into the arm_boot_params structure
 * built on the stack before calling initarm().
 */
91 mov r9, r0 /* 0 or boot mode from boot2 */
92 mov r8, r1 /* Save Machine type */
93 mov ip, r2 /* Save meta data */
94 mov fp, r3 /* Future expansion */
96 /* Make sure interrupts are disabled. */
/* NOTE(review): the "mrs r7, cpsr" before and the "msr" after this orr are
 * not visible in this fragment — confirm against the full file. */
98 orr r7, r7, #(PSR_I | PSR_F)
101 #if defined (FLASHADDR) && defined(LOADERRAMADDR)
103 * Sanity check the configuration.
104 * FLASHADDR and LOADERRAMADDR depend on PHYSADDR in some cases.
105 * ARMv4 and ARMv5 make assumptions on where they are loaded.
106 * TODO: Fix the ARMv4/v5 case.
109 #error PHYSADDR must be defined for this configuration
112 /* Check if we're running from flash. */
115 * If we're running with MMU disabled, test against the
116 * physical address instead.
/* r2 presumably holds the CP15 control register here (the mrc that loads it
 * is not visible); Z clear => MMU enabled => use the RAM (virtual) address. */
119 ands r2, r2, #CPU_CONTROL_MMU_ENABLE
121 ldrne r6, =LOADERRAMADDR
/* PC-relative offset used when copying/jumping from flash into RAM. */
143 Lram_offset: .word from_ram-_C_LABEL(_start)
149 /* Disable MMU for a while */
/* Clear MMU, data cache and write buffer, then I-cache and branch
 * prediction, before rebuilding the page tables from scratch below. */
151 bic r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
152 CPU_CONTROL_WBUF_ENABLE)
153 bic r2, r2, #(CPU_CONTROL_IC_ENABLE)
154 bic r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
/*
 * Build the bootstrap L1 page table, install it, enable the MMU, zero the
 * bss, then construct struct arm_boot_params on the stack and call
 * initarm() followed by mi_startup().  Fragment: instructions missing.
 */
164 * Build page table from scratch.
168 * Figure out the physical address we're loaded at by assuming this
169 * entry point code is in the first L1 section and so if we clear the
170 * offset bits of the pc that will give us the section-aligned load
171 * address, which remains in r5 throughout all the following code.
173 ldr r2, =(L1_S_OFFSET)
176 /* Find the delta between VA and PA, result stays in r0 throughout. */
178 bl translate_va_to_pa
181 * First map the entire 4GB address space as VA=PA. It's mapped as
182 * normal (cached) memory because it's for things like accessing the
183 * parameters passed in from the bootloader, which might be at any
184 * physical address, different for every platform.
192 * Next we do 64MiB starting at the physical load address, mapped to
193 * the VA the kernel is linked for.
196 ldr r2, =(KERNVIRTADDR)
199 #if defined(PHYSADDR) && (KERNVIRTADDR != KERNBASE)
201 * If the kernel wasn't loaded at the beginning of the ram, map the memory
202 * before the kernel too, as some ports use that for pagetables, stack, etc...
206 ldr r3, =((KERNVIRTADDR - KERNBASE) / L1_S_SIZE)
210 /* Create a device mapping for early_printf if specified. */
211 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
215 bl build_device_pagetables
/* Install the new table: set the translation table base (CP15 c2) and
 * invalidate the entire TLB (CP15 c8). */
218 mcr p15, 0, r0, c2, c0, 0 /* Set TTB */
219 mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */
221 /* Set the Domain Access register. Very important! */
222 mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
223 mcr p15, 0, r0, c3, c0, 0
/* Turn the MMU on.  NOTE(review): the surrounding mrc/mcr of the control
 * register are not visible in this fragment. */
228 orr r0, r0, #(CPU_CONTROL_MMU_ENABLE)
235 /* Transition the PC from physical to virtual addressing. */
/* bss zeroing: r1 = start, r2 = byte count after the sub, sp = initial
 * stack; the store loop writes a word at a time. */
241 ldmia r1, {r1, r2, sp} /* Set initial stack and */
242 sub r2, r2, r1 /* get zero init data */
245 str r3, [r1], #0x0004 /* get zero init data */
/*
 * Build struct arm_boot_params on the stack:
 *   {len, r0, r1, r2, r3, physaddr, pagetable} = 7 words = 28 bytes,
 * then pass its address (r0) and size (r1) to initarm().
 */
250 mov r1, #28 /* loader info size is 28 bytes also second arg */
251 subs sp, sp, r1 /* allocate arm_boot_params struct on stack */
252 mov r0, sp /* loader info pointer is first arg */
253 bic sp, sp, #7 /* align stack to 8 bytes */
254 str r1, [r0] /* Store length of loader info */
255 str r9, [r0, #4] /* Store r0 from boot loader */
256 str r8, [r0, #8] /* Store r1 from boot loader */
257 str ip, [r0, #12] /* store r2 from boot loader */
258 str fp, [r0, #16] /* store r3 from boot loader */
259 str r5, [r0, #20] /* store the physical address */
260 adr r4, Lpagetable /* load the pagetable address */
262 str r5, [r0, #24] /* store the pagetable address */
263 mov fp, #0 /* trace back starts here */
264 bl _C_LABEL(initarm) /* Off we go */
266 /* init arm will return the new stack pointer. */
269 bl _C_LABEL(mi_startup) /* call mi_startup()! */
/* mi_startup() must never return; fall through to panic with a message. */
271 adr r0, .Lmainreturned
/*
 * VA_TO_PA_POINTER macro (body lines missing from this extraction) and the
 * translate_va_to_pa helper that consumes the data it emits.
 */
276 #define VA_TO_PA_POINTER(name, table) \
282 * Returns the physical address of a magic va to pa pointer.
283 * r0 - The pagetable data pointer. This must be built using the
284 * VA_TO_PA_POINTER macro.
286 * VA_TO_PA_POINTER(Lpagetable, pagetable)
289 * bl translate_va_to_pa
290 * r0 will now contain the physical address of pagetable
/* NOTE(review): the instructions computing r2 are not visible here. */
296 /* At this point: r2 = VA - PA */
299 * Find the physical address of the table. After these two
303 * r0 = va(pagetable) - (VA - PA)
304 * = va(pagetable) - VA + PA
/*
 * build_pagetables / build_device_pagetables fragment: fill r3 consecutive
 * 1MiB L1 section entries mapping VA r2 -> PA r1 in the table at r0.
 */
312 * Builds the page table
313 * r0 - The table base address
314 * r1 - The physical address (trashed)
315 * r2 - The virtual address (trashed)
316 * r3 - The number of 1MiB sections
319 * Addresses must be 1MiB aligned
/* Device mappings: section descriptor, kernel RW, uncached (no L1_S_C). */
321 build_device_pagetables:
322 ldr r4, =(L1_TYPE_S|L1_S_AP(AP_KRW))
325 /* Set the required page attributed */
/* Normal-memory mappings additionally set the cacheable bit L1_S_C. */
326 ldr r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
330 /* Move the virtual address to the correct bit location */
/* Each L1 entry is 4 bytes, hence "- 2" when turning the section-aligned
 * VA into a byte offset into the table. */
331 lsr r2, #(L1_S_SHIFT - 2)
337 add r1, r1, #(L1_S_SIZE)
/* Magic pointer consumed by translate_va_to_pa to locate the pagetable. */
343 VA_TO_PA_POINTER(Lpagetable, pagetable)
/* Initial kernel stack top (the stack grows down from svcstk + size). */
353 .word svcstk + INIT_ARM_STACK_SIZE
/* Panic message used if mi_startup()/main ever returns. */
359 .asciz "main() returned"
/* Backing storage for the bootstrap stack used by initarm(). */
364 .space INIT_ARM_STACK_SIZE
367 * Memory for the initial pagetable. We are unable to place this in
368 * the bss as this will be cleared after the table is loaded.
370 .section ".init_pagetable", "aw", %nobits
371 .align 14 /* 16KiB aligned */
/* Pointer to the cpufuncs dispatch table used by cpu_reset below. */
379 .word _C_LABEL(cpufuncs)
/*
 * cpu_reset fragment: force SVC32 mode with IRQ/FIQ masked, write back and
 * invalidate the caches via cpufuncs, optionally disable the MMU the
 * ARMv4 way, then jump to the platform-provided cpu_reset_address.
 */
383 bic r2, r2, #(PSR_MODE)
384 orr r2, r2, #(PSR_SVC32_MODE)
385 orr r2, r2, #(PSR_I | PSR_F)
388 ldr r4, .Lcpu_reset_address
/* Indirect calls through the cpufuncs table: I+D cache then L2 cache
 * write-back-and-invalidate. */
393 ldr pc, [r0, #CF_IDCACHE_WBINV_ALL]
395 ldr pc, [r0, #CF_L2CACHE_WBINV_ALL]
398 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
402 ldr r1, .Lcpu_reset_needs_v4_MMU_disable
408 * MMU & IDC off, 32 bit program & data space
409 * Hurl ourselves into the ROM
411 mov r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
413 mcrne p15, 0, r2, c8, c7, 0 /* nail I+D TLB on ARMv4 and greater */
417 * _cpu_reset_address contains the address to branch to, to complete
418 * the cpu reset after turning the MMU off
419 * This variable is provided by the hardware specific code
422 .word _C_LABEL(cpu_reset_address)
425 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
426 * v4 MMU disable instruction needs executing... it is an illegal instruction
427 * on f.e. ARM6/7 that locks up the computer in an endless illegal
428 * instruction / data-abort / reset loop.
430 .Lcpu_reset_needs_v4_MMU_disable:
431 .word _C_LABEL(cpu_reset_needs_v4_MMU_disable)
/* esym: end-of-symbol-table pointer exported for libkvm; defaults to the
 * kernel's "end" symbol. */
451 .global _C_LABEL(esym)
452 _C_LABEL(esym): .word _C_LABEL(end)
/*
 * sigcode fragment: the userland signal trampoline.  Invokes SYS_sigreturn
 * and, should that fail, SYS_exit; the syscall numbers are stored as data
 * words just past the code and loaded PC-relative so that szsigcode
 * measures exactly the bytes copied to the user stack.
 */
463 * Call the sigreturn system call.
465 * We have to load r7 manually rather than using
466 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
467 * correct. Using the alternative places esigcode at the address
468 * of the data rather than the address one past the data.
471 ldr r7, [pc, #12] /* Load SYS_sigreturn */
474 /* Well if that failed we better exit quick ! */
476 ldr r7, [pc, #8] /* Load SYS_exit */
479 /* Branch back to retry SYS_sigreturn */
486 .global _C_LABEL(esigcode)
/* szsigcode: size of the trampoline copied to each process's sigstack. */
492 .long esigcode-sigcode
494 /* End of locore.S */