4 * Copyright (C) 2010-2016 Nathan Whitehorn
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
22 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
23 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
24 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
25 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <sys/syscall.h>
34 #include <machine/trap.h>
35 #include <machine/param.h>
36 #include <machine/spr.h>
37 #include <machine/asm.h>
38 #include <machine/vmparam.h>
44 /* Glue for linker script: export the kernel's linked base address */
46 .set kernbase, KERNBASE /* KERNBASE comes from <machine/vmparam.h> */
61 #define TMPSTKSZ 16384 /* 16K temporary stack (early boot, before per-CPU stacks) */
69 #define TRAPSTKSZ 8192 /* 8K trap stack */
77 * Entry point for bootloaders that do not fully implement ELF and start
78 * at the beginning of the image (kexec, notably). In its own section so
79 * that it ends up before any linker-generated call stubs and actually at
80 * the beginning of the image. kexec on some systems also enters at
81 * (start of image) + 0x60, so put a spin loop there.
83 .section ".text.kboot", "x", @progbits /* Own section so it sorts to the very start of the image */
85 #ifdef __LITTLE_ENDIAN__
86 RETURN_TO_NATIVE_ENDIAN /* Fix endianness if the loader entered us in the other mode */
89 . = kbootentry + 0x40 /* Magic address used in platform layer */
93 . = kbootentry + 0x60 /* Entry point for kexec APs */
94 ap_kexec_start: /* At 0x60 past start, copied to 0x60 by kexec */
95 /* r3 set to CPU ID by kexec */
97 /* Invalidate icache for low-memory copy and jump there */
103 ba 0x80 /* Absolute branch to next inst */
105 . = kbootentry + 0x80 /* Aligned to cache line */
106 1: or 31,31,31 /* yield: "or 31,31,31" is the SMT lowest-priority hint while spinning */
108 lwz %r1,0x40(0) /* Spin on ap_kexec_spin_sem (the 0x40 slot reserved above) */
109 cmpw %r1,%r3 /* Until it equals our CPU ID */
113 or 2,2,2 /* unyield: restore normal SMT thread priority */
115 /* Make sure that it will be software reset. Clear SRR1 */
121 * Now start the real text section
129 * Main kernel entry point.
131 * Calling convention:
132 * r3: Flattened Device Tree pointer (or zero)
134 * r5: OF client interface pointer (or zero)
135 * r6: Loader metadata pointer (or zero)
136 * r7: Magic cookie (0xfb5d104d) to indicate that r6 has loader metadata
139 _NAKED_ENTRY(__start)
141 #ifdef __LITTLE_ENDIAN__
142 RETURN_TO_NATIVE_ENDIAN /* Byte-swap back to native endianness before executing further */
144 /* Set 64-bit mode if not yet set before branching to C */
150 nop /* Make this block a multiple of 8 bytes */
152 /* Set up the TOC pointer */
157 .llong __tocbase + 0x8000 - . /* PC-relative offset to the TOC; +0x8000 is the ELFv1 TOC bias */
162 /* Get load offset */
163 ld %r31,-0x8000(%r2) /* First TOC entry is TOC base */
164 subf %r31,%r31,%r2 /* Subtract from real TOC base to get base; r31 = actual load offset */
166 /* Set up the stack pointer */
168 .llong tmpstk + TMPSTKSZ - 96 - . /* PC-relative offset to top of tmpstk, minus 96 bytes for the initial frame */
174 /* Relocate kernel */
196 mr %r4,%r2 /* Replace ignored r4 with tocbase for trap handlers */
200 /* Set stack pointer to new value and branch to mi_startup */
211 ASENTRY_NOPROF(__restartkernel_virtual)
213 * When coming in via this entry point, we need to alter the SLB to
214 * shadow the segment register emulation entries in DMAP space.
215 * We need to do this dance because we are running with virtual-mode
216 * OpenFirmware and have not yet taken over the MMU.
219 * 1) The kernel is currently identity-mapped.
220 * 2) We are currently executing at an address compatible with
222 * 3) The first 16 SLB entries are emulating SRs.
223 * 4) The rest of the SLB is not in use.
224 * 5) OpenFirmware is not manipulating the SLB at runtime.
225 * 6) We are running on 64-bit AIM.
230 /* Switch to real mode because we are about to mess with the SLB. */
231 andi. %r14, %r14, ~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l /* Clear translation (IR/DR), ME and RI in the MSR image in r14; presumably applied via mtmsrd in elided code — TODO confirm */
234 /* Prepare variables for later use. */
237 oris %r18, %r18, 0xc000
238 sldi %r18, %r18, 32 /* r18: 0xc000000000000000 (DMAP base address) */
241 * Loop over the first 16 SLB entries.
242 * Offset the SLBE into the DMAP, add 16 to the index, and write
243 * it back to the SLB.
245 /* XXX add more safety checks */
248 or %r16, %r16, %r14 /* index is 0-15 */
249 ori %r16, %r16, 0x10 /* add 16 to index, so the shadow copies land in SLB slots 16-31 */
250 or %r16, %r16, %r18 /* SLBE DMAP offset: move the effective address into DMAP space */
251 rldicr %r17, %r16, 0, 37 /* Invalidation SLBE: keep bits 0-37 (ESID), clear the low-order bits */
263 * Now that we are set up with a temporary direct map, we can
264 * continue with __restartkernel. Translation will be switched
265 * back on at the rfid, at which point we will be executing from
266 * the temporary direct map we just installed, until the kernel
267 * takes over responsibility for the MMU.
271 ASEND(__restartkernel_virtual)
273 ASENTRY_NOPROF(__restartkernel)
/* Re-enter the kernel at a relocated address: jumps to __start + r8 with a new MSR (body elided in this view). */
275 * r3-r7: arguments to go to __start
276 * r8: offset from current kernel address to apply
277 * r9: MSR to set when (atomically) jumping to __start + r8
288 ASEND(__restartkernel)
290 #include <powerpc/aim/trap_subr64.S>