4 * Copyright (C) 2010-2016 Nathan Whitehorn
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
22 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
23 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
24 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
25 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <sys/syscall.h>
34 #include <machine/trap.h>
35 #include <machine/param.h>
36 #include <machine/spr.h>
37 #include <machine/asm.h>
38 #include <machine/vmparam.h>
44 /* Glue for linker script */
46 .set kernbase, KERNBASE	/* export KERNBASE so the linker script can reference it */

/*
 * Early-boot stack sizes.  tmpstk backs the initial stack pointer that
 * __start (below) installs before calling into C; TRAPSTKSZ presumably
 * sizes a dedicated early trap stack -- its consumer is outside this
 * excerpt, so confirm against the rest of the file.
 */
61 #define TMPSTKSZ 16384 /* 16K temporary stack */
69 #define TRAPSTKSZ 8192 /* 8k trap stack */
77 * Entry point for bootloaders that do not fully implement ELF and start
78 * at the beginning of the image (kexec, notably). In its own section so
79 * that it ends up before any linker-generated call stubs and actually at
80 * the beginning of the image. kexec on some systems also enters at
81 * (start of image) + 0x60, so put a spin loop there.
83 .section ".text.kboot", "x", @progbits
/*
 * Fixed-offset slots: the platform layer and kexec locate these by
 * absolute offset from the start of the image, so the location-counter
 * (". =") assignments below must not move.
 */
86 . = kbootentry + 0x40 /* Magic address used in platform layer */
90 . = kbootentry + 0x60 /* Entry point for kexec APs */
91 ap_kexec_start: /* At 0x60 past start, copied to 0x60 by kexec */
92 /* r3 set to CPU ID by kexec */
94 /* Invalidate icache for low-memory copy and jump there */
100 ba 0x80 /* Absolute branch to next inst */
102 . = kbootentry + 0x80 /* Aligned to cache line */
/*
 * AP spin loop: poll the word at absolute address 0x40 (the slot
 * reserved above) until it equals this CPU's ID in r3.  The
 * "or 31,31,31" / "or 2,2,2" instructions are the Power ISA
 * program-priority hint no-ops: drop this hardware thread to very
 * low priority while polling and restore normal priority once
 * released, so the spin does not starve SMT siblings.
 */
103 1: or 31,31,31 /* yield */
105 lwz %r1,0x40(0) /* Spin on ap_kexec_spin_sem */
106 cmpw %r1,%r3 /* Until it equals our CPU ID */
110 or 2,2,2 /* unyield */
112 /* Make sure that it will be software reset. Clear SRR1 */
118 * Now start the real text section
126 * Main kernel entry point.
128 * Calling convention:
129 * r3: Flattened Device Tree pointer (or zero)
131 * r5: OF client interface pointer (or zero)
132 * r6: Loader metadata pointer (or zero)
133 * r7: Magic cookie (0xfb5d104d) to indicate that r6 has loader metadata
136 ASENTRY_NOPROF(__start)
137 /* Set 64-bit mode if not yet set before branching to C */
143 nop /* Make this block a multiple of 8 bytes */
/*
 * The ".llong" below is a PC-relative constant: (__tocbase + 0x8000)
 * minus the address of the constant itself.  Adding the constant to its
 * own run-time address yields a correct r2 no matter where the image was
 * loaded.  The 0x8000 bias is the 64-bit ELF TOC convention: r2 points
 * 0x8000 past the TOC start so the full signed 16-bit displacement range
 * is usable.
 */
145 /* Set up the TOC pointer */
150 .llong __tocbase + 0x8000 - .
155 /* Get load offset */
156 ld %r31,-0x8000(%r2) /* First TOC entry is TOC base */
157 subf %r31,%r31,%r2 /* Subtract from real TOC base to get base */
/* r31 now holds the relocation offset: run-time base minus link-time base */
159 /* Set up the stack pointer */
/*
 * Same PC-relative trick as the TOC constant above: resolves to the top
 * of tmpstk, with 96 bytes of headroom -- presumably room for the
 * initial call frame (TODO confirm against the ABI frame size used here).
 */
161 .llong tmpstk + TMPSTKSZ - 96 - .
167 /* Relocate kernel */
189 mr %r4,%r2 /* Replace ignored r4 with tocbase for trap handlers */
193 /* Set stack pointer to new value and branch to mi_startup */
203 ASENTRY_NOPROF(__restartkernel_virtual)
205 * When coming in via this entry point, we need to alter the SLB to
206 * shadow the segment register emulation entries in DMAP space.
207 * We need to do this dance because we are running with virtual-mode
208 * OpenFirmware and have not yet taken over the MMU.
211 * 1) The kernel is currently identity-mapped.
212 * 2) We are currently executing at an address compatible with
214 * 3) The first 16 SLB entries are emulating SRs.
215 * 4) The rest of the SLB is not in use.
216 * 5) OpenFirmware is not manipulating the SLB at runtime.
217 * 6) We are running on 64-bit AIM.
222 /* Switch to real mode because we are about to mess with the SLB. */
/*
 * NOTE(review): "andi." zero-extends its 16-bit immediate, so this
 * clears every bit of %r14 above bit 15 (including the 64-bit-mode SF
 * bit), not just the four PSL_* bits named in the mask.  Confirm this is
 * intended; if only the named bits should be cleared, an "andc" against
 * a constructed mask would be required instead.
 */
223 andi. %r14, %r14, ~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l
226 /* Prepare variables for later use. */
229 oris %r18, %r18, 0xc000 /* r18 |= 0xc0000000; presumably r18 was zeroed just before (not visible in this excerpt) */
230 sldi %r18, %r18, 32 /* r18: 0xc000000000000000 */
233 * Loop over the first 16 SLB entries.
234 * Offset the SLBE into the DMAP, add 16 to the index, and write
235 * it back to the SLB.
237 /* XXX add more safety checks */
/*
 * Loop body (partial in this excerpt).  Build the replacement SLBE in
 * r16: merge in the loop index, bump the index past the 16 SR-emulation
 * slots, and move the effective address into the DMAP region via r18.
 * NOTE(review): r14 is reused here as the loop index (0-15); the MSR
 * value it held above must already have been consumed -- verify in the
 * elided lines.
 */
240 or %r16, %r16, %r14 /* index is 0-15 */
241 ori %r16, %r16, 0x10 /* add 16 to index. */
242 or %r16, %r16, %r18 /* SLBE DMAP offset */
243 rldicr %r17, %r16, 0, 37 /* Invalidation SLBE: rldicr sh=0, me=37 keeps bits 0-37 and clears the low 26 bits */
254 ASENTRY_NOPROF(__restartkernel)
/*
 * Trampoline that re-enters __start at a relocated address (__start + r8)
 * with a caller-supplied MSR.  Body continues past this excerpt.
 */
256 * r3-r7: arguments to go to __start
257 * r8: offset from current kernel address to apply
258 * r9: MSR to set when (atomically) jumping to __start + r8
270 #include <powerpc/aim/trap_subr64.S>