2 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
19 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <machine/param.h>
33 #include <machine/asm.h>
34 #include <machine/spr.h>
35 #include <machine/psl.h>
36 #include <machine/pte.h>
37 #include <machine/trap.h>
38 #include <machine/vmparam.h>
39 #include <machine/tlb.h>
40 #include <machine/bootinfo.h>
42 #define TMPSTACKSZ 16384
45 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
46 * mark the start of kernel text.
53 * Startup entry. Note, this must be the first thing in the text segment!
60 * Assumption on a boot loader:
61 * - system memory starts from physical address 0
62 * - kernel is loaded at 16MB boundary
63 * - it's mapped by a single TLB1 entry
64 * - TLB1 mapping is 1:1 pa to va
65 * - all PID registers are set to the same value
67 * Loader register use:
69 * r3 : metadata pointer
71 * We rearrange the TLB1 layout as follows:
72 * - find AS and entry kernel started in
73 * - make sure it's protected, invalidate other entries
74 * - create temp entry in the second AS (make sure it's not TLB[15])
75 * - switch to temp mapping
76 * - map 16MB of RAM in TLB1[15]
77 * - use AS=1, set EPN to KERNBASE and RPN to kernel load address
78 * - switch to TLB1[15] mapping
79 * - invalidate temp mapping
81 * locore register use:
86 * r5 : metadata pointer
88 * r10 : entry we started in
90 * r12 : AS we started in
91 * r13-r31 : auxiliary registers
95 * Move metadata ptr to r5
102 li %r16, 0x200 /* Keep debug exceptions for CodeWarrior. */
113 /* Issue INV_ALL Invalidate on TLB0 */
120 * Use tblsx to locate the TLB1 entry that maps kernel code
122 bl 1f /* Current address */
125 /* Find entry that maps current address */
127 slwi %r17, %r17, MAS6_SPID0_SHIFT
132 /* Copy entry number to r10 */
134 rlwinm %r10, %r17, 16, 28, 31
136 /* Invalidate TLB1, skipping our entry. */
137 mfspr %r17, SPR_TLB1CFG /* Get number of entries */
138 andi. %r17, %r17, TLBCFG_NENTRY_MASK@l
139 li %r16, 0 /* Start from Entry 0 */
141 2: lis %r15, MAS0_TLBSEL1@h /* Select TLB1 */
142 rlwimi %r15, %r16, 16, 12, 15
149 /* Clear VALID and IPROT bits for other entries */
150 rlwinm %r15, %r15, 0, 2, 31
156 3: addi %r16, %r16, 1
157 cmpw %r16, %r17 /* Check if this is the last entry */
161 * Create temporary mapping in the other Address Space
163 lis %r17, MAS0_TLBSEL1@h /* Select TLB1 */
164 rlwimi %r17, %r10, 16, 12, 15 /* Select our entry */
167 tlbre /* Read it in */
169 /* Prepare and write temp entry */
170 lis %r17, MAS0_TLBSEL1@h /* Select TLB1 */
171 addi %r11, %r10, 0x1 /* Use next entry. */
172 rlwimi %r17, %r11, 16, 12, 15 /* Select temp entry */
177 li %r15, 1 /* AS 1 */
178 rlwimi %r16, %r15, 12, 19, 19
181 rlwimi %r16, %r17, 0, 8, 15 /* Global mapping, TID=0 */
189 ori %r16, %r16, 0x30 /* Switch to AS 1. */
191 bl 4f /* Find current execution address */
193 addi %r15, %r15, 20 /* Increment to instruction after rfi */
196 rfi /* Switch context */
199 * Invalidate initial entry
205 * Setup final mapping in TLB1[1] and switch to it
207 /* Final kernel mapping, map in 16 MB of RAM */
208 lis %r16, MAS0_TLBSEL1@h /* Select TLB1 */
209 li %r17, 1 /* Entry 1 */
210 rlwimi %r16, %r17, 16, 12, 15
214 li %r16, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
215 oris %r16, %r16, (MAS1_VALID | MAS1_IPROT)@h
220 ori %r19, %r19, KERNBASE@l
221 mtspr SPR_MAS2, %r19 /* Set final EPN, clear WIMG */
225 5: mflr %r16 /* Use current address */
226 lis %r18, 0xff00 /* 16MB alignment mask */
228 mr %r25, %r16 /* Copy kernel load address */
229 ori %r16, %r16, (MAS3_SX | MAS3_SW | MAS3_SR)@l
230 mtspr SPR_MAS3, %r16 /* Set RPN and protection */
236 /* Switch to the above TLB1[1] mapping */
237 lis %r18, 0x00ff /* 16MB offset mask */
238 ori %r18, %r18, 0xffff
240 6: mflr %r20 /* Use current address */
241 and %r20, %r20, %r18 /* Offset from kernel load address */
242 add %r20, %r20, %r19 /* Move to kernel virtual address */
243 addi %r20, %r20, 32 /* Increment to instr. after rfi */
249 /* Save kernel load address for later use */
250 lis %r24, kernload@ha
251 addi %r24, %r24, kernload@l
255 * Invalidate temp mapping
261 * Setup a temporary stack
264 addi %r1, %r1, tmpstack@l
265 addi %r1, %r1, (TMPSTACKSZ - 8)
268 * Intialise exception vector offsets
273 * Jump to system initialization code
275 * Setup first two arguments for e500_init, metadata (r5) is already in place.
277 lis %r3, kernel_text@ha
278 addi %r3, %r3, kernel_text@l
280 addi %r4, %r4, _end@l
284 /* Switch to thread0.td_kstack */
289 bl mi_startup /* Machine independet part, does not return */
291 /************************************************************************/
292 /* locore subroutines */
293 /************************************************************************/
296 lis %r17, MAS0_TLBSEL1@h /* Select TLB1 */
297 rlwimi %r17, %r22, 16, 12, 15 /* Select our entry */
300 tlbre /* Read it in */
311 /* Set base address of interrupt handler routines */
312 lis %r21, interrupt_vector_base@h
315 /* Assign interrupt handler routines offsets */
316 li %r21, int_critical_input@l
317 mtspr SPR_IVOR0, %r21
318 li %r21, int_machine_check@l
319 mtspr SPR_IVOR1, %r21
320 li %r21, int_data_storage@l
321 mtspr SPR_IVOR2, %r21
322 li %r21, int_instr_storage@l
323 mtspr SPR_IVOR3, %r21
324 li %r21, int_external_input@l
325 mtspr SPR_IVOR4, %r21
326 li %r21, int_alignment@l
327 mtspr SPR_IVOR5, %r21
328 li %r21, int_program@l
329 mtspr SPR_IVOR6, %r21
330 li %r21, int_syscall@l
331 mtspr SPR_IVOR8, %r21
332 li %r21, int_decrementer@l
333 mtspr SPR_IVOR10, %r21
334 li %r21, int_fixed_interval_timer@l
335 mtspr SPR_IVOR11, %r21
336 li %r21, int_watchdog@l
337 mtspr SPR_IVOR12, %r21
338 li %r21, int_data_tlb_error@l
339 mtspr SPR_IVOR13, %r21
340 li %r21, int_inst_tlb_error@l
341 mtspr SPR_IVOR14, %r21
343 mtspr SPR_IVOR15, %r21
347 * void tlb1_inval_va(vm_offset_t va)
349 * r3 - va to invalidate
367 * void tlb0_inval_va(vm_offset_t va)
369 * r3 - va to invalidate
372 /* EA mask, this also clears TLBSEL, selecting TLB0 */
384 * Cache disable/enable/inval sequences according
385 * to section 2.16 of E500CORE RM.
388 /* Invalidate d-cache */
389 mfspr %r3, SPR_L1CSR0
390 ori %r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
393 mtspr SPR_L1CSR0, %r3
397 ENTRY(dcache_disable)
398 /* Disable d-cache */
399 mfspr %r3, SPR_L1CSR0
405 mtspr SPR_L1CSR0, %r3
411 mfspr %r3, SPR_L1CSR0
412 oris %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
413 ori %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
416 mtspr SPR_L1CSR0, %r3
421 /* Invalidate i-cache */
422 mfspr %r3, SPR_L1CSR1
423 ori %r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
425 mtspr SPR_L1CSR1, %r3
429 ENTRY(icache_disable)
430 /* Disable i-cache */
431 mfspr %r3, SPR_L1CSR1
436 mtspr SPR_L1CSR1, %r3
442 mfspr %r3, SPR_L1CSR1
443 oris %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
444 ori %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
446 mtspr SPR_L1CSR1, %r3
453 * Similar to setjmp to setup for handling faults on accesses to user memory.
454 * Any routine using this may only call bcopy, either the form below,
455 * or the (currently used) C code optimized, so it doesn't use any non-volatile
462 lwz %r4, PC_CURTHREAD(%r4)
464 stw %r3, PCB_ONFAULT(%r4)
471 stmw %r10, 12(%r3) /* store CR, CTR, XER, [r13 .. r31] */
472 li %r3, 0 /* return FALSE */
475 /************************************************************************/
477 /************************************************************************/
484 * Compiled KERNBASE locations
487 .set kernbase, KERNBASE
492 #define INTRCNT_COUNT 256 /* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */
497 .space INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
501 .space INTRCNT_COUNT * 4 * 2
504 #include <powerpc/booke/trap_subr.S>