2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include <sys/mutex.h>
33 #include <machine/asm.h>
34 #include <machine/hid.h>
35 #include <machine/param.h>
36 #include <machine/spr.h>
37 #include <machine/psl.h>
38 #include <machine/pte.h>
39 #include <machine/trap.h>
40 #include <machine/vmparam.h>
41 #include <machine/tlb.h>
42 #include <machine/bootinfo.h>
44 #define TMPSTACKSZ 16384
51 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
52 * mark the start of kernel text.
58 * Startup entry. Note, this must be the first thing in the text segment!
65 * Assumptions on the boot loader:
66 * - system memory starts from physical address 0
67 * - it's mapped by a single TLB1 entry
68 * - TLB1 mapping is 1:1 pa to va
69 * - kernel is loaded at 16MB boundary
70 * - all PID registers are set to the same value
71 * - CPU is running in AS=0
73 * Register contents provided by the loader(8):
75 * r3 : metadata pointer
77 * We rearrange the TLB1 layout as follows:
78 * - find TLB1 entry we started in
79 * - make sure it's protected, invalidate other entries
80 * - create temp entry in the second AS (make sure it's not TLB[1])
81 * - switch to temp mapping
82 * - map 16MB of RAM in TLB1[1]
83 * - use AS=1, set EPN to KERNBASE and RPN to kernel load address
84 * - switch to the TLB1[1] mapping
85 * - invalidate temp mapping
87 * locore registers use:
89 * r2 : trace pointer (AP only, for early diagnostics)
90 * r3-r27 : scratch registers
92 * r29 : temp TLB1 entry
93 * r30 : initial TLB1 entry we started in
94 * r31 : metadata pointer
98 * Keep metadata ptr in r31 for later use.
105 li %r3, PSL_DE /* Keep debug exceptions for CodeWarrior. */
109 lis %r3, HID0_E500_DEFAULT_SET@h
110 ori %r3, %r3, HID0_E500_DEFAULT_SET@l
113 lis %r3, HID1_E500_DEFAULT_SET@h
114 ori %r3, %r3, HID1_E500_DEFAULT_SET@l
118 /* Invalidate all entries in TLB0 */
123 * Locate the TLB1 entry that maps this code
127 bl tlb1_find_current /* the entry number found is returned in r30 */
129 bl tlb1_inval_all_but_current
131 * Create temporary mapping in AS=1 and switch to it
133 bl tlb1_temp_mapping_as1
136 ori %r3, %r3, (PSL_IS | PSL_DS)
142 rfi /* Switch context */
145 * Invalidate initial entry
151 * Setup final mapping in TLB1[1] and switch to it
153 /* Final kernel mapping, map in 16 MB of RAM */
154 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
155 li %r4, 1 /* Entry 1 */
156 rlwimi %r3, %r4, 16, 12, 15
160 li %r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
161 oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
162 mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
166 ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
168 ori %r3, %r3, MAS2_M@l /* WIMGE = 0b00100 */
173 /* Discover phys load address */
175 3: mflr %r4 /* Use current address */
176 rlwinm %r4, %r4, 0, 0, 7 /* 16MB alignment mask */
177 mr %r28, %r4 /* Keep kernel load address */
178 ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
179 mtspr SPR_MAS3, %r4 /* Set RPN and protection */
185 /* Switch to the above TLB1[1] mapping */
188 rlwinm %r4, %r4, 0, 8, 31 /* Current offset from kernel load address */
189 rlwinm %r3, %r3, 0, 0, 19
190 add %r4, %r4, %r3 /* Convert to kernel virtual address */
192 li %r3, PSL_DE /* Note AS=0 */
198 * Invalidate temp mapping
204 * Save kernel load address for later use.
207 addi %r3, %r3, kernload@l
211 * APs need a separate copy of kernload info within the __boot_page
212 * area so they can access this value very early, before their TLBs
213 * are fully set up and the kernload global location is available.
215 lis %r3, kernload_ap@ha
216 addi %r3, %r3, kernload_ap@l
222 * Setup a temporary stack
225 addi %r1, %r1, tmpstack@l
226 addi %r1, %r1, (TMPSTACKSZ - 8)
229 * Initialise exception vector offsets
234 * Set up arguments and jump to system initialization code
236 lis %r3, kernel_text@ha
237 addi %r3, %r3, kernel_text@l
239 addi %r4, %r4, _end@l
240 mr %r5, %r31 /* metadata ptr */
242 /* Prepare e500 core */
245 /* Switch to thread0.td_kstack now */
250 /* Machine independent part, does not return */
257 /************************************************************************/
259 /************************************************************************/
270 * Initial configuration
274 lis %r3, HID0_E500_DEFAULT_SET@h
275 ori %r3, %r3, HID0_E500_DEFAULT_SET@l
278 lis %r3, HID1_E500_DEFAULT_SET@h
279 ori %r3, %r3, HID1_E500_DEFAULT_SET@l
283 /* Enable branch prediction */
288 /* Invalidate all entries in TLB0 */
293 * Find TLB1 entry which is translating us now
297 bl tlb1_find_current /* the entry number found is in r30 */
299 bl tlb1_inval_all_but_current
301 * Create temporary translation in AS=1 and switch to it
303 bl tlb1_temp_mapping_as1
306 ori %r3, %r3, (PSL_IS | PSL_DS)
312 rfi /* Switch context */
315 * Invalidate initial entry
321 * Setup final mapping in TLB1[1] and switch to it
323 /* Final kernel mapping, map in 16 MB of RAM */
324 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
325 li %r4, 1 /* Entry 1 */
326 rlwimi %r3, %r4, 16, 4, 15
330 li %r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
331 oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
332 mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
336 ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
337 ori %r3, %r3, MAS2_M@l /* WIMGE = 0b00100 */
341 /* Retrieve kernel load [physical] address from kernload_ap */
344 rlwinm %r3, %r3, 0, 0, 19
345 lis %r4, kernload_ap@h
346 ori %r4, %r4, kernload_ap@l
347 lis %r5, __boot_page@h
348 ori %r5, %r5, __boot_page@l
349 sub %r4, %r4, %r5 /* offset of kernload_ap within __boot_page */
352 /* Set RPN and protection */
353 ori %r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
360 /* Switch to the final mapping */
363 rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */
364 add %r3, %r3, %r5 /* Make this virtual address */
366 li %r4, 0 /* Note AS=0 */
372 * At this point we're running at virtual addresses KERNBASE and beyond so
373 * it's allowed to directly access all locations the kernel was linked
378 * Invalidate temp mapping
384 * Setup a temporary stack
387 addi %r1, %r1, tmpstack@l
388 addi %r1, %r1, (TMPSTACKSZ - 8)
391 * Initialise exception vector offsets
396 * Assign our pcpu instance
399 ori %r3, %r3, ap_pcpu@l
405 bl cpudep_ap_bootstrap
406 /* Switch to the idle thread's kstack */
409 bl machdep_ap_bootstrap
416 * Invalidate all entries in the given TLB.
421 rlwinm %r3, %r3, 3, 0x18 /* TLBSEL */
422 ori %r3, %r3, 0x4 /* INVALL */
432 * expects address to look up in r3, returns entry number in r30
434 * FIXME: the hidden assumption is we are now running in AS=0, but we should
435 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
439 slwi %r17, %r17, MAS6_SPID0_SHIFT
444 rlwinm %r30, %r17, 16, 20, 31 /* MAS0[ESEL] -> r30 */
446 /* Make sure we have IPROT set on the entry */
448 oris %r17, %r17, MAS1_IPROT@h
457 * Invalidates a single entry in TLB1.
463 lis %r4, MAS0_TLBSEL1@h /* Select TLB1 */
464 rlwimi %r4, %r3, 16, 12, 15 /* Select our entry */
468 li %r5, 0 /* MAS1[V] = 0 */
477 * r30 current entry number
478 * r29 returned temp entry
/*
 * tlb1_temp_mapping_as1:
 *
 * Create a temporary TLB1 mapping in the second address space (AS=1)
 * mirroring the entry we are currently running from, so the caller can
 * rfi into it and then safely rewrite/invalidate the original AS=0 entry
 * (see the "We rearrange the TLB1 layout" plan at the top of the file).
 *
 * In:      r30 = index of the TLB1 entry currently translating us
 * Out:     r29 = index of the temporary entry written
 * Scratch: r3-r5, MAS0/MAS1 SPRs
 *
 * NOTE(review): this excerpt is line-sampled (the embedded numbers are
 * original line numbers); instructions between them are not visible, so
 * the comments below describe only the visible instructions.
 */
481 tlb1_temp_mapping_as1:
482 /* Read our current translation */
483 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
484 rlwimi %r3, %r30, 16, 12, 15 /* Select our current entry */
490 * Prepare and write temp entry
492 * FIXME this is not robust against overflow i.e. when the current
493 * entry is the last in TLB1
495 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
496 addi %r29, %r30, 1 /* Use next entry. */
501 1: rlwimi %r3, %r29, 16, 12, 15 /* Select temp entry */
/*
 * Insert a single bit of r4 into the MAS1 image in r5 — presumably
 * setting MAS1[TS]=1 so the temp entry lives in AS=1; confirm the bit
 * position against the e500 Core Reference Manual.
 */
506 rlwimi %r5, %r4, 12, 19, 19
507 li %r4, 0 /* Global mapping, TID=0 */
508 rlwimi %r5, %r4, 16, 8, 15 /* insert TID=0 into the MAS1 TID field */
509 oris %r5, %r5, (MAS1_VALID | MAS1_IPROT)@h /* mark entry valid + protected */
518 * Loops over TLB1, invalidates all entries skipping the one which currently
/*
 * tlb1_inval_all_but_current:
 *
 * Loop over all TLB1 entries and invalidate each one except the entry
 * (index in r30) that is currently translating this code.
 *
 * In:      r30 = index of the TLB1 entry to preserve
 * Scratch: r3-r5
 *
 * NOTE(review): line-sampled excerpt — the tlbre/tlbwe and the branch
 * instructions between the numbered lines are not visible here.
 */
524 tlb1_inval_all_but_current:
526 mfspr %r3, SPR_TLB1CFG /* Get number of entries */
527 andi. %r3, %r3, TLBCFG_NENTRY_MASK@l /* r3 = TLB1 entry count */
528 li %r4, 0 /* Start from Entry 0 */
529 1: lis %r5, MAS0_TLBSEL1@h
530 rlwimi %r5, %r4, 16, 12, 15 /* MAS0[ESEL] = r4, entry to examine */
535 cmpw %r4, %r30 /* our current entry? */
537 rlwinm %r5, %r5, 0, 2, 31 /* clear VALID and IPROT bits */
544 cmpw %r4, %r3 /* Check if this is the last entry */
551 * Boot page needs to be exactly 4K, with the last word of this page
552 * acting as the reset vector, so we need to stuff the remainder.
553 * Upon release from holdoff CPU fetches the last word of the boot
556 .space 4092 - (__boot_page_padding - __boot_page)
560 /************************************************************************/
561 /* locore subroutines */
562 /************************************************************************/
/*
 * Interrupt vector setup (presumably the body of ivor_setup — its ENTRY
 * label is elided from this excerpt): load the base of the interrupt
 * handler table, then program each handler's offset into the matching
 * IVORxx register (IVOR10 = decrementer, IVOR11 = fixed-interval timer,
 * IVOR12 = watchdog, IVOR13 = data TLB error, IVOR14 = instruction TLB
 * error, per the visible pairings below).
 *
 * Scratch: r3
 *
 * NOTE(review): the mtspr SPR_IVPR and mtspr SPR_IVOR0..IVOR9 lines that
 * pair with the earlier li instructions are elided by the line sampling.
 */
565 /* Set base address of interrupt handler routines */
566 lis %r3, interrupt_vector_base@h
569 /* Assign interrupt handler routines offsets */
570 li %r3, int_critical_input@l
572 li %r3, int_machine_check@l
574 li %r3, int_data_storage@l
576 li %r3, int_instr_storage@l
578 li %r3, int_external_input@l
580 li %r3, int_alignment@l
582 li %r3, int_program@l
584 li %r3, int_syscall@l
586 li %r3, int_decrementer@l
587 mtspr SPR_IVOR10, %r3
588 li %r3, int_fixed_interval_timer@l
589 mtspr SPR_IVOR11, %r3
590 li %r3, int_watchdog@l
591 mtspr SPR_IVOR12, %r3
592 li %r3, int_data_tlb_error@l
593 mtspr SPR_IVOR13, %r3
594 li %r3, int_inst_tlb_error@l
595 mtspr SPR_IVOR14, %r3
597 mtspr SPR_IVOR15, %r3
601 * void tid_flush(tlbtid_t tid);
603 * Invalidate all TLB0 entries which match the given TID. Note this is
604 * dedicated for cases when invalidation(s) should NOT be propagated to other
607 * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been set up
608 * correctly (by tlb0_get_tlbconf()).
/*
 * tid_flush body (the ENTRY label is elided by the line sampling — the
 * comment block above gives the C prototype): walk every way and entry
 * of TLB0 and clear the VALID bit of each entry whose TID matches r3,
 * skipping TID_KERNEL so kernel translations are never evicted.
 * Interrupts are disabled for the duration of the walk and the MSR is
 * restored afterwards.
 *
 * In:      r3 = TID to flush
 * Scratch: r4-r9 (way/entry counters and MAS images)
 *
 * NOTE(review): the tlbre/tlbwe and loop-control instructions between
 * the numbered lines are not visible in this excerpt.
 */
612 cmpwi %r3, TID_KERNEL
613 beq tid_flush_end /* don't evict kernel translations */
615 /* Number of TLB0 ways */
617 ori %r4, %r4, tlb0_ways@l
620 /* Number of entries / way */
621 lis %r5, tlb0_entries_per_way@h
622 ori %r5, %r5, tlb0_entries_per_way@l
625 /* Disable interrupts */
629 li %r6, 0 /* ways counter */
631 li %r7, 0 /* entries [per way] counter */
633 /* Select TLB0 and ESEL (way) */
634 lis %r8, MAS0_TLBSEL0@h
635 rlwimi %r8, %r6, 16, 14, 15 /* MAS0[ESEL] = way counter in r6 */
639 /* Select EPN (entry within the way) */
640 rlwinm %r8, %r7, 12, 13, 19
645 /* Check if valid entry */
647 andis. %r9, %r8, MAS1_VALID@h
648 beq next_entry /* invalid entry */
650 /* Check if this is our TID */
651 rlwinm %r9, %r8, 16, 24, 31 /* extract the MAS1 TID field */
654 bne next_entry /* not our TID */
656 /* Clear VALID bit */
657 rlwinm %r8, %r8, 0, 1, 31
674 /* Restore MSR (possibly re-enable interrupts) */
682 * Cache disable/enable/inval sequences according
683 * to section 2.16 of E500CORE RM.
686 /* Invalidate d-cache */
687 mfspr %r3, SPR_L1CSR0
688 ori %r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
691 mtspr SPR_L1CSR0, %r3
693 1: mfspr %r3, SPR_L1CSR0
694 andi. %r3, %r3, L1CSR0_DCFI
698 ENTRY(dcache_disable)
699 /* Disable d-cache */
700 mfspr %r3, SPR_L1CSR0
706 mtspr SPR_L1CSR0, %r3
712 mfspr %r3, SPR_L1CSR0
713 oris %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
714 ori %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
717 mtspr SPR_L1CSR0, %r3
722 /* Invalidate i-cache */
723 mfspr %r3, SPR_L1CSR1
724 ori %r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
726 mtspr SPR_L1CSR1, %r3
728 1: mfspr %r3, SPR_L1CSR1
729 andi. %r3, %r3, L1CSR1_ICFI
733 ENTRY(icache_disable)
734 /* Disable i-cache */
735 mfspr %r3, SPR_L1CSR1
740 mtspr SPR_L1CSR1, %r3
746 mfspr %r3, SPR_L1CSR1
747 oris %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
748 ori %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
750 mtspr SPR_L1CSR1, %r3
757 * Similar to setjmp to setup for handling faults on accesses to user memory.
758 * Any routine using this may only call bcopy, either the form below,
759 * or the (currently used) C code optimized, so it doesn't use any non-volatile
766 lwz %r4, PC_CURTHREAD(%r4)
768 stw %r3, PCB_ONFAULT(%r4)
775 stmw %r10, 12(%r3) /* store CR, CTR, XER, [r13 .. r31] */
776 li %r3, 0 /* return FALSE */
779 /************************************************************************/
781 /************************************************************************/
788 * Compiled KERNBASE locations
791 .set kernbase, KERNBASE
796 #define INTRCNT_COUNT 256 /* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */
801 .space INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
805 .space INTRCNT_COUNT * 4 * 2
808 #include <powerpc/booke/trap_subr.S>