2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include "opt_hwpmc_hooks.h"
31 #include <machine/asm.h>
32 #include <machine/hid.h>
33 #include <machine/param.h>
34 #include <machine/spr.h>
35 #include <machine/pte.h>
36 #include <machine/trap.h>
37 #include <machine/vmparam.h>
38 #include <machine/tlb.h>
44 #define TMPSTACKSZ 16384
47 #define GET_TOCBASE(r) \
49 #define TOC_RESTORE nop
59 #define THREAD_REG %r13
64 #define GET_TOCBASE(r)
75 #define THREAD_REG %r2
82 /* Placate lld by creating a kboot stub. */
83 .section ".text.kboot", "x", @progbits
92 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
93 * mark the start of kernel text.
99 * Startup entry. Note, this must be the first thing in the text segment!
106 * Assumptions on the boot loader:
107 * - System memory starts from physical address 0
108 * - It's mapped by a single TLB1 entry
109 * - TLB1 mapping is 1:1 pa to va
110 * - Kernel is loaded at 64MB boundary
111 * - All PID registers are set to the same value
112 * - CPU is running in AS=0
114 * Register contents provided by the loader(8):
116 * r3 : metadata pointer
118 * We rearrange the TLB1 layout as follows:
119 * - Find TLB1 entry we started in
120 * - Make sure it's protected, invalidate other entries
121 * - Create temp entry in the second AS (make sure it's not TLB[1])
122 * - Switch to temp mapping
123 * - Map 64MB of RAM in TLB1[1]
124 * - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to kernel load address
125 * - Switch to TLB1[1] mapping
126 * - Invalidate temp mapping
128 * locore registers use:
130 * r2 : trace pointer (AP only, for early diagnostics)
131 * r3-r27 : scratch registers
132 * r28 : temp TLB1 entry
133 * r29 : initial TLB1 entry we started in
134 * r30-r31 : arguments (metadata pointer)
138 * Keep arguments in r30 & r31 for later use.
146 li %r3, PSL_DE /* Keep debug exceptions for CodeWarrior. */
148 oris %r3, %r3, PSL_CM@h
154 * Initial HIDs configuration
158 rlwinm %r3, %r3, 16, 16, 31
160 lis %r4, HID0_E500_DEFAULT_SET@h
161 ori %r4, %r4, HID0_E500_DEFAULT_SET@l
163 /* Check for e500mc and e5500 */
164 cmpli 0, 0, %r3, FSL_E500mc
167 lis %r4, HID0_E500MC_DEFAULT_SET@h
168 ori %r4, %r4, HID0_E500MC_DEFAULT_SET@l
171 cmpli 0, 0, %r3, FSL_E5500
174 lis %r4, HID0_E5500_DEFAULT_SET@h
175 ori %r4, %r4, HID0_E5500_DEFAULT_SET@l
182 * E500mc and E5500 do not have HID1 register, so skip HID1 setup on
185 cmpli 0, 0, %r3, FSL_E500mc
187 cmpli 0, 0, %r3, FSL_E5500
189 cmpli 0, 0, %r3, FSL_E6500
192 lis %r3, HID1_E500_DEFAULT_SET@h
193 ori %r3, %r3, HID1_E500_DEFAULT_SET@l
197 /* Invalidate all entries in TLB0 */
205 * Locate the TLB1 entry that maps this code
209 bl tlb1_find_current /* the entry found is returned in r29 */
211 bl tlb1_inval_all_but_current
214 * Create temporary mapping in AS=1 and switch to it
216 bl tlb1_temp_mapping_as1
219 ori %r3, %r3, (PSL_IS | PSL_DS)
222 addi %r4, %r4, (3f - 2b)
225 rfi /* Switch context */
228 * Invalidate initial entry
235 * Setup final mapping in TLB1[1] and switch to it
237 /* Final kernel mapping, map in 64 MB of RAM */
238 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
239 li %r4, 0 /* Entry 0 */
240 rlwimi %r3, %r4, 16, 10, 15
244 li %r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
245 oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
246 mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
249 LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
250 ori %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
254 /* Discover phys load address */
256 3: mflr %r4 /* Use current address */
257 rlwinm %r4, %r4, 0, 0, 5 /* 64MB alignment mask */
258 ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
259 mtspr SPR_MAS3, %r4 /* Set RPN and protection */
268 /* Switch to the above TLB1[1] mapping */
275 rlwinm %r4, %r4, 0, 6, 31 /* Current offset from kernel load address */
276 rlwinm %r3, %r3, 0, 0, 19
278 add %r4, %r4, %r3 /* Convert to kernel virtual address */
279 addi %r4, %r4, (5f - 4b)
280 li %r3, PSL_DE /* Note AS=0 */
282 oris %r3, %r3, PSL_CM@h
289 * Invalidate temp mapping
298 /* Set up the TOC pointer */
303 .llong __tocbase + 0x8000 - .
310 /* Get load offset */
311 ld %r31,-0x8000(%r2) /* First TOC entry is TOC base */
312 subf %r31,%r31,%r2 /* Subtract from real TOC base to get base */
314 /* Set up the stack pointer */
316 .llong tmpstack + TMPSTACKSZ - 96 - .
331 * Setup a temporary stack
338 addi %r1, %r1, (TMPSTACKSZ - 16)
345 .long _GLOBAL_OFFSET_TABLE_-.
347 lwz %r3,0(%r5) /* _DYNAMIC in %r3 */
349 lwz %r4,4(%r5) /* GOT pointer */
351 lwz %r4,4(%r4) /* got[0] is _DYNAMIC link addr */
352 subf %r4,%r4,%r3 /* subtract to calculate relocbase */
354 bl CNAME(elf_reloc_self)
358 * Initialise exception vector offsets
364 * Set up arguments and jump to system initialization code
373 /* Switch to thread0.td_kstack now */
378 /* Machine independent part, does not return */
386 /************************************************************************/
388 /************************************************************************/
394 * The boot page is a special page of memory used during AP bringup.
395 * Before the AP comes out of reset, the physical 4K page holding this
396 * code is arranged to be mapped at 0xfffff000 by use of
397 * platform-dependent registers.
399 * Alternatively, this page may be executed using an ePAPR-standardized
400 * method -- writing to the address specified in "cpu-release-addr".
402 * In either case, execution begins at the last instruction of the
403 * page, which is a branch back to the start of the page.
405 * The code in the page must do initial MMU setup and normalize the
406 * TLBs for regular operation in the correct address space before
407 * reading outside the page.
409 * This implementation accomplishes this by:
410 * 1) Wiping TLB0 and all TLB1 entries but the one currently in use.
411 * 2) Establishing a temporary 4K TLB1 mapping in AS=1, and switching
412 * to it with rfi. This entry must NOT be in TLB1 slot 0.
413 * (This is needed to give the code freedom to clean up AS=0.)
414 * 3) Removing the initial TLB1 entry, leaving us with a single valid
415 * TLB1 entry, NOT in slot 0.
416 * 4) Installing an AS0 entry in TLB1 slot 0 mapping the 64MB kernel
417 * segment at its final virtual address. A second rfi is done to
418 * switch to the final address space. At this point we can finally
419 * access the rest of the kernel segment safely.
420 * 5) The temporary TLB1 AS=1 entry is removed, finally leaving us in
421 * a consistent (but minimal) state.
422 * 6) Set up TOC, stack, and pcpu registers.
423 * 7) Now that we can finally call C code, call pmap_bootstrap_ap(),
424 * which finishes copying in the shared TLB1 entries.
426 * At this point, the MMU is fully set up, and we can proceed with
427 * running the actual AP bootstrap code.
429 * Pieces of this code are also used for UP kernel, but in this case
430 * the sections specific to boot page functionality are dropped by
434 nop /* PPC64 alignment word. 64-bit target. */
436 bl 1f /* 32-bit target. */
440 ADDR(0) /* Trace pointer (%r31). */
444 .llong 0 /* Kern phys. load address. */
448 ADDR(0) /* Virt. address of __boot_page. */
451 * Initial configuration
454 mflr %r31 /* r31 holds the address of bp_trace */
458 rlwinm %r3, %r3, 16, 16, 31
460 /* HID0 for E500 is default */
461 lis %r4, HID0_E500_DEFAULT_SET@h
462 ori %r4, %r4, HID0_E500_DEFAULT_SET@l
464 cmpli 0, 0, %r3, FSL_E500mc
466 lis %r4, HID0_E500MC_DEFAULT_SET@h
467 ori %r4, %r4, HID0_E500MC_DEFAULT_SET@l
470 cmpli 0, 0, %r3, FSL_E5500
472 lis %r4, HID0_E5500_DEFAULT_SET@h
473 ori %r4, %r4, HID0_E5500_DEFAULT_SET@l
478 /* Enable branch prediction */
483 /* Invalidate all entries in TLB0 */
488 * Find TLB1 entry which is translating us now
492 bl tlb1_find_current /* the entry number found is in r29 */
494 bl tlb1_inval_all_but_current
497 * Create temporary translation in AS=1 and switch to it
500 bl tlb1_temp_mapping_as1
503 ori %r3, %r3, (PSL_IS | PSL_DS)
505 oris %r3, %r3, PSL_CM@h /* Ensure we're in 64-bit after RFI */
509 addi %r4, %r4, (4f - 3b)
512 rfi /* Switch context */
515 * Invalidate initial entry
522 * Setup final mapping in TLB1[0] and switch to it
524 /* Final kernel mapping, map in 64 MB of RAM */
525 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
526 li %r4, 0 /* Entry 0 */
527 rlwimi %r3, %r4, 16, 4, 15
531 li %r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
532 oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
533 mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
536 LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
537 ori %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
541 /* Retrieve kernel load [physical] address from bp_kernload */
545 clrrdi %r3, %r3, PAGE_SHIFT /* trunc_page(%r3) */
547 clrrwi %r3, %r3, PAGE_SHIFT /* trunc_page(%r3) */
549 /* Load lower half of the kernel loadaddr. */
550 lwz %r4, (bp_kernload - __boot_page + 4)(%r3)
551 LOAD %r5, (bp_virtaddr - __boot_page)(%r3)
553 /* Set RPN and protection */
554 ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
557 lwz %r4, (bp_kernload - __boot_page)(%r3)
564 /* Switch to the final mapping */
567 rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */
568 add %r3, %r3, %r5 /* Make this a virtual address */
569 addi %r3, %r3, (7f - 6b) /* And figure out return address. */
571 lis %r4, PSL_CM@h /* Note AS=0 */
573 li %r4, 0 /* Note AS=0 */
581 * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and
582 * beyond so it's allowed to directly access all locations the kernel was linked
587 * Invalidate temp mapping
593 /* Set up the TOC pointer */
598 .llong __tocbase + 0x8000 - .
604 /* Set up the stack pointer */
605 addis %r1,%r2,TOC_REF(tmpstack)@ha
606 ld %r1,TOC_REF(tmpstack)@l(%r1)
607 addi %r1,%r1,TMPSTACKSZ-96
610 * Setup a temporary stack
618 addi %r1, %r1, (TMPSTACKSZ - 16)
622 * Initialise exception vector offsets
628 * Assign our pcpu instance
638 bl CNAME(pmap_bootstrap_ap)
641 bl CNAME(cpudep_ap_bootstrap)
643 /* Switch to the idle thread's kstack */
646 bl CNAME(machdep_ap_bootstrap)
653 #if defined (BOOKE_E500)
655 * Invalidate all entries in the given TLB.
660 rlwinm %r3, %r3, 3, (1 << 3) /* TLBSEL */
661 ori %r3, %r3, (1 << 2) /* INVALL */
671 * expects address to look up in r3, returns entry number in r29
673 * FIXME: the hidden assumption is we are now running in AS=0, but we should
674 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
678 slwi %r17, %r17, MAS6_SPID0_SHIFT
683 rlwinm %r29, %r17, 16, 26, 31 /* MAS0[ESEL] -> r29 */
685 /* Make sure we have IPROT set on the entry */
687 oris %r17, %r17, MAS1_IPROT@h
696 * Invalidates a single entry in TLB1.
702 lis %r4, MAS0_TLBSEL1@h /* Select TLB1 */
703 rlwimi %r4, %r3, 16, 10, 15 /* Select our entry */
707 li %r5, 0 /* MAS1[V] = 0 */
716 * r29 current entry number
717 * r28 returned temp entry
720 tlb1_temp_mapping_as1:
721 /* Read our current translation */
722 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
723 rlwimi %r3, %r29, 16, 10, 15 /* Select our current entry */
/*
 * Builds a temporary AS=1 (TS=1) copy of the TLB1 entry we are currently
 * running from, so the caller can rfi into AS=1 and safely rework AS=0.
 * In: r29 = current TLB1 entry number.  Out: r28 = temp entry number.
 * NOTE(review): listing is non-contiguous here (original lines 724-728
 * elided); presumably the current entry is read back via tlbre before
 * being copied into the temp slot -- confirm against the full source.
 */
729 * Prepare and write temp entry
731 * FIXME this is not robust against overflow i.e. when the current
732 * entry is the last in TLB1
734 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
735 addi %r28, %r29, 1 /* Use next entry. */
736 rlwimi %r3, %r28, 16, 10, 15 /* Select temp entry */
741 rlwimi %r5, %r4, 12, 19, 19 /* NOTE(review): presumably sets MAS1[TS]=1 for the AS=1 copy -- confirm vs. e500 RM */
742 li %r4, 0 /* Global mapping, TID=0 */
743 rlwimi %r5, %r4, 16, 8, 15 /* clear the MAS1 TID field (global translation) */
744 oris %r5, %r5, (MAS1_VALID | MAS1_IPROT)@h /* mark entry valid and protected */
758 * Loops over TLB1, invalidates all entries skipping the one which currently
764 tlb1_inval_all_but_current:
/*
 * Walks every TLB1 entry and invalidates all of them except the one we
 * are currently executing from (entry number supplied in r29).
 * NOTE(review): listing is non-contiguous (original lines 770-773, 775,
 * 777-782 and the loop tail/blr are elided from this view).
 */
765 mfspr %r3, SPR_TLB1CFG /* Get number of entries */
766 andi. %r3, %r3, TLBCFG_NENTRY_MASK@l /* r3 = TLB1 entry count */
767 li %r4, 0 /* Start from Entry 0 */
768 1: lis %r5, MAS0_TLBSEL1@h
769 rlwimi %r5, %r4, 16, 10, 15 /* MAS0[ESEL] = r4 (entry to examine) */
774 cmpw %r4, %r29 /* our current entry? */
776 rlwinm %r5, %r5, 0, 2, 31 /* clear VALID and IPROT bits */
783 cmpw %r4, %r3 /* Check if this is the last entry */
791 * The __boot_tlb1 table is used to hold BSP TLB1 entries
792 * marked with _TLB_ENTRY_SHARED flag during AP bootstrap.
793 * The BSP fills in the table in tlb_ap_prep() function. Next,
794 * AP loads its contents to TLB1 hardware in pmap_bootstrap_ap().
797 .space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE
801 * Boot page needs to be exactly 4K, with the last word of this page
802 * acting as the reset vector, so we need to stuff the remainder.
803 * Upon release from holdoff CPU fetches the last word of the boot
806 .space 4092 - (__boot_page_padding - __boot_page)
809 * This is the end of the boot page.
810 * During AP startup, the previous instruction is at 0xfffffffc
811 * virtual (i.e. the reset vector.)
815 /************************************************************************/
816 /* locore subroutines */
817 /************************************************************************/
820 * Cache disable/enable/inval sequences according
821 * to section 2.16 of E500CORE RM.
824 /* Invalidate d-cache */
825 mfspr %r3, SPR_L1CSR0
826 ori %r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
829 mtspr SPR_L1CSR0, %r3
831 1: mfspr %r3, SPR_L1CSR0
832 andi. %r3, %r3, L1CSR0_DCFI
837 ENTRY(dcache_disable)
838 /* Disable d-cache */
839 mfspr %r3, SPR_L1CSR0
/*
 * NOTE(review): original lines 840-844 are elided from this listing;
 * presumably they clear L1CSR0[DCE] (and sync) before the write-back
 * below -- confirm against the full source / e500 RM sect. 2.16.
 */
845 mtspr SPR_L1CSR0, %r3
852 mfspr %r3, SPR_L1CSR0
853 oris %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
854 ori %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
857 mtspr SPR_L1CSR0, %r3
863 /* Invalidate i-cache */
864 mfspr %r3, SPR_L1CSR1
865 ori %r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
867 mtspr SPR_L1CSR1, %r3
869 1: mfspr %r3, SPR_L1CSR1
870 andi. %r3, %r3, L1CSR1_ICFI
875 ENTRY(icache_disable)
876 /* Disable i-cache */
877 mfspr %r3, SPR_L1CSR1
/*
 * NOTE(review): original lines 878-881 are elided from this listing;
 * presumably they clear L1CSR1[ICE] (and sync) before the write-back
 * below -- confirm against the full source / e500 RM sect. 2.16.
 */
882 mtspr SPR_L1CSR1, %r3
889 mfspr %r3, SPR_L1CSR1
890 oris %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
891 ori %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
893 mtspr SPR_L1CSR1, %r3
899 * L2 cache disable/enable/inval sequences for E500mc.
903 mfspr %r3, SPR_L2CSR0
904 oris %r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
905 ori %r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
907 mtspr SPR_L2CSR0, %r3
909 1: mfspr %r3, SPR_L2CSR0
910 andis. %r3, %r3, L2CSR0_L2FI@h
915 ENTRY(l2cache_enable)
/* Enable the unified backside L2 cache (e500mc/e5500): set enable + parity-enable bits. */
916 mfspr %r3, SPR_L2CSR0
917 oris %r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
/* NOTE(review): original line 918 elided -- presumably a sync/isync before the SPR write. */
919 mtspr SPR_L2CSR0, %r3
925 * Branch predictor setup.
929 ori %r3, %r3, BUCSR_BBFI
933 ori %r3, %r3, BUCSR_BPEN
941 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
945 /* Note: The spr number is patched at runtime */
950 /************************************************************************/
952 /************************************************************************/
955 GLOBAL(__startkernel)
963 .space 10240 /* XXX: this really should not be necessary */
967 TOC_ENTRY(bp_kernload)
972 * Compiled KERNBASE locations
975 .set kernbase, KERNBASE
977 #include <powerpc/booke/trap_subr.S>