2 * Copyright (c) 2012-2014 Andrew Turner
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include "opt_kstack_pages.h"
31 #include <sys/syscall.h>
32 #include <machine/asm.h>
33 #include <machine/armreg.h>
34 #include <machine/hypervisor.h>
35 #include <machine/param.h>
36 #include <machine/pte.h>
37 #include <machine/vm.h>
38 #include <machine/vmparam.h>
/* Number of L0 (512 GiB) table slots needed to span the direct-map window. */
41 #define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
44 .set kernbase, KERNBASE
47 /* U-Boot booti related constants. */
48 #if defined(LINUX_BOOT_ABI)
/* FDT magic 0xd00dfeed byte-swapped, i.e. as read by a little-endian load. */
49 #define FDT_MAGIC 0xEDFE0DD0 /* FDT blob Magic */
51 #ifndef UBOOT_IMAGE_OFFSET
52 #define UBOOT_IMAGE_OFFSET 0 /* Image offset from start of */
53 #endif /* 2 MiB page */
55 #ifndef UBOOT_IMAGE_SIZE /* Total size of image */
56 #define UBOOT_IMAGE_SIZE _end - _start
59 #ifndef UBOOT_IMAGE_FLAGS
60 #define UBOOT_IMAGE_FLAGS 0 /* LE kernel, unspecified */
61 #endif /* page size */
62 #endif /* defined(LINUX_BOOT_ABI) */
66 * MMU on with an identity map, or off
69 * We are loaded at a 2MiB aligned address
75 #if defined(LINUX_BOOT_ABI)
/*
 * Layout matches the Linux arm64 "Image" header that U-Boot's booti
 * command parses (offset/size/flags, three reserved quads, magic, PE offset).
 */
76 /* U-boot image header */
79 .quad UBOOT_IMAGE_OFFSET /* Image offset in 2 MiB page, LE */
80 .quad UBOOT_IMAGE_SIZE /* Image size, LE */
81 .quad UBOOT_IMAGE_FLAGS /* Flags for kernel. LE */
82 .quad 0 /* Reserved */
83 .quad 0 /* Reserved */
84 .quad 0 /* Reserved */
85 .long 0x644d5241 /* Magic "ARM\x64", LE */
86 .long 0 /* Reserved for PE COFF offset*/
88 #endif /* defined(LINUX_BOOT_ABI) */
94 * Disable the MMU. We may have entered the kernel with it on and
95 * will need to update the tables later. If this has been set up
96 * with anything other than a VA == PA map then this will fail,
97 * but in this case the code to find where we are running from
98 * would have also failed.
106 /* Set the context id */
/* xzr reads as zero, so this clears CONTEXTIDR_EL1. */
107 msr contextidr_el1, xzr
109 /* Get the virt -> phys offset */
115 * x28 = Our physical load address
118 /* Create the page tables */
124 * x26 = Kernel L1 table
131 /* Jump to the virtual address space */
136 /* Set up the stack */
137 adr x25, initstack_end
/* Reserve room for the initial thread's PCB below the stack top. */
139 sub sp, sp, #PCB_SIZE
149 /* Backup the module pointer */
152 /* Make the page table base a virtual address */
/*
 * Carve a boot-parameters structure out of the stack and fill it in;
 * the BP_* offsets are presumably generated from the C struct
 * (assym) — confirm against the rest of the file.
 */
156 sub sp, sp, #BOOTPARAMS_SIZE
159 /* Negate the delta so it is VA -> PA */
162 str x1, [x0, #BP_MODULEP]
163 str x26, [x0, #BP_KERN_L1PT]
164 str x29, [x0, #BP_KERN_DELTA]
166 str x25, [x0, #BP_KERN_STACK]
167 str x24, [x0, #BP_KERN_L0PT]
168 str x23, [x0, #BP_BOOT_EL]
170 /* trace back starts here */
172 /* Branch to C code */
176 /* We should not get here */
189 * mpentry(unsigned long)
191 * Called by a core when it is being brought online.
192 * The data in x0 is passed straight to init_secondary.
195 /* Disable interrupts */
201 /* Set the context id */
/* xzr reads as zero, so this clears CONTEXTIDR_EL1. */
202 msr contextidr_el1, xzr
204 /* Load the kernel page table */
205 adr x24, pagetable_l0_ttbr1
206 /* Load the identity page table */
207 adr x27, pagetable_l0_ttbr0
212 /* Jump to the virtual address space */
/* ldr =sym loads the link-time (virtual) address from a literal pool. */
213 ldr x15, =mp_virtdone
/* Each secondary CPU gets a KSTACK_PAGES-sized slice of secondary_stacks. */
217 ldr x4, =secondary_stacks
218 mov x5, #(PAGE_SIZE * KSTACK_PAGES)
227 * If we are started in EL2, configure the required hypervisor
228 * registers and drop to EL1.
237 /* Configure the Hypervisor */
241 /* Load the Virtualization Process ID Register */
245 /* Load the Virtualization Multiprocess ID Register */
249 /* Set the bits that need to be 1 in sctlr_el1 */
253 /* Don't trap to EL2 for exceptions */
257 /* Don't trap to EL2 for CP15 traps */
260 /* Enable access to the physical timers at EL1 */
262 orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
265 /* Set the counter offset to a known value */
268 /* Hypervisor trap functions */
/*
 * PSTATE for the eret target: all of DAIF masked, EL1 using SP_EL1
 * (EL1h). Presumably written to spsr_el2 — the msr is not visible in
 * this fragment.
 */
272 mov x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
275 /* Configure GICv3 CPU interface */
276 mrs x2, id_aa64pfr0_el1
277 /* Extract GIC bits from the register */
278 ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
279 /* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
280 cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
284 orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from insecure EL1 */
285 orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */
289 /* Set the address to return to our return address */
/*
 * EL2 exception vector table: all sixteen slots (4 exception types x
 * 4 source states) are stubbed out with VECT_EMPTY — no exceptions
 * are expected while briefly running at EL2.
 */
305 VECT_EMPTY /* Synchronous EL2t */
306 VECT_EMPTY /* IRQ EL2t */
307 VECT_EMPTY /* FIQ EL2t */
308 VECT_EMPTY /* Error EL2t */
310 VECT_EMPTY /* Synchronous EL2h */
311 VECT_EMPTY /* IRQ EL2h */
312 VECT_EMPTY /* FIQ EL2h */
313 VECT_EMPTY /* Error EL2h */
315 VECT_EMPTY /* Synchronous 64-bit EL1 */
316 VECT_EMPTY /* IRQ 64-bit EL1 */
317 VECT_EMPTY /* FIQ 64-bit EL1 */
318 VECT_EMPTY /* Error 64-bit EL1 */
320 VECT_EMPTY /* Synchronous 32-bit EL1 */
321 VECT_EMPTY /* IRQ 32-bit EL1 */
322 VECT_EMPTY /* FIQ 32-bit EL1 */
323 VECT_EMPTY /* Error 32-bit EL1 */
326 * Get the delta between the physical address we were loaded to and the
327 * virtual address we expect to run from. This is used when building the
328 * initial page table.
/* adr gives the run-time (physical) address; the stored quad is the VA. */
331 /* Load the physical address of virt_map */
333 /* Load the virtual address of virt_map stored in virt_map */
335 /* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
337 /* Find the load address for the kernel */
347 * This builds the page tables containing the identity map, and the kernel
351 * We were loaded to an address that is on a 2MiB boundary
352 * All the memory must not cross a 1GiB boundary
353 * x28 contains the physical address we were loaded from
355 * TODO: This is out of date.
356 * There are at least 5 pages before that address for the page tables
357 * The pages used are:
358 * - The Kernel L2 table
359 * - The Kernel L1 table
360 * - The Kernel L0 table (TTBR1)
361 * - The identity (PA = VA) L1 table
362 * - The identity (PA = VA) L0 table (TTBR0)
363 * - The DMAP L1 tables
366 /* Save the Link register */
369 /* Clean the page table */
372 adr x27, pagetable_end
/* Zero 64 bytes per loop iteration with paired stores of xzr. */
374 stp xzr, xzr, [x6], #16
375 stp xzr, xzr, [x6], #16
376 stp xzr, xzr, [x6], #16
377 stp xzr, xzr, [x6], #16
382 * Build the TTBR1 maps.
385 /* Find the size of the kernel */
388 #if defined(LINUX_BOOT_ABI)
389 /* X19 is used as 'map FDT data' flag */
392 /* No modules or FDT pointer ? */
395 /* Test if modulep points to modules descriptor or to FDT */
402 /* Booted with modules pointer */
403 /* Find modulep - begin */
405 /* Add two 2MiB pages for the module data and round up */
406 ldr x7, =(3 * L2_SIZE - 1)
410 #if defined(LINUX_BOOT_ABI)
412 /* Booted by U-Boot booti with FDT data */
413 /* Set 'map FDT data' flag */
417 /* Booted by U-Boot booti without FDT data */
418 /* Find the end - begin */
423 * Add one 2MiB page for copy of FDT data (maximum FDT size),
424 * one for metadata and round up
426 ldr x7, =(3 * L2_SIZE - 1)
431 /* Get the number of l2 pages to allocate, rounded down */
432 lsr x10, x8, #(L2_SHIFT)
434 /* Create the kernel space L2 table */
436 mov x7, #VM_MEMATTR_WRITE_BACK
437 mov x8, #(KERNBASE & L2_BLOCK_MASK)
439 bl build_l2_block_pagetable
441 /* Move to the l1 table */
442 add x26, x26, #PAGE_SIZE
444 /* Link the l1 -> l2 table */
449 /* Move to the l0 table */
450 add x24, x26, #PAGE_SIZE
452 /* Link the l0 -> l1 table */
458 /* Link the DMAP tables */
459 ldr x8, =DMAP_MIN_ADDRESS
/* NOTE(review): trailing ';' is a statement separator in GNU as, not a
 * comment — harmless but stray. */
460 adr x9, pagetable_dmap;
461 mov x10, #DMAP_TABLES
465 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
466 * They are only needed early on, so the VA = PA map is uncached.
468 add x27, x24, #PAGE_SIZE
470 mov x6, x27 /* The initial page table */
471 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
/* Optional early-UART mapping for debugging, device memory attributes. */
472 /* Create a table for the UART */
473 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
474 mov x8, #(SOCDEV_VA) /* VA start */
475 mov x9, #(SOCDEV_PA) /* PA start */
477 bl build_l1_block_pagetable
480 #if defined(LINUX_BOOT_ABI)
484 /* Create the identity mapping for FDT data (2 MiB max) */
485 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE))
487 mov x8, x0 /* VA start (== PA start) */
489 bl build_l1_block_pagetable
494 /* Create the VA = PA map */
495 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE))
497 mov x8, x9 /* VA start (== PA start) */
499 bl build_l1_block_pagetable
501 /* Move to the l0 table */
502 add x27, x27, #PAGE_SIZE
504 /* Link the l0 -> l1 table */
510 /* Restore the Link register */
515 * Builds an L0 -> L1 table descriptor
517 * This is a link for a 512GiB block of memory with up to 1GiB regions mapped
518 * within it by build_l1_block_pagetable.
521 * x8 = Virtual Address
522 * x9 = L1 PA (trashed)
524 * x11, x12 and x13 are trashed
528 * Link an L0 -> L1 table entry.
530 /* Find the table index */
531 lsr x11, x8, #L0_SHIFT
532 and x11, x11, #L0_ADDR_MASK
534 /* Build the L0 block entry */
537 /* Only use the output address bits */
/* Strip low bits, then restore: keeps only page-aligned output address. */
538 lsr x9, x9, #PAGE_SHIFT
539 1: orr x13, x12, x9, lsl #PAGE_SHIFT
541 /* Store the entry */
/* Descriptors are 8 bytes, hence the index is scaled by 3 bits. */
542 str x13, [x6, x11, lsl #3]
552 * Builds an L1 -> L2 table descriptor
554 * This is a link for a 1GiB block of memory with up to 2MiB regions mapped
555 * within it by build_l2_block_pagetable.
558 * x8 = Virtual Address
559 * x9 = L2 PA (trashed)
560 * x11, x12 and x13 are trashed
564 * Link an L1 -> L2 table entry.
566 /* Find the table index */
567 lsr x11, x8, #L1_SHIFT
568 and x11, x11, #Ln_ADDR_MASK
570 /* Build the L1 block entry */
573 /* Only use the output address bits */
/* Strip low bits, then restore: keeps only page-aligned output address. */
574 lsr x9, x9, #PAGE_SHIFT
575 orr x13, x12, x9, lsl #PAGE_SHIFT
577 /* Store the entry */
/* Descriptors are 8 bytes, hence the index is scaled by 3 bits. */
578 str x13, [x6, x11, lsl #3]
583 * Builds count 1 GiB page table entry
585 * x7 = Variable lower block attributes
587 * x9 = PA start (trashed)
589 * x11, x12 and x13 are trashed
591 build_l1_block_pagetable:
593 * Build the L1 table entry.
595 /* Find the table index */
596 lsr x11, x8, #L1_SHIFT
597 and x11, x11, #Ln_ADDR_MASK
599 /* Build the L1 block entry */
/* Caller's attributes plus block type, Access Flag and Inner Shareable. */
600 orr x12, x7, #L1_BLOCK
601 orr x12, x12, #(ATTR_AF)
602 orr x12, x12, ATTR_SH(ATTR_SH_IS)
606 /* Only use the output address bits */
607 lsr x9, x9, #L1_SHIFT
609 /* Set the physical address for this virtual address */
610 1: orr x13, x12, x9, lsl #L1_SHIFT
612 /* Store the entry */
/* Descriptors are 8 bytes, hence the index is scaled by 3 bits. */
613 str x13, [x6, x11, lsl #3]
623 * Builds count 2 MiB page table entry
625 * x7 = Type (0 = Device, 1 = Normal)
627 * x9 = PA start (trashed)
629 * x11, x12 and x13 are trashed
631 build_l2_block_pagetable:
633 * Build the L2 table entry.
635 /* Find the table index */
636 lsr x11, x8, #L2_SHIFT
637 and x11, x11, #Ln_ADDR_MASK
639 /* Build the L2 block entry */
/* Block type, Access Flag, EL0-execute-never and Inner Shareable. */
641 orr x12, x12, #L2_BLOCK
642 orr x12, x12, #(ATTR_AF)
643 orr x12, x12, #(ATTR_S1_UXN)
645 orr x12, x12, ATTR_SH(ATTR_SH_IS)
648 /* Only use the output address bits */
649 lsr x9, x9, #L2_SHIFT
651 /* Set the physical address for this virtual address */
652 1: orr x13, x12, x9, lsl #L2_SHIFT
654 /* Store the entry */
/* Descriptors are 8 bytes, hence the index is scaled by 3 bits. */
655 str x13, [x6, x11, lsl #3]
667 /* Load the exception vectors */
668 ldr x2, =exception_vectors
671 /* Load ttbr0 and ttbr1 */
676 /* Clear the Monitor Debug System control register */
679 /* Invalidate the TLB */
686 * Setup TCR according to the PARange and ASIDBits fields
687 * from ID_AA64MMFR0_EL1 and the HAFDBS field from the
688 * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS
689 * to 1 only if the ASIDBits field equals 0b0010.
692 mrs x3, id_aa64mmfr0_el1
694 /* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
695 bfi x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
696 and x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)
698 /* Check if the HW supports 16 bit ASIDS */
699 cmp x3, #(ID_AA64MMFR0_ASIDBits_16)
700 /* If so x3 == 1, else x3 == 0 */
702 /* Set TCR.AS with x3 */
703 bfi x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)
706 * Check if the HW supports access flag and dirty state updates,
707 * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
709 mrs x3, id_aa64mmfr1_el1
710 and x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
/* HA only (access flag) vs. HA|HD (access flag + dirty state). */
713 orr x2, x2, #(TCR_HA)
718 orr x2, x2, #(TCR_HA | TCR_HD)
/* Read-modify-write of a control register: clear then set bit masks. */
728 bic x1, x1, x3 /* Clear the required bits */
729 orr x1, x1, x2 /* Set the required bits */
/* MAIR_EL1 value: one memory-attribute encoding per VM_MEMATTR index. */
737 .quad MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE) | \
738 MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) | \
739 MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) | \
740 MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH)
/* Base TCR_EL1 value: VA size from VIRT_BITS, 4 KiB TTBR1 granule. */
742 .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG1_4K | \
743 TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
/* SCTLR_EL1 bits to set (includes M and C: MMU and data cache on). */
746 .quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
747 SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
748 SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
749 SCTLR_M | SCTLR_CP15BEN)
/* SCTLR_EL1 bits to clear (continues past this fragment). */
752 .quad (SCTLR_EE | SCTLR_EOE | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
/* Statically reserved, page-aligned storage for the bootstrap tables. */
759 //.section .init_pagetable
760 .align 12 /* 4KiB aligned */
762 * 3 initial tables (in the following order):
763 * L2 for kernel (High addresses)
765 * L1 for user (Low addresses)
778 .globl pagetable_dmap
/* One page per DMAP L1 table; DMAP_TABLES is computed at the file top. */
780 .space PAGE_SIZE * DMAP_TABLES
788 .quad pagetable /* XXX: Keep page tables VA */
/* Boot-time kernel stack storage. */
792 .space (PAGE_SIZE * KSTACK_PAGES)
/* Signal trampoline: invoke the sigreturn syscall (x8 = syscall number). */
801 mov x8, #SYS_sigreturn
804 /* sigreturn failed, exit */
810 /* This may be copied to the stack, keep it 16-byte aligned */
818 .quad esigcode - sigcode
820 ENTRY(aarch32_sigcode)
/*
 * 32-bit (A32) signal trampoline, hand-encoded as raw .word values
 * because this file is assembled for AArch64; the decoded instruction
 * is shown in each trailing comment.
 */
821 .word 0xe1a0000d // mov r0, sp
822 .word 0xe2800040 // add r0, r0, #SIGF_UC
823 .word 0xe59f700c // ldr r7, [pc, #12]
824 .word 0xef000000 // swi #0
825 .word 0xe59f7008 // ldr r7, [pc, #8]
826 .word 0xef000000 // swi #0
827 .word 0xeafffffa // b . - 16
834 .global sz_aarch32_sigcode
836 .quad aarch32_esigcode - aarch32_sigcode