2 * Copyright (c) 2012-2014 Andrew Turner
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include "opt_kstack_pages.h"
29 #include <sys/syscall.h>
30 #include <machine/asm.h>
31 #include <machine/armreg.h>
32 #include <machine/hypervisor.h>
33 #include <machine/param.h>
34 #include <machine/pte.h>
35 #include <machine/vm.h>
36 #include <machine/vmparam.h>
/* Page-size dependent configuration for the initial page tables. */
40 #if PAGE_SIZE == PAGE_SIZE_16K
42 * The number of level 3 tables to create. 32 will allow for 1G of address
43 * space, the same as a single level 2 page with 4k pages.
45 #define L3_PAGE_COUNT 32
/* Assembler-visible alias for the kernel's virtual base address. */
49 .set kernbase, KERNBASE
53 * MMU on with an identity map, or off
56 * We are loaded at a 2MiB aligned address
/*
 * NOTE(review): this view of the entry path is incomplete; the comments
 * below describe only the instructions visible here.
 */
64 * Disable the MMU. We may have entered the kernel with it on and
65 * will need to update the tables later. If this has been set up
66 * with anything other than a VA == PA map then this will fail,
67 * but in this case the code to find where we are running from
68 * would have also failed.
76 /* Set the context id */
/* Zero CONTEXTIDR_EL1 so early translations carry a known context ID */
77 msr contextidr_el1, xzr
79 /* Get the virt -> phys offset */
85 * x28 = Our physical load address
88 /* Create the page tables */
94 * x26 = Kernel L1 table
101 /* Load the new ttbr0 pagetable */
/* x27 = address of the TTBR0 (identity-map) L0 table */
102 adrp x27, pagetable_l0_ttbr0
103 add x27, x27, :lo12:pagetable_l0_ttbr0
105 /* Jump to the virtual address space */
110 /* Set up the stack */
/* Boot stack grows down from initstack_end; reserve a PCB below the top */
111 adrp x25, initstack_end
112 add x25, x25, :lo12:initstack_end
114 sub sp, sp, #PCB_SIZE
124 #if defined(PERTHREAD_SSP)
125 /* Set sp_el0 to the boot canary for early per-thread SSP to work */
126 adrp x15, boot_canary
127 add x15, x15, :lo12:boot_canary
131 /* Backup the module pointer */
/* Reserve stack space for the boot parameters passed to the C code */
134 sub sp, sp, #BOOTPARAMS_SIZE
137 /* Negate the delta so it is VA -> PA */
/* Fill in the bootparams fields (offsets BP_*) consumed later in C */
140 str x1, [x0, #BP_MODULEP]
141 str x29, [x0, #BP_KERN_DELTA]
143 add x25, x25, :lo12:initstack
144 str x25, [x0, #BP_KERN_STACK]
145 str x27, [x0, #BP_KERN_TTBR0]
146 str x23, [x0, #BP_BOOT_EL]
147 str x4, [x0, #BP_HCR_EL2]
150 /* Save bootparams */
153 /* Bootstrap an early shadow map for the boot stack. */
154 bl pmap_san_bootstrap
156 /* Restore bootparams */
160 /* trace back starts here */
162 /* Branch to C code */
164 /* We are done with the boot params */
165 add sp, sp, #BOOTPARAMS_SIZE
168 * Enable pointer authentication in the kernel. We set the keys for
169 * thread0 in initarm so have to wait until it returns to enable it.
170 * If we were to enable it in initarm then any authentication when
171 * returning would fail as it was called with pointer authentication
178 /* We should not get here */
192 * mpentry(unsigned long)
194 * Called by a core when it is being brought online.
195 * The data in x0 is passed straight to init_secondary.
198 /* Disable interrupts */
199 msr daifset, #DAIF_INTR
204 /* Set the context id */
205 msr contextidr_el1, xzr
207 /* Load the kernel page table */
208 adrp x24, pagetable_l0_ttbr1
209 add x24, x24, :lo12:pagetable_l0_ttbr1
210 /* Load the identity page table */
/*
 * NOTE(review): "boostrap" is misspelled but matches the label defined in
 * .init_pagetable below; renaming requires fixing both sites together.
 */
211 adrp x27, pagetable_l0_ttbr0_boostrap
212 add x27, x27, :lo12:pagetable_l0_ttbr0_boostrap
217 /* Load the new ttbr0 pagetable */
218 adrp x27, pagetable_l0_ttbr0
219 add x27, x27, :lo12:pagetable_l0_ttbr0
221 /* Jump to the virtual address space */
/* Use a literal-pool (absolute virtual) address to continue in KVA */
222 ldr x15, =mp_virtdone
226 /* Start using the AP boot stack */
231 #if defined(PERTHREAD_SSP)
232 /* Set sp_el0 to the boot canary for early per-thread SSP to work */
233 adrp x15, boot_canary
234 add x15, x15, :lo12:boot_canary
238 /* Load the kernel ttbr0 pagetable */
242 /* Invalidate the TLB */
252 * If we are started in EL2, configure the required hypervisor
253 * registers and drop to EL1.
263 * Disable the MMU. If the HCR_EL2.E2H field is set we will clear it
264 * which may break address translation.
272 /* Configure the Hypervisor */
/* HCR_RW: EL1 runs AArch64; APK/API: don't trap pointer-auth key use */
273 ldr x2, =(HCR_RW | HCR_APK | HCR_API)
276 /* Stash value of HCR_EL2 for later */
280 /* Load the Virtualization Process ID Register */
284 /* Load the Virtualization Multiprocess ID Register */
288 /* Set the bits that need to be 1 in sctlr_el1 */
293 * On some hardware, e.g., Apple M1, we can't clear E2H, so make sure we
294 * don't trap to EL2 for SIMD register usage to have at least a
295 * minimally usable system.
298 mov x3, #CPTR_RES1 /* HCR_E2H == 0 */
299 mov x5, #CPTR_FPEN /* HCR_E2H == 1 */
303 /* Don't trap to EL2 for CP15 traps */
306 /* Enable access to the physical timers at EL1 */
308 orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
311 /* Set the counter offset to a known value */
314 /* Hypervisor trap functions */
315 adrp x2, hyp_stub_vectors
316 add x2, x2, :lo12:hyp_stub_vectors
319 /* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */
/* Build the SPSR used for the eret: DAIF masked, return to EL1h */
322 mov x2, #(PSR_DAIF | PSR_M_EL1h)
325 /* Configure GICv3 CPU interface */
326 mrs x2, id_aa64pfr0_el1
327 /* Extract GIC bits from the register */
328 ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
329 /* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
330 cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
334 orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from insecure EL1 */
335 orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */
339 /* Set the address to return to our return address */
351 * Get the delta between the physical address we were loaded to and the
352 * virtual address we expect to run from. This is used when building the
353 * initial page table.
/*
 * Outputs (per the callers above): x29 = PA - VA delta,
 * x28 = physical load address of the kernel — TODO confirm, the
 * remainder of this routine is not visible here.
 */
355 LENTRY(get_virt_delta)
356 /* Load the physical address of virt_map */
358 add x29, x29, :lo12:virt_map
359 /* Load the virtual address of virt_map stored in virt_map */
361 /* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
363 /* Find the load address for the kernel */
374 * This builds the page tables containing the identity map, and the kernel
378 * We were loaded to an address that is on a 2MiB boundary
379 * All the memory must not cross a 1GiB boundary
380 * x28 contains the physical address we were loaded from
382 * TODO: This is out of date.
383 * There are at least 5 pages before that address for the page tables
384 * The pages used are:
385 * - The Kernel L2 table
386 * - The Kernel L1 table
387 * - The Kernel L0 table (TTBR1)
388 * - The identity (PA = VA) L1 table
389 * - The identity (PA = VA) L0 table (TTBR0)
391 LENTRY(create_pagetables)
392 /* Save the Link register */
395 /* Clean the page table */
/* Zero everything from pagetable to pagetable_end, 64 bytes per pass */
397 add x6, x6, :lo12:pagetable
399 adrp x27, pagetable_end
400 add x27, x27, :lo12:pagetable_end
402 stp xzr, xzr, [x6], #16
403 stp xzr, xzr, [x6], #16
404 stp xzr, xzr, [x6], #16
405 stp xzr, xzr, [x6], #16
410 * Build the TTBR1 maps.
413 /* Find the size of the kernel */
416 #if defined(LINUX_BOOT_ABI)
417 /* X19 is used as 'map FDT data' flag */
420 /* No modules or FDT pointer ? */
424 * Test if x0 points to modules descriptor(virtual address) or
425 * to FDT (physical address)
427 cmp x0, x6 /* x6 is #(KERNBASE) */
431 /* Booted with modules pointer */
432 /* Find modulep - begin */
435 * Add space for the module data. When PAGE_SIZE is 4k this will
436 * add at least 2 level 2 blocks (2 * 2MiB). When PAGE_SIZE is
437 * larger it will be at least as large as we use smaller level 3
440 ldr x7, =((6 * 1024 * 1024) - 1)
444 #if defined(LINUX_BOOT_ABI)
446 /* Booted by U-Boot booti with FDT data */
447 /* Set 'map FDT data' flag */
451 /* Booted by U-Boot booti without FDT data */
452 /* Find the end - begin */
457 * Add one 2MiB page for copy of FDT data (maximum FDT size),
458 * one for metadata and round up
460 ldr x7, =(3 * L2_SIZE - 1)
465 #if PAGE_SIZE != PAGE_SIZE_4K
467 * Create L3 pages. The kernel will be loaded at a 2M aligned
468 * address, but when the page size is larger than 4k, L2 blocks
469 * are too coarse to map the kernel with only 2M alignment, so
470 * level 3 pages must be used for the initial kernel mapping
471 * instead.
474 /* Get the number of l3 pages to allocate, rounded down */
475 lsr x10, x8, #(L3_SHIFT)
477 /* Create the kernel space L2 table */
479 mov x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
482 bl build_l3_page_pagetable
484 /* Move to the l2 table */
485 ldr x9, =(PAGE_SIZE * L3_PAGE_COUNT)
488 /* Link the l2 -> l3 table */
493 /* Get the number of l2 pages to allocate, rounded down */
494 lsr x10, x8, #(L2_SHIFT)
496 /* Create the kernel space L2 table */
498 mov x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
501 bl build_l2_block_pagetable
504 /* Move to the l1 table */
505 add x26, x26, #PAGE_SIZE
507 /* Link the l1 -> l2 table */
512 /* Move to the l0 table */
513 add x24, x26, #PAGE_SIZE
515 /* Link the l0 -> l1 table */
522 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
523 * They are only needed early on, so the VA = PA map is uncached.
/*
 * NOTE(review): the comment above says "uncached" but the identity map
 * below uses VM_MEMATTR_WRITE_BACK — confirm which is intended.
 */
525 add x27, x24, #PAGE_SIZE
527 mov x6, x27 /* The initial page table */
529 /* Create the VA = PA map */
530 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
/* Round the load address down to a 2MiB (L2 block) boundary */
532 and x16, x16, #(~L2_OFFSET)
533 mov x9, x16 /* PA start */
534 mov x8, x16 /* VA start (== PA start) */
536 bl build_l2_block_pagetable
538 #if defined(SOCDEV_PA)
539 /* Create a table for the UART */
540 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
542 add x16, x16, x9 /* VA start */
545 /* Store the socdev virtual address */
546 add x17, x8, #(SOCDEV_PA & L2_OFFSET)
548 str x17, [x9, :lo12:socdev_va]
550 mov x9, #(SOCDEV_PA & ~L2_OFFSET) /* PA start */
552 bl build_l2_block_pagetable
555 #if defined(LINUX_BOOT_ABI)
559 /* Create the mapping for FDT data (2 MiB max) */
560 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
562 add x16, x16, x9 /* VA start */
564 mov x9, x0 /* PA start */
565 /* Update the module pointer to point at the allocated memory */
566 and x0, x0, #(L2_OFFSET) /* Keep the lower bits */
567 add x0, x0, x8 /* Add the aligned virtual address */
570 bl build_l2_block_pagetable
575 /* Move to the l1 table */
576 add x27, x27, #PAGE_SIZE
578 /* Link the l1 -> l2 table */
583 /* Move to the l0 table */
584 add x27, x27, #PAGE_SIZE
586 /* Link the l0 -> l1 table */
592 /* Restore the Link register */
595 LEND(create_pagetables)
598 * Builds an L0 -> L1 table descriptor
601 * x8 = Virtual Address
602 * x9 = L1 PA (trashed)
603 * x10 = Entry count (trashed)
604 * x11, x12 and x13 are trashed
/* x6 = base of the L0 table being filled (entries are 8 bytes each) */
606 LENTRY(link_l0_pagetable)
608 * Link an L0 -> L1 table entry.
610 /* Find the table index */
611 lsr x11, x8, #L0_SHIFT
612 and x11, x11, #L0_ADDR_MASK
614 /* Build the L0 block entry */
/* Table descriptor denies EL0 access and marks the range UXN */
616 orr x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0)
618 /* Only use the output address bits */
619 lsr x9, x9, #PAGE_SHIFT
620 1: orr x13, x12, x9, lsl #PAGE_SHIFT
622 /* Store the entry */
623 str x13, [x6, x11, lsl #3]
631 LEND(link_l0_pagetable)
634 * Builds an L1 -> L2 table descriptor
637 * x8 = Virtual Address
638 * x9 = L2 PA (trashed)
639 * x11, x12 and x13 are trashed
/* x6 = base of the L1 table being filled (entries are 8 bytes each) */
641 LENTRY(link_l1_pagetable)
643 * Link an L1 -> L2 table entry.
645 /* Find the table index */
646 lsr x11, x8, #L1_SHIFT
647 and x11, x11, #Ln_ADDR_MASK
649 /* Build the L1 block entry */
652 /* Only use the output address bits */
653 lsr x9, x9, #PAGE_SHIFT
654 orr x13, x12, x9, lsl #PAGE_SHIFT
656 /* Store the entry */
657 str x13, [x6, x11, lsl #3]
660 LEND(link_l1_pagetable)
663 * Builds count 2 MiB page table entry
665 * x7 = Block attributes
667 * x9 = PA start (trashed)
668 * x10 = Entry count (trashed)
669 * x11, x12 and x13 are trashed
/* x6 = base of the L2 table being filled (entries are 8 bytes each) */
671 LENTRY(build_l2_block_pagetable)
673 * Build the L2 table entry.
675 /* Find the table index */
676 lsr x11, x8, #L2_SHIFT
677 and x11, x11, #Ln_ADDR_MASK
679 /* Build the L2 block entry */
680 orr x12, x7, #L2_BLOCK
681 orr x12, x12, #(ATTR_DEFAULT)
/* Kernel mappings are never executable from EL0 */
682 orr x12, x12, #(ATTR_S1_UXN)
684 /* Only use the output address bits */
685 lsr x9, x9, #L2_SHIFT
687 /* Set the physical address for this virtual address */
688 1: orr x13, x12, x9, lsl #L2_SHIFT
690 /* Store the entry */
691 str x13, [x6, x11, lsl #3]
699 LEND(build_l2_block_pagetable)
701 #if PAGE_SIZE != PAGE_SIZE_4K
703 * Builds an L2 -> L3 table descriptor
706 * x8 = Virtual Address
707 * x9 = L3 PA (trashed)
708 * x11, x12 and x13 are trashed
/* x6 = base of the L2 table being filled (entries are 8 bytes each) */
710 LENTRY(link_l2_pagetable)
712 * Link an L2 -> L3 table entry.
714 /* Find the table index */
715 lsr x11, x8, #L2_SHIFT
716 and x11, x11, #Ln_ADDR_MASK
718 /* Build the L2 table entry */
721 /* Only use the output address bits */
722 lsr x9, x9, #PAGE_SHIFT
723 orr x13, x12, x9, lsl #PAGE_SHIFT
725 /* Store the entry */
726 str x13, [x6, x11, lsl #3]
729 LEND(link_l2_pagetable)
732 * Builds count level 3 page table entries
734 * x7 = Block attributes
736 * x9 = PA start (trashed)
737 * x10 = Entry count (trashed)
738 * x11, x12 and x13 are trashed
/* x6 = base of the L3 table being filled (entries are 8 bytes each) */
740 LENTRY(build_l3_page_pagetable)
742 * Build the L3 table entry.
744 /* Find the table index */
745 lsr x11, x8, #L3_SHIFT
746 and x11, x11, #Ln_ADDR_MASK
748 /* Build the L3 page entry */
749 orr x12, x7, #L3_PAGE
750 orr x12, x12, #(ATTR_DEFAULT)
/* Kernel mappings are never executable from EL0 */
751 orr x12, x12, #(ATTR_S1_UXN)
753 /* Only use the output address bits */
754 lsr x9, x9, #L3_SHIFT
756 /* Set the physical address for this virtual address */
757 1: orr x13, x12, x9, lsl #L3_SHIFT
759 /* Store the entry */
760 str x13, [x6, x11, lsl #3]
768 LEND(build_l3_page_pagetable)
774 /* Load the exception vectors */
775 ldr x2, =exception_vectors
778 /* Load ttbr0 and ttbr1 */
783 /* Clear the Monitor Debug System control register */
786 /* Invalidate the TLB */
795 * Setup TCR according to the PARange and ASIDBits fields
796 * from ID_AA64MMFR0_EL1 and the HAFDBS field from the
797 * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS
798 * to 1 only if the ASIDBits field equals 0b0010.
801 mrs x3, id_aa64mmfr0_el1
803 /* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
804 bfi x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
805 and x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)
807 /* Check if the HW supports 16 bit ASIDS */
808 cmp x3, #(ID_AA64MMFR0_ASIDBits_16)
809 /* If so x3 == 1, else x3 == 0 */
811 /* Set TCR.AS with x3 */
812 bfi x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)
815 * Check if the HW supports access flag and dirty state updates,
816 * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
818 mrs x3, id_aa64mmfr1_el1
819 and x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
/* Presumably the HAFDBS == AF-only case: hardware access flag */
822 orr x2, x2, #(TCR_HA)
/* Presumably HAFDBS reports AF + dirty-state: enable both HA and HD */
827 orr x2, x2, #(TCR_HA | TCR_HD)
/* Read-modify-write of the control register: clear then set our bits */
837 bic x1, x1, x3 /* Clear the required bits */
838 orr x1, x1, x2 /* Set the required bits */
/* MAIR_EL1 value: memory-attribute encodings indexed by VM_MEMATTR_* */
846 .quad MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) | \
847 MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) | \
848 MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) | \
849 MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) | \
850 MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE)
/* Select translation-granule bits for both TTBRs from PAGE_SIZE */
852 #if PAGE_SIZE == PAGE_SIZE_4K
853 #define TCR_TG (TCR_TG1_4K | TCR_TG0_4K)
854 #elif PAGE_SIZE == PAGE_SIZE_16K
855 #define TCR_TG (TCR_TG1_16K | TCR_TG0_16K)
857 #error Unsupported page size
/* Base TCR_EL1 value; IPS/AS/HA/HD fields are patched in at runtime */
860 .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
861 TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
/* SCTLR_EL1 bits that must be set */
864 .quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
865 SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
866 SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
867 SCTLR_M | SCTLR_CP15BEN)
/* SCTLR_EL1 bits that must be cleared (list continues off-view) */
870 .quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
881 .space (PAGE_SIZE * KSTACK_PAGES)
/* Initial page tables: zero-filled at boot by create_pagetables */
884 .section .init_pagetable, "aw", %nobits
887 * 6 initial tables (in the following order):
888 * L2 for kernel (High addresses)
891 * L1 bootstrap for user (Low addresses)
892 * L0 bootstrap for user
895 .globl pagetable_l0_ttbr1
897 #if PAGE_SIZE != PAGE_SIZE_4K
898 .space (PAGE_SIZE * L3_PAGE_COUNT)
906 pagetable_l2_ttbr0_bootstrap:
908 pagetable_l1_ttbr0_bootstrap:
/*
 * NOTE(review): "boostrap" is misspelled, unlike the two labels above,
 * but the references in mpentry use the same spelling; rename both
 * sides together or not at all.
 */
910 pagetable_l0_ttbr0_boostrap:
919 .section .rodata, "a", %progbits
920 .globl aarch32_sigcode
/*
 * Pre-assembled AArch32 signal trampoline; each .word is one 32-bit Arm
 * instruction (disassembly in the trailing comments).
 */
923 .word 0xe1a0000d // mov r0, sp
924 .word 0xe2800040 // add r0, r0, #SIGF_UC
925 .word 0xe59f700c // ldr r7, [pc, #12]
926 .word 0xef000000 // swi #0
927 .word 0xe59f7008 // ldr r7, [pc, #8]
928 .word 0xef000000 // swi #0
929 .word 0xeafffffa // b . - 16
933 .size aarch32_sigcode, . - aarch32_sigcode
936 .global sz_aarch32_sigcode
938 .quad aarch32_esigcode - aarch32_sigcode