2 * Copyright (c) 2012-2014 Andrew Turner
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include "opt_kstack_pages.h"
31 #include <sys/syscall.h>
32 #include <machine/asm.h>
33 #include <machine/armreg.h>
34 #include <machine/hypervisor.h>
35 #include <machine/param.h>
36 #include <machine/pte.h>
37 #include <machine/vm.h>
38 #include <machine/vmparam.h>
43 .set kernbase, KERNBASE
47 * MMU on with an identity map, or off
50 * We are loaded at a 2MiB aligned address
58 * Disable the MMU. We may have entered the kernel with it on and
59 * will need to update the tables later. If this has been set up
60 * with anything other than a VA == PA map then this will fail,
61 * but in this case the code to find where we are running from
62 * would have also failed.
/*
 * Fragment of the kernel entry path (the ENTRY(_start)/virtdone labels are
 * not visible in this chunk).  Visible steps: clear the context id, load
 * the TTBR0 L0 table address, switch to the virtual address space, set up
 * the initial stack, then fill the boot params structure on the stack and
 * branch to C code.
 */
70 /* Set the context id */
71 msr contextidr_el1, xzr
73 /* Get the virt -> phys offset */
79 * x28 = Our physical load address
82 /* Create the page tables */
88 * x26 = Kernel L1 table
95 /* Load the new ttbr0 pagetable */
96 adrp x27, pagetable_l0_ttbr0
97 add x27, x27, :lo12:pagetable_l0_ttbr0
99 /* Jump to the virtual address space */
104 /* Set up the stack */
105 adrp x25, initstack_end
106 add x25, x25, :lo12:initstack_end
108 sub sp, sp, #PCB_SIZE
118 #if defined(PERTHREAD_SSP)
119 /* Set sp_el0 to the boot canary for early per-thread SSP to work */
120 adrp x15, boot_canary
121 add x15, x15, :lo12:boot_canary
125 /* Backup the module pointer */
128 /* Make the page table base a virtual address */
132 sub sp, sp, #BOOTPARAMS_SIZE
135 /* Negate the delta so it is VA -> PA */
/* Fill the boot params structure (x0 = base of BOOTPARAMS on the stack) */
138 str x1, [x0, #BP_MODULEP]
139 str x26, [x0, #BP_KERN_L1PT]
140 str x29, [x0, #BP_KERN_DELTA]
142 add x25, x25, :lo12:initstack
143 str x25, [x0, #BP_KERN_STACK]
144 str x24, [x0, #BP_KERN_L0PT]
145 str x27, [x0, #BP_KERN_TTBR0]
146 str x23, [x0, #BP_BOOT_EL]
148 /* trace back starts here */
150 /* Branch to C code */
152 /* We are done with the boot params */
153 add sp, sp, #BOOTPARAMS_SIZE
156 /* We should not get here */
/*
 * Fragment of the secondary-CPU (AP) entry path; the ENTRY(mpentry) label
 * itself is not visible in this chunk.
 */
170 * mpentry(unsigned long)
172 * Called by a core when it is being brought online.
173 * The data in x0 is passed straight to init_secondary.
176 /* Disable interrupts */
177 msr daifset, #DAIF_INTR
182 /* Set the context id */
183 msr contextidr_el1, xzr
185 /* Load the kernel page table */
186 adrp x24, pagetable_l0_ttbr1
187 add x24, x24, :lo12:pagetable_l0_ttbr1
188 /* Load the identity page table */
/* NOTE(review): "boostrap" is misspelled, but it matches the label as
 * defined in the .init_pagetable section, so it assembles correctly;
 * renaming would require changing definition and all uses together. */
189 adrp x27, pagetable_l0_ttbr0_boostrap
190 add x27, x27, :lo12:pagetable_l0_ttbr0_boostrap
195 /* Load the new ttbr0 pagetable */
196 adrp x27, pagetable_l0_ttbr0
197 add x27, x27, :lo12:pagetable_l0_ttbr0
199 /* Jump to the virtual address space */
200 ldr x15, =mp_virtdone
204 /* Start using the AP boot stack */
209 #if defined(PERTHREAD_SSP)
210 /* Set sp_el0 to the boot canary for early per-thread SSP to work */
211 adrp x15, boot_canary
212 add x15, x15, :lo12:boot_canary
216 /* Load the kernel ttbr0 pagetable */
220 /* Invalidate the TLB */
/*
 * Fragment of the EL2 -> EL1 drop path (entry label not visible in this
 * chunk).  Configures hypervisor control registers, the EL1 timer access,
 * the EL2 trap vectors, and the GICv3 CPU interface before the eret.
 */
230 * If we are started in EL2, configure the required hypervisor
231 * registers and drop to EL1.
241 * Disable the MMU. If the HCR_EL2.E2H field is set we will clear it
242 * which may break address translation.
250 /* Configure the Hypervisor */
254 /* Load the Virtualization Process ID Register */
258 /* Load the Virtualization Multiprocess ID Register */
262 /* Set the bits that need to be 1 in sctlr_el1 */
266 /* Don't trap to EL2 for exceptions */
270 /* Don't trap to EL2 for CP15 traps */
273 /* Enable access to the physical timers at EL1 */
275 orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
278 /* Set the counter offset to a known value */
281 /* Hypervisor trap functions */
283 add x2, x2, :lo12:hyp_vectors
/* Target state for the eret: all DAIF bits masked, EL1 using SP_EL1 */
286 mov x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
289 /* Configure GICv3 CPU interface */
290 mrs x2, id_aa64pfr0_el1
291 /* Extract GIC bits from the register */
292 ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
293 /* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
294 cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
298 orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from insecure EL1 */
299 orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */
303 /* Set the address to return to our return address */
/*
 * EL2 exception vector entries.  Each VECT_EMPTY fills one vector slot;
 * the macro definition and the enclosing table label are not visible in
 * this chunk -- presumably this is the hyp_vectors table referenced by
 * the EL2 configuration code above.  TODO confirm.
 */
320 VECT_EMPTY /* Synchronous EL2t */
321 VECT_EMPTY /* IRQ EL2t */
322 VECT_EMPTY /* FIQ EL2t */
323 VECT_EMPTY /* Error EL2t */
325 VECT_EMPTY /* Synchronous EL2h */
326 VECT_EMPTY /* IRQ EL2h */
327 VECT_EMPTY /* FIQ EL2h */
328 VECT_EMPTY /* Error EL2h */
330 VECT_EMPTY /* Synchronous 64-bit EL1 */
331 VECT_EMPTY /* IRQ 64-bit EL1 */
332 VECT_EMPTY /* FIQ 64-bit EL1 */
333 VECT_EMPTY /* Error 64-bit EL1 */
335 VECT_EMPTY /* Synchronous 32-bit EL1 */
336 VECT_EMPTY /* IRQ 32-bit EL1 */
337 VECT_EMPTY /* FIQ 32-bit EL1 */
338 VECT_EMPTY /* Error 32-bit EL1 */
341 * Get the delta between the physical address we were loaded to and the
342 * virtual address we expect to run from. This is used when building the
343 * initial page table.
345 LENTRY(get_virt_delta)
346 /* Load the physical address of virt_map */
348 add x29, x29, :lo12:virt_map
349 /* Load the virtual address of virt_map stored in virt_map */
351 /* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
353 /* Find the load address for the kernel */
/* NOTE(review): the remainder of get_virt_delta (through its LEND) is
 * not visible in this chunk. */
364 * This builds the page tables containing the identity map, and the kernel
368 * We were loaded to an address that is on a 2MiB boundary
369 * All the memory must not cross a 1GiB boundary
370 * x28 contains the physical address we were loaded from
372 * TODO: This is out of date.
373 * There are at least 5 pages before that address for the page tables
374 * The pages used are:
375 * - The Kernel L2 table
376 * - The Kernel L1 table
377 * - The Kernel L0 table (TTBR1)
378 * - The identity (PA = VA) L1 table
379 * - The identity (PA = VA) L0 table (TTBR0)
/*
 * Build the initial page tables: zero the whole pagetable region, build
 * the TTBR1 (kernel, high address) L2/L1/L0 tables, then the TTBR0
 * identity (VA = PA) tables used only during early boot.
 */
381 LENTRY(create_pagetables)
382 /* Save the Link register */
385 /* Clean the page table */
387 add x6, x6, :lo12:pagetable
389 adrp x27, pagetable_end
390 add x27, x27, :lo12:pagetable_end
/* Zero 64 bytes per iteration with paired stores */
392 stp xzr, xzr, [x6], #16
393 stp xzr, xzr, [x6], #16
394 stp xzr, xzr, [x6], #16
395 stp xzr, xzr, [x6], #16
400 * Build the TTBR1 maps.
403 /* Find the size of the kernel */
406 #if defined(LINUX_BOOT_ABI)
407 /* X19 is used as 'map FDT data' flag */
410 /* No modules or FDT pointer ? */
414 * Test if x0 points to modules descriptor(virtual address) or
415 * to FDT (physical address)
417 cmp x0, x6 /* x6 is #(KERNBASE) */
421 /* Booted with modules pointer */
422 /* Find modulep - begin */
424 /* Add two 2MiB pages for the module data and round up */
425 ldr x7, =(3 * L2_SIZE - 1)
429 #if defined(LINUX_BOOT_ABI)
431 /* Booted by U-Boot booti with FDT data */
432 /* Set 'map FDT data' flag */
436 /* Booted by U-Boot booti without FDT data */
437 /* Find the end - begin */
442 * Add one 2MiB page for copy of FDT data (maximum FDT size),
443 * one for metadata and round up
445 ldr x7, =(3 * L2_SIZE - 1)
450 /* Get the number of l2 pages to allocate, rounded down */
451 lsr x10, x8, #(L2_SHIFT)
453 /* Create the kernel space L2 table */
455 mov x7, #VM_MEMATTR_WRITE_BACK
456 mov x8, #(KERNBASE & L2_BLOCK_MASK)
458 bl build_l2_block_pagetable
460 /* Move to the l1 table */
461 add x26, x26, #PAGE_SIZE
463 /* Link the l1 -> l2 table */
468 /* Move to the l0 table */
469 add x24, x26, #PAGE_SIZE
471 /* Link the l0 -> l1 table */
478 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
479 * They are only needed early on, so the VA = PA map is uncached.
481 add x27, x24, #PAGE_SIZE
483 mov x6, x27 /* The initial page table */
484 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
485 /* Create a table for the UART */
486 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
487 mov x8, #(SOCDEV_VA) /* VA start */
488 mov x9, #(SOCDEV_PA) /* PA start */
490 bl build_l1_block_pagetable
493 #if defined(LINUX_BOOT_ABI)
497 /* Create the identity mapping for FDT data (2 MiB max) */
498 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
500 mov x8, x0 /* VA start (== PA start) */
502 bl build_l1_block_pagetable
507 /* Create the VA = PA map */
508 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
510 mov x8, x9 /* VA start (== PA start) */
512 bl build_l1_block_pagetable
514 /* Move to the l0 table */
515 add x27, x27, #PAGE_SIZE
517 /* Link the l0 -> l1 table */
523 /* Restore the Link register */
526 LEND(create_pagetables)
529 * Builds an L0 -> L1 table descriptor
531 * This is a link for a 512GiB block of memory with up to 1GiB regions mapped
532 * within it by build_l1_block_pagetable.
535 * x8 = Virtual Address
536 * x9 = L1 PA (trashed)
538 * x11, x12 and x13 are trashed
540 LENTRY(link_l0_pagetable)
542 * Link an L0 -> L1 table entry.
544 /* Find the table index */
545 lsr x11, x8, #L0_SHIFT
546 and x11, x11, #L0_ADDR_MASK
548 /* Build the L0 block entry */
/* Table entries are non-executable from EL0 and EL0 has no access */
550 orr x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0)
552 /* Only use the output address bits */
553 lsr x9, x9, #PAGE_SHIFT
554 1: orr x13, x12, x9, lsl #PAGE_SHIFT
556 /* Store the entry */
557 str x13, [x6, x11, lsl #3]
565 LEND(link_l0_pagetable)
568 * Builds an L1 -> L2 table descriptor
570 * This is a link for a 1GiB block of memory with up to 2MiB regions mapped
571 * within it by build_l2_block_pagetable.
574 * x8 = Virtual Address
575 * x9 = L2 PA (trashed)
576 * x11, x12 and x13 are trashed
578 LENTRY(link_l1_pagetable)
580 * Link an L1 -> L2 table entry.
582 /* Find the table index */
583 lsr x11, x8, #L1_SHIFT
584 and x11, x11, #Ln_ADDR_MASK
586 /* Build the L1 block entry */
589 /* Only use the output address bits */
590 lsr x9, x9, #PAGE_SHIFT
591 orr x13, x12, x9, lsl #PAGE_SHIFT
593 /* Store the entry */
/* Each descriptor is 8 bytes, hence index << 3 */
594 str x13, [x6, x11, lsl #3]
597 LEND(link_l1_pagetable)
600 * Builds count 1 GiB page table entries
602 * x7 = Variable lower block attributes
604 * x9 = PA start (trashed)
606 * x11, x12 and x13 are trashed
608 LENTRY(build_l1_block_pagetable)
610 * Build the L1 table entry.
612 /* Find the table index */
613 lsr x11, x8, #L1_SHIFT
614 and x11, x11, #Ln_ADDR_MASK
616 /* Build the L1 block entry */
/* Combine caller-supplied attributes (x7) with the block type and
 * default attributes */
617 orr x12, x7, #L1_BLOCK
618 orr x12, x12, #(ATTR_DEFAULT)
620 /* Only use the output address bits */
621 lsr x9, x9, #L1_SHIFT
623 /* Set the physical address for this virtual address */
624 1: orr x13, x12, x9, lsl #L1_SHIFT
626 /* Store the entry */
627 str x13, [x6, x11, lsl #3]
635 LEND(build_l1_block_pagetable)
638 * Builds count 2 MiB page table entries
640 * x7 = Type (0 = Device, 1 = Normal)
642 * x9 = PA start (trashed)
644 * x11, x12 and x13 are trashed
646 LENTRY(build_l2_block_pagetable)
648 * Build the L2 table entry.
650 /* Find the table index */
651 lsr x11, x8, #L2_SHIFT
652 and x11, x11, #Ln_ADDR_MASK
654 /* Build the L2 block entry */
656 orr x12, x12, #L2_BLOCK
657 orr x12, x12, #(ATTR_DEFAULT)
/* Kernel mappings are never executable from EL0 */
658 orr x12, x12, #(ATTR_S1_UXN)
660 /* Only use the output address bits */
661 lsr x9, x9, #L2_SHIFT
663 /* Set the physical address for this virtual address */
664 1: orr x13, x12, x9, lsl #L2_SHIFT
666 /* Store the entry */
667 str x13, [x6, x11, lsl #3]
675 LEND(build_l2_block_pagetable)
/*
 * Fragment of the MMU enable path (entry label not visible in this
 * chunk): install exception vectors, program TCR_EL1 from the CPU's
 * ID registers, then set up SCTLR.  The .quad constants below are the
 * MAIR/TCR/SCTLR values; their labels are not visible here.
 */
680 /* Load the exception vectors */
681 ldr x2, =exception_vectors
684 /* Load ttbr0 and ttbr1 */
689 /* Clear the Monitor Debug System control register */
692 /* Invalidate the TLB */
701 * Setup TCR according to the PARange and ASIDBits fields
702 * from ID_AA64MMFR0_EL1 and the HAFDBS field from the
703 * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS
704 * to 1 only if the ASIDBits field equals 0b0010.
707 mrs x3, id_aa64mmfr0_el1
709 /* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
710 bfi x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
711 and x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)
713 /* Check if the HW supports 16 bit ASIDS */
714 cmp x3, #(ID_AA64MMFR0_ASIDBits_16)
715 /* If so x3 == 1, else x3 == 0 */
717 /* Set TCR.AS with x3 */
718 bfi x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)
721 * Check if the HW supports access flag and dirty state updates,
722 * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
724 mrs x3, id_aa64mmfr1_el1
725 and x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
728 orr x2, x2, #(TCR_HA)
733 orr x2, x2, #(TCR_HA | TCR_HD)
743 bic x1, x1, x3 /* Clear the required bits */
744 orr x1, x1, x2 /* Set the required bits */
/* MAIR_EL1 value: one memory attribute per VM_MEMATTR index */
752 .quad MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) | \
753 MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) | \
754 MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) | \
755 MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) | \
756 MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE)
/* TCR_EL1 base value: 4 KiB granules for both TTBRs */
758 .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG1_4K | TCR_TG0_4K | \
759 TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
/* SCTLR bits to set (presumably the "set" mask; label not visible) */
762 .quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
763 SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
764 SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
765 SCTLR_M | SCTLR_CP15BEN)
/* SCTLR bits to clear; the continuation of this .quad is not visible */
768 .quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
/* BSS-like (%nobits) section holding the boot page tables and stack */
776 .section .init_pagetable, "aw", %nobits
779 * 6 initial tables (in the following order):
780 * L2 for kernel (High addresses)
783 * L1 bootstrap for user (Low addresses)
784 * L0 bootstrap for user
787 .globl pagetable_l0_ttbr1
794 pagetable_l1_ttbr0_bootstrap:
/* NOTE(review): "boostrap" is misspelled (cf. the correctly spelled L1
 * label above) but matches the adrp/:lo12: references elsewhere in the
 * file; renaming requires updating all uses together. */
796 pagetable_l0_ttbr0_boostrap:
808 .space (PAGE_SIZE * KSTACK_PAGES)
/*
 * AArch32 (32-bit ARM) signal trampoline, emitted as raw instruction
 * words since this file assembles as AArch64.  Each .word's disassembly
 * is given in its trailing comment.  Makes two system calls (swi #0)
 * with r7 loaded PC-relative, then loops.
 */
813 EENTRY(aarch32_sigcode)
814 .word 0xe1a0000d // mov r0, sp
815 .word 0xe2800040 // add r0, r0, #SIGF_UC
816 .word 0xe59f700c // ldr r7, [pc, #12]
817 .word 0xef000000 // swi #0
818 .word 0xe59f7008 // ldr r7, [pc, #8]
819 .word 0xef000000 // swi #0
820 .word 0xeafffffa // b . - 16
821 EEND(aarch32_sigcode)
/* Size of the trampoline; the sz_aarch32_sigcode label itself is not
 * visible in this chunk. */
827 .global sz_aarch32_sigcode
829 .quad aarch32_esigcode - aarch32_sigcode