2 * Copyright (c) 2012-2014 Andrew Turner
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include "opt_kstack_pages.h"
31 #include <sys/syscall.h>
32 #include <machine/asm.h>
33 #include <machine/armreg.h>
34 #include <machine/hypervisor.h>
35 #include <machine/param.h>
36 #include <machine/pte.h>
37 #include <machine/vmparam.h>
40 #define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
43 .set kernbase, KERNBASE
46 #define NORMAL_UNCACHED 1
51 * MMU on with an identity map, or off
54 * We are loaded at a 2MiB aligned address
64 * Disable the MMU. We may have entered the kernel with it on and
65 * will need to update the tables later. If this has been set up
66 * with anything other than a VA == PA map then this will fail,
67 * but in this case the code to find where we are running from
68 * would have also failed.
76 /* Set the context id */
77 msr contextidr_el1, xzr
79 /* Get the virt -> phys offset */
85 * x28 = Our physical load address
88 /* Create the page tables */
94 * x26 = Kernel L1 table
101 /* Jump to the virtual address space */
106 /* Set up the stack */
107 adr x25, initstack_end
109 sub sp, sp, #PCB_SIZE
119 /* Backup the module pointer */
122 /* Make the page table base a virtual address */
126 sub sp, sp, #(64 * 4)
129 /* Negate the delta so it is VA -> PA */
132 str x1, [x0] /* modulep */
133 str x26, [x0, 8] /* kern_l1pt */
134 str x29, [x0, 16] /* kern_delta */
135 str x25, [x0, 24] /* kern_stack */
136 str x24, [x0, 32] /* kern_l0pt */
138 /* trace back starts here */
140 /* Branch to C code */
144 /* We should not get here */
157 * mpentry(unsigned long)
159 * Called by a core when it is being brought online.
160 * The data in x0 is passed straight to init_secondary.
163 /* Disable interrupts */
169 /* Set the context id */
170 msr contextidr_el1, xzr
172 /* Load the kernel page table */
173 adr x24, pagetable_l0_ttbr1
174 /* Load the identity page table */
175 adr x27, pagetable_l0_ttbr0
180 /* Jump to the virtual address space */
181 ldr x15, =mp_virtdone
185 ldr x4, =secondary_stacks
186 mov x5, #(PAGE_SIZE * KSTACK_PAGES)
195 * If we are started in EL2, configure the required hypervisor
196 * registers and drop to EL1.
205 /* Configure the Hypervisor */
209 /* Load the Virtualization Process ID Register */
213 /* Load the Virtualization Multiprocess ID Register */
217 /* Set the bits that need to be 1 in sctlr_el1 */
221 /* Don't trap to EL2 for exceptions */
225 /* Don't trap to EL2 for CP15 traps */
228 /* Enable access to the physical timers at EL1 */
230 orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
233 /* Set the counter offset to a known value */
236 /* Hypervisor trap functions */
240 mov x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
243 /* Configure GICv3 CPU interface */
244 mrs x2, id_aa64pfr0_el1
245 /* Extract GIC bits from the register */
246 ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
247 /* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
248 cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
252 orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from insecure EL1 */
253 orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */
257 /* Set the address to return to our return address */
273 VECT_EMPTY /* Synchronous EL2t */
274 VECT_EMPTY /* IRQ EL2t */
275 VECT_EMPTY /* FIQ EL2t */
276 VECT_EMPTY /* Error EL2t */
278 VECT_EMPTY /* Synchronous EL2h */
279 VECT_EMPTY /* IRQ EL2h */
280 VECT_EMPTY /* FIQ EL2h */
281 VECT_EMPTY /* Error EL2h */
283 VECT_EMPTY /* Synchronous 64-bit EL1 */
284 VECT_EMPTY /* IRQ 64-bit EL1 */
285 VECT_EMPTY /* FIQ 64-bit EL1 */
286 VECT_EMPTY /* Error 64-bit EL1 */
288 VECT_EMPTY /* Synchronous 32-bit EL1 */
289 VECT_EMPTY /* IRQ 32-bit EL1 */
290 VECT_EMPTY /* FIQ 32-bit EL1 */
291 VECT_EMPTY /* Error 32-bit EL1 */
294 * Get the delta between the physical address we were loaded to and the
295 * virtual address we expect to run from. This is used when building the
296 * initial page table.
299 /* Load the physical address of virt_map */
301 /* Load the virtual address of virt_map stored in virt_map */
303 /* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
305 /* Find the load address for the kernel */
315 * This builds the page tables containing the identity map, and the kernel
319 * We were loaded to an address that is on a 2MiB boundary
320 * All the memory must not cross a 1GiB boundary
321 * x28 contains the physical address we were loaded from
323 * TODO: This is out of date.
324 * There are at least 5 pages before that address for the page tables
325 * The pages used are:
326 * - The Kernel L2 table
327 * - The Kernel L1 table
328 * - The Kernel L0 table (TTBR1)
329 * - The identity (PA = VA) L1 table
330 * - The identity (PA = VA) L0 table (TTBR0)
331 * - The DMAP L1 tables
334 /* Save the Link register */
337 /* Clean the page table */
340 adr x27, pagetable_end
342 stp xzr, xzr, [x6], #16
343 stp xzr, xzr, [x6], #16
344 stp xzr, xzr, [x6], #16
345 stp xzr, xzr, [x6], #16
350 * Build the TTBR1 maps.
353 /* Find the size of the kernel */
355 /* Find modulep - begin */
357 /* Add two 2MiB pages for the module data and round up */
358 ldr x7, =(3 * L2_SIZE - 1)
360 /* Get the number of l2 pages to allocate, rounded down */
361 lsr x10, x8, #(L2_SHIFT)
363 /* Create the kernel space L2 table */
366 mov x8, #(KERNBASE & L2_BLOCK_MASK)
368 bl build_l2_block_pagetable
370 /* Move to the l1 table */
371 add x26, x26, #PAGE_SIZE
373 /* Link the l1 -> l2 table */
378 /* Move to the l0 table */
379 add x24, x26, #PAGE_SIZE
381 /* Link the l0 -> l1 table */
387 /* Link the DMAP tables */
388 ldr x8, =DMAP_MIN_ADDRESS
389 adr x9, pagetable_dmap;
390 mov x10, #DMAP_TABLES
394 * Build the TTBR0 maps.
396 add x27, x24, #PAGE_SIZE
398 mov x6, x27 /* The initial page table */
399 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
400 /* Create a table for the UART */
402 mov x8, #(SOCDEV_VA) /* VA start */
403 mov x9, #(SOCDEV_PA) /* PA start */
405 bl build_l1_block_pagetable
408 /* Create the VA = PA map */
409 mov x7, #NORMAL_UNCACHED /* Uncached as it's only needed early on */
411 mov x8, x9 /* VA start (== PA start) */
413 bl build_l1_block_pagetable
415 /* Move to the l0 table */
416 add x27, x27, #PAGE_SIZE
418 /* Link the l0 -> l1 table */
424 /* Restore the Link register */
429 * Builds an L0 -> L1 table descriptor
431 * This is a link for a 512GiB block of memory with up to 1GiB regions mapped
432 * within it by build_l1_block_pagetable.
435 * x8 = Virtual Address
436 * x9 = L1 PA (trashed)
438 * x11, x12 and x13 are trashed
442 * Link an L0 -> L1 table entry.
444 /* Find the table index */
445 lsr x11, x8, #L0_SHIFT
446 and x11, x11, #L0_ADDR_MASK
448 /* Build the L0 block entry */
451 /* Only use the output address bits */
452 lsr x9, x9, #PAGE_SHIFT
453 1: orr x13, x12, x9, lsl #PAGE_SHIFT
455 /* Store the entry */
456 str x13, [x6, x11, lsl #3]
466 * Builds an L1 -> L2 table descriptor
468 * This is a link for a 1GiB block of memory with up to 2MiB regions mapped
469 * within it by build_l2_block_pagetable.
472 * x8 = Virtual Address
473 * x9 = L2 PA (trashed)
474 * x11, x12 and x13 are trashed
478 * Link an L1 -> L2 table entry.
480 /* Find the table index */
481 lsr x11, x8, #L1_SHIFT
482 and x11, x11, #Ln_ADDR_MASK
484 /* Build the L1 block entry */
487 /* Only use the output address bits */
488 lsr x9, x9, #PAGE_SHIFT
489 orr x13, x12, x9, lsl #PAGE_SHIFT
491 /* Store the entry */
492 str x13, [x6, x11, lsl #3]
497 * Builds "count" 1 GiB page table entries
499 * x7 = Type (0 = Device, 1 = Normal)
501 * x9 = PA start (trashed)
503 * x11, x12 and x13 are trashed
505 build_l1_block_pagetable:
507 * Build the L1 table entry.
509 /* Find the table index */
510 lsr x11, x8, #L1_SHIFT
511 and x11, x11, #Ln_ADDR_MASK
513 /* Build the L1 block entry */
515 orr x12, x12, #L1_BLOCK
516 orr x12, x12, #(ATTR_AF)
518 orr x12, x12, ATTR_SH(ATTR_SH_IS)
521 /* Only use the output address bits */
522 lsr x9, x9, #L1_SHIFT
524 /* Set the physical address for this virtual address */
525 1: orr x13, x12, x9, lsl #L1_SHIFT
527 /* Store the entry */
528 str x13, [x6, x11, lsl #3]
538 * Builds "count" 2 MiB page table entries
540 * x7 = Type (0 = Device, 1 = Normal)
542 * x9 = PA start (trashed)
544 * x11, x12 and x13 are trashed
/*
 * build_l2_block_pagetable — write 2 MiB (L2) block descriptor(s) into a
 * translation table.
 *
 * Per the header comment above and the visible instructions:
 *   x6 = table base, x7 = type (0 = Device, 1 = Normal),
 *   x8 = VA start, x9 = PA start (trashed); x11, x12, x13 are trashed.
 * NOTE(review): this view of the file is missing interior lines (the
 * attribute-index setup from x7, the loop control and the return), so
 * only the instructions shown below are annotated.
 */
546 build_l2_block_pagetable:
548	 * Build the L2 table entry.
550	/* Find the table index */
/* x11 = (VA >> L2_SHIFT) & Ln_ADDR_MASK — index of the L2 entry for x8 */
551	lsr	x11, x8, #L2_SHIFT
552	and	x11, x11, #Ln_ADDR_MASK
554	/* Build the L2 block entry */
/*
 * Accumulate descriptor attribute bits in x12: block-type descriptor,
 * Access Flag set, and inner-shareable.
 */
556	orr	x12, x12, #L2_BLOCK
557	orr	x12, x12, #(ATTR_AF)
559	orr	x12, x12, ATTR_SH(ATTR_SH_IS)
562	/* Only use the output address bits */
/* Drop the low bits of the PA so only the 2 MiB-aligned address remains */
563	lsr	x9, x9, #L2_SHIFT
565	/* Set the physical address for this virtual address */
/* x13 = attributes | (PA re-aligned to the L2 block boundary) */
566	1:	orr	x13, x12, x9, lsl #L2_SHIFT
568	/* Store the entry */
/* Each entry is 8 bytes, hence the index is scaled by lsl #3 */
569	str	x13, [x6, x11, lsl #3]
581 /* Load the exception vectors */
582 ldr x2, =exception_vectors
585 /* Load ttbr0 and ttbr1 */
590 /* Clear the Monitor Debug System control register */
593 /* Invalidate the TLB */
600 * Setup TCR according to PARange bits from ID_AA64MMFR0_EL1.
603 mrs x3, id_aa64mmfr0_el1
611 bic x1, x1, x3 /* Clear the required bits */
612 orr x1, x1, x2 /* Set the required bits */
620 .quad MAIR_ATTR(MAIR_DEVICE_nGnRnE, 0) | \
621 MAIR_ATTR(MAIR_NORMAL_NC, 1) | \
622 MAIR_ATTR(MAIR_NORMAL_WB, 2) | \
623 MAIR_ATTR(MAIR_NORMAL_WT, 3)
625 .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_ASID_16 | TCR_TG1_4K | \
626 TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
629 .quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
630 SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
631 SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
632 SCTLR_M | SCTLR_CP15BEN)
635 .quad (SCTLR_EE | SCTLR_EOE | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
636 SCTLR_ITD | SCTLR_THEE | SCTLR_A)
642 //.section .init_pagetable
643 .align 12 /* 4KiB aligned */
645 * 3 initial tables (in the following order):
646 * L2 for kernel (High addresses)
648 * L1 for user (Low addresses)
661 .globl pagetable_dmap
663 .space PAGE_SIZE * DMAP_TABLES
671 .quad pagetable /* XXX: Keep page tables VA */
675 .space (PAGE_SIZE * KSTACK_PAGES)
684 mov x8, #SYS_sigreturn
687 /* sigreturn failed, exit */
693 /* This may be copied to the stack, keep it 16-byte aligned */
701 .quad esigcode - sigcode