2 * Copyright (c) 2012-2014 Andrew Turner
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include "opt_kstack_pages.h"
31 #include <sys/syscall.h>
32 #include <machine/asm.h>
33 #include <machine/armreg.h>
34 #include <machine/hypervisor.h>
35 #include <machine/param.h>
36 #include <machine/pte.h>
37 #include <machine/vmparam.h>
/* Assembler alias for the kernel's virtual base address (KERNBASE). */
42 .set kernbase, KERNBASE
/*
 * Memory-attribute index for Normal, non-cacheable memory — index 1 in
 * mair_el1 below (MAIR_ATTR(0x44, 1)).  Used for early mappings that must
 * work before the caches are fully set up.
 */
45 #define NORMAL_UNCACHED 1
/*
 * Primary-CPU boot entry path.  Runs from the physical load address with
 * the MMU in an unknown state, builds the bootstrap page tables, jumps to
 * the kernel virtual address space, fills in the boot parameters on the
 * stack and branches into C.  (This view of the file is sampled; lines
 * are missing between the visible ones.)
 */
50 * MMU on with an identity map, or off
53 * We are loaded at a 2MiB aligned address
63 * Disable the MMU. We may have entered the kernel with it on and
64 * will need to update the tables later. If this has been set up
65 * with anything other than a VA == PA map then this will fail,
66 * but in this case the code to find where we are running from
67 * would have also failed.
75 /* Set the context id */
76 msr contextidr_el1, xzr
78 /* Get the virt -> phys offset */
84 * x28 = Our physical load address
87 /* Create the page tables */
93 * x26 = Kernel L1 table
100 /* Jump to the virtual address space */
105 /* Set up the stack */
/* x25 = top of the bootstrap kernel stack (we now run at virtual addresses) */
106 adr x25, initstack_end
/* Reserve room for the initial thread's PCB at the top of that stack */
108 sub sp, sp, #PCB_SIZE
118 /* Backup the module pointer */
121 /* Make the page table base a virtual address */
/* Carve out stack space for the boot parameters stored below */
125 sub sp, sp, #(64 * 4)
128 /* Negate the delta so it is VA -> PA */
/* Fill in the boot-parameter structure handed to the C entry point */
131 str x1, [x0] /* modulep */
132 str x26, [x0, 8] /* kern_l1pt */
133 str x29, [x0, 16] /* kern_delta */
134 str x25, [x0, 24] /* kern_stack */
135 str x24, [x0, 32] /* kern_l0pt */
137 /* trace back starts here */
139 /* Branch to C code */
143 /* We should not get here */
/*
 * Secondary-CPU (AP) entry point, analogous to the primary entry above
 * but reusing the already-built page tables.
 */
156 * mpentry(unsigned long)
158 * Called by a core when it is being brought online.
159 * The data in x0 is passed straight to init_secondary.
162 /* Disable interrupts */
168 /* Set the context id */
169 msr contextidr_el1, x1
171 /* Load the kernel page table */
/* x24 = physical address of the TTBR1 (kernel VA) L0 table */
172 adr x24, pagetable_l0_ttbr1
173 /* Load the identity page table */
/* x27 = physical address of the TTBR0 (identity VA == PA) L0 table */
174 adr x27, pagetable_l0_ttbr0
179 /* Jump to the virtual address space */
180 ldr x15, =mp_virtdone
/* Locate this CPU's kernel stack within the secondary_stacks array */
184 ldr x4, =secondary_stacks
/* x5 = size of one per-CPU kernel stack */
185 mov x5, #(PAGE_SIZE * KSTACK_PAGES)
/*
 * EL2 setup: if entered at hypervisor privilege, configure the minimal
 * EL2 state (virtual IDs, timer access, trap behaviour, GICv3 system
 * register access) and return to EL1h (PSR_M_EL1h below selects the
 * target mode for the eret).
 */
194 * If we are started in EL2, configure the required hypervisor
195 * registers and drop to EL1.
204 /* Configure the Hypervisor */
208 /* Load the Virtualization Process ID Register */
212 /* Load the Virtualization Multiprocess ID Register */
216 /* Set the bits that need to be 1 in sctlr_el1 */
220 /* Don't trap to EL2 for exceptions */
224 /* Don't trap to EL2 for CP15 traps */
227 /* Enable access to the physical timers at EL1 */
229 orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
232 /* Set the counter offset to a known value */
235 /* Hypervisor trap functions */
/* SPSR for the return into EL1h: all interrupt/abort/debug masks set */
239 mov x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
242 /* Configure GICv3 CPU interface */
243 mrs x2, id_aa64pfr0_el1
244 /* Extract GIC bits from the register */
245 ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
246 /* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
247 cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
251 orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from non-secure EL1 */
252 orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */
256 /* Set the address to return to our return address */
/*
 * Exception vector table installed while briefly running at EL2.
 * Every slot is VECT_EMPTY (macro defined elsewhere in the file); no
 * exceptions are expected before the drop to EL1.  NOTE(review): the
 * exact behaviour of an empty slot depends on the VECT_EMPTY
 * definition — confirm against the full file.
 */
272 VECT_EMPTY /* Synchronous EL2t */
273 VECT_EMPTY /* IRQ EL2t */
274 VECT_EMPTY /* FIQ EL2t */
275 VECT_EMPTY /* Error EL2t */
277 VECT_EMPTY /* Synchronous EL2h */
278 VECT_EMPTY /* IRQ EL2h */
279 VECT_EMPTY /* FIQ EL2h */
280 VECT_EMPTY /* Error EL2h */
282 VECT_EMPTY /* Synchronous 64-bit EL1 */
283 VECT_EMPTY /* IRQ 64-bit EL1 */
284 VECT_EMPTY /* FIQ 64-bit EL1 */
285 VECT_EMPTY /* Error 64-bit EL1 */
287 VECT_EMPTY /* Synchronous 32-bit EL1 */
288 VECT_EMPTY /* IRQ 32-bit EL1 */
289 VECT_EMPTY /* FIQ 32-bit EL1 */
290 VECT_EMPTY /* Error 32-bit EL1 */
293 * Get the delta between the physical address we were loaded to and the
294 * virtual address we expect to run from. This is used when building the
295 * initial page table.
298 /* Load the physical address of virt_map */
300 /* Load the virtual address of virt_map stored in virt_map */
302 /* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
304 /* Find the load address for the kernel */
/*
 * NOTE(review): x29 carries the PA - VA delta from here on (it is later
 * stored as kern_delta in the boot parameters) — confirm with the
 * missing lines of the full file.
 */
314 * This builds the page tables containing the identity map, and the kernel
318 * We were loaded to an address that is on a 2MiB boundary
319 * All the memory must not cross a 1GiB boundary
320 * x28 contains the physical address we were loaded from
322 * TODO: This is out of date.
323 * There are at least 5 pages before that address for the page tables
324 * The pages used are:
325 * - The identity (PA = VA) table (TTBR0)
326 * - The Kernel L1 table (TTBR1)(not yet)
327 * - The PA != VA L2 table to jump into (not yet)
328 * - The FDT L2 table (not yet)
331 /* Save the Link register */
334 /* Clean the page table */
/* Zero the whole page-table area, 64 bytes per loop iteration */
337 adr x27, pagetable_end
339 stp xzr, xzr, [x6], #16
340 stp xzr, xzr, [x6], #16
341 stp xzr, xzr, [x6], #16
342 stp xzr, xzr, [x6], #16
347 * Build the TTBR1 maps.
350 /* Find the size of the kernel */
353 /* Find the end - begin */
355 /* Get the number of l2 pages to allocate, rounded down */
/* Number of 2 MiB (L2 block) units covering the kernel image */
356 lsr x10, x8, #(L2_SHIFT)
357 /* Add 8 MiB for any rounding above and the module data */
360 /* Create the kernel space L2 table */
/* x8 = VA start for the kernel map, aligned to an L2 block */
363 mov x8, #(KERNBASE & L2_BLOCK_MASK)
365 bl build_l2_block_pagetable
367 /* Move to the l1 table */
368 add x26, x26, #PAGE_SIZE
370 /* Link the l1 -> l2 table */
375 /* Move to the l0 table */
376 add x24, x26, #PAGE_SIZE
378 /* Link the l0 -> l1 table */
385 * Build the TTBR0 maps.
387 add x27, x24, #PAGE_SIZE
389 mov x6, x27 /* The initial page table */
390 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
391 /* Create a table for the UART */
/* Optional early-console mapping so the SoC UART is reachable at boot */
393 mov x8, #(SOCDEV_VA) /* VA start */
394 mov x9, #(SOCDEV_PA) /* PA start */
396 bl build_l1_block_pagetable
399 /* Create the VA = PA map */
400 mov x7, #NORMAL_UNCACHED /* Uncached as it's only needed early on */
402 mov x8, x9 /* VA start (== PA start) */
404 bl build_l1_block_pagetable
406 /* Move to the l0 table */
407 add x27, x27, #PAGE_SIZE
409 /* Link the l0 -> l1 table */
415 /* Restore the Link register */
420 * Builds an L0 -> L1 table descriptor
422 * This is a link for a 512GiB block of memory with up to 1GiB regions mapped
423 * within it by build_l1_block_pagetable.
/* NOTE(review): x6 holds the L0 table base (see the str below) — confirm
 * against the missing argument list in the full file. */
426 * x8 = Virtual Address
427 * x9 = L1 PA (trashed)
429 * x11, x12 and x13 are trashed
433 * Link an L0 -> L1 table entry.
435 /* Find the table index */
/* x11 = index of x8's 512 GiB region within the L0 table */
436 lsr x11, x8, #L0_SHIFT
437 and x11, x11, #L0_ADDR_MASK
439 /* Build the L0 block entry */
442 /* Only use the output address bits */
443 lsr x9, x9, #PAGE_SHIFT
444 1: orr x13, x12, x9, lsl #PAGE_SHIFT
446 /* Store the entry */
/* Each descriptor is 8 bytes, hence the lsl #3 index scaling */
447 str x13, [x6, x11, lsl #3]
457 * Builds an L1 -> L2 table descriptor
459 * This is a link for a 1GiB block of memory with up to 2MiB regions mapped
460 * within it by build_l2_block_pagetable.
/* NOTE(review): x6 holds the L1 table base (see the str below) — confirm
 * against the missing argument list in the full file. */
463 * x8 = Virtual Address
464 * x9 = L2 PA (trashed)
465 * x11, x12 and x13 are trashed
469 * Link an L1 -> L2 table entry.
471 /* Find the table index */
/* x11 = index of x8's 1 GiB region within the L1 table */
472 lsr x11, x8, #L1_SHIFT
473 and x11, x11, #Ln_ADDR_MASK
475 /* Build the L1 block entry */
478 /* Only use the output address bits */
479 lsr x9, x9, #PAGE_SHIFT
480 orr x13, x12, x9, lsl #PAGE_SHIFT
482 /* Store the entry */
/* Each descriptor is 8 bytes, hence the lsl #3 index scaling */
483 str x13, [x6, x11, lsl #3]
488 * Builds count 1 GiB page table entries
490 * x7 = Type (0 = Device, 1 = Normal)
492 * x9 = PA start (trashed)
494 * x11, x12 and x13 are trashed
496 build_l1_block_pagetable:
498 * Build the L1 table entry.
500 /* Find the table index */
/* x11 = starting index of the VA within the L1 table */
501 lsr x11, x8, #L1_SHIFT
502 and x11, x11, #Ln_ADDR_MASK
504 /* Build the L1 block entry */
506 orr x12, x12, #L1_BLOCK
/* Access Flag set up front so no access faults are taken on first use */
507 orr x12, x12, #(ATTR_AF)
/* Inner-shareable, as required for SMP-coherent Normal memory */
509 orr x12, x12, ATTR_SH(ATTR_SH_IS)
512 /* Only use the output address bits */
513 lsr x9, x9, #L1_SHIFT
515 /* Set the physical address for this virtual address */
516 1: orr x13, x12, x9, lsl #L1_SHIFT
518 /* Store the entry */
/* Each descriptor is 8 bytes, hence the lsl #3 index scaling */
519 str x13, [x6, x11, lsl #3]
529 * Builds count 2 MiB page table entries
531 * x7 = Type (0 = Device, 1 = Normal)
533 * x9 = PA start (trashed)
535 * x11, x12 and x13 are trashed
537 build_l2_block_pagetable:
539 * Build the L2 table entry.
541 /* Find the table index */
/* x11 = starting index of the VA within the L2 table */
542 lsr x11, x8, #L2_SHIFT
543 and x11, x11, #Ln_ADDR_MASK
545 /* Build the L2 block entry */
547 orr x12, x12, #L2_BLOCK
/* Access Flag set up front so no access faults are taken on first use */
548 orr x12, x12, #(ATTR_AF)
/* Inner-shareable, as required for SMP-coherent Normal memory */
550 orr x12, x12, ATTR_SH(ATTR_SH_IS)
553 /* Only use the output address bits */
554 lsr x9, x9, #L2_SHIFT
556 /* Set the physical address for this virtual address */
557 1: orr x13, x12, x9, lsl #L2_SHIFT
559 /* Store the entry */
/* Each descriptor is 8 bytes, hence the lsl #3 index scaling */
560 str x13, [x6, x11, lsl #3]
/*
 * Final MMU bring-up: install the exception vectors and translation
 * tables, program TCR from the CPU's reported physical address range,
 * then (in lines not visible here) write sctlr_el1 from the templates
 * below to turn the MMU and caches on.
 */
572 /* Load the exception vectors */
573 ldr x2, =exception_vectors
576 /* Load ttbr0 and ttbr1 */
581 /* Clear the Monitor Debug System control register */
584 /* Invalidate the TLB */
591 * Setup TCR according to PARange bits from ID_AA64MMFR0_EL1.
594 mrs x3, id_aa64mmfr0_el1
602 bic x1, x1, x3 /* Clear the required bits */
603 orr x1, x1, x2 /* Set the required bits */
/*
 * mair_el1 template: attr index 0 = Device (0x00),
 * index 1 = Normal non-cacheable (0x44), index 2 = Normal
 * write-back cacheable (0xff).
 */
611 /* Device Normal, no cache Normal, write-back */
612 .quad MAIR_ATTR(0x00, 0) | MAIR_ATTR(0x44, 1) | MAIR_ATTR(0xff, 2)
/* tcr_el1 template: VA size, 16-bit ASIDs, 4 KiB TTBR1 granule */
614 .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_ASID_16 | TCR_TG1_4K | \
615 TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
/* sctlr_el1 bits to set — includes SCTLR_M/SCTLR_C/SCTLR_I (MMU + caches on) */
618 .quad (SCTLR_UCI | SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
619 SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | SCTLR_M)
/* sctlr_el1 bits to clear */
622 .quad (SCTLR_EE | SCTLR_EOE | SCTLR_WXN | SCTLR_UMA | SCTLR_ITD | \
623 SCTLR_THEE | SCTLR_CP15BEN | SCTLR_A)
/* Page-table storage: 4 KiB-aligned so each table occupies whole pages. */
629 //.section .init_pagetable
630 .align 12 /* 4KiB aligned */
632 * 3 initial tables (in the following order):
633 * L2 for kernel (High addresses)
635 * L1 for user (Low addresses)
654 .quad pagetable /* XXX: Keep page tables VA */
/* Bootstrap kernel stack for the boot CPU (initstack .. initstack_end) */
658 .space (PAGE_SIZE * KSTACK_PAGES)
/* Signal-return trampoline: issues the sigreturn system call */
667 mov x8, #SYS_sigreturn
670 /* sigreturn failed, exit */
676 /* This may be copied to the stack, keep it 16-byte aligned */
/* Size of the sigcode trampoline, exported for the copy-out */
684 .quad esigcode - sigcode