2 * Copyright (c) 2012-2014 Andrew Turner
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include "opt_kstack_pages.h"
31 #include <sys/syscall.h>
32 #include <machine/asm.h>
33 #include <machine/armreg.h>
34 #include <machine/hypervisor.h>
35 #include <machine/param.h>
36 #include <machine/pte.h>
37 #include <machine/vmparam.h>
/* Number of L0 slots (one L1 table each) needed to cover the direct-map (DMAP) region. */
40 #define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
/* Assembler alias for the kernel's virtual base address. */
43 .set kernbase, KERNBASE
/* MAIR index for Normal, non-cacheable memory (index 1 in the mair_el1 value below). */
46 #define NORMAL_UNCACHED 1
51 * MMU on with an identity map, or off
54 * We are loaded at a 2MiB aligned address
/*
 * NOTE(review): this is an excerpt of the primary-CPU boot entry; the
 * instructions between many of the comment lines below are not visible here.
 */
64 * Disable the MMU. We may have entered the kernel with it on and
65 * will need to update the tables later. If this has been set up
66 * with anything other than a VA == PA map then this will fail,
67 * but in this case the code to find where we are running from
68 * would have also failed.
76 /* Set the context id */
77 msr contextidr_el1, xzr
79 /* Get the virt -> phys offset */
85 * x28 = Our physical load address
88 /* Create the page tables */
94 * x26 = Kernel L1 table
101 /* Jump to the virtual address space */
106 /* Set up the stack */
107 adr x25, initstack_end
/* Reserve room for the initial thread's PCB at the top of the bootstrap stack. */
109 sub sp, sp, #PCB_SIZE
119 /* Backup the module pointer */
122 /* Make the page table base a virtual address */
/* Scratch area (64 * 4 bytes) on the stack — presumably for early boot parameters; confirm against full source. */
126 sub sp, sp, #(64 * 4)
129 /* Negate the delta so it is VA -> PA */
/* Publish the early boot parameters for the C entry code to consume. */
132 str x1, [x0] /* modulep */
133 str x26, [x0, 8] /* kern_l1pt */
134 str x29, [x0, 16] /* kern_delta */
136 str x25, [x0, 24] /* kern_stack */
137 str x24, [x0, 32] /* kern_l0pt */
139 /* trace back starts here */
141 /* Branch to C code */
145 /* We should not get here */
/*
 * Secondary-CPU entry point (excerpt): disable interrupts, install the
 * shared kernel/identity translation tables, pick a per-CPU stack, then
 * continue in the virtual address space.
 */
158 * mpentry(unsigned long)
160 * Called by a core when it is being brought online.
161 * The data in x0 is passed straight to init_secondary.
164 /* Disable interrupts */
170 /* Set the context id */
171 msr contextidr_el1, xzr
173 /* Load the kernel page table */
174 adr x24, pagetable_l0_ttbr1
175 /* Load the identity page table */
176 adr x27, pagetable_l0_ttbr0
181 /* Jump to the virtual address space */
182 ldr x15, =mp_virtdone
/* Per-CPU stack: base array plus (PAGE_SIZE * KSTACK_PAGES) per core. */
186 ldr x4, =secondary_stacks
187 mov x5, #(PAGE_SIZE * KSTACK_PAGES)
/*
 * EL2 -> EL1 transition (excerpt). Configures the minimal set of
 * hypervisor registers so the kernel can run at EL1, then erets down.
 */
196 * If we are started in EL2, configure the required hypervisor
197 * registers and drop to EL1.
206 /* Configure the Hypervisor */
210 /* Load the Virtualization Process ID Register */
214 /* Load the Virtualization Multiprocess ID Register */
218 /* Set the bits that need to be 1 in sctlr_el1 */
222 /* Don't trap to EL2 for exceptions */
226 /* Don't trap to EL2 for CP15 traps */
229 /* Enable access to the physical timers at EL1 */
231 orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
234 /* Set the counter offset to a known value */
237 /* Hypervisor trap functions */
/* SPSR value for the eret: all DAIF bits masked, target mode EL1h (EL1 using SP_EL1). */
241 mov x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
244 /* Configure GICv3 CPU interface */
245 mrs x2, id_aa64pfr0_el1
246 /* Extract GIC bits from the register */
247 ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
248 /* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
249 cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
253 orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from insecure EL1 */
254 orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */
258 /* Set the address to return to our return address */
/*
 * EL2 exception vector table: all sixteen slots are stub entries
 * (VECT_EMPTY); nothing is expected to trap to EL2 during early boot.
 */
274 VECT_EMPTY /* Synchronous EL2t */
275 VECT_EMPTY /* IRQ EL2t */
276 VECT_EMPTY /* FIQ EL2t */
277 VECT_EMPTY /* Error EL2t */
279 VECT_EMPTY /* Synchronous EL2h */
280 VECT_EMPTY /* IRQ EL2h */
281 VECT_EMPTY /* FIQ EL2h */
282 VECT_EMPTY /* Error EL2h */
284 VECT_EMPTY /* Synchronous 64-bit EL1 */
285 VECT_EMPTY /* IRQ 64-bit EL1 */
286 VECT_EMPTY /* FIQ 64-bit EL1 */
287 VECT_EMPTY /* Error 64-bit EL1 */
289 VECT_EMPTY /* Synchronous 32-bit EL1 */
290 VECT_EMPTY /* IRQ 32-bit EL1 */
291 VECT_EMPTY /* FIQ 32-bit EL1 */
292 VECT_EMPTY /* Error 32-bit EL1 */
295 * Get the delta between the physical address we were loaded to and the
296 * virtual address we expect to run from. This is used when building the
297 * initial page table.
/* NOTE(review): results appear to land in x29 (delta) and x28 (load PA, per the boot-entry comment); confirm against full source. */
300 /* Load the physical address of virt_map */
302 /* Load the virtual address of virt_map stored in virt_map */
304 /* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
306 /* Find the load address for the kernel */
316 * This builds the page tables containing the identity map, and the kernel
320 * We were loaded to an address that is on a 2MiB boundary
321 * All the memory must not cross a 1GiB boundary
322 * x28 contains the physical address we were loaded from
324 * TODO: This is out of date.
325 * There are at least 5 pages before that address for the page tables
326 * The pages used are:
327 * - The Kernel L2 table
328 * - The Kernel L1 table
329 * - The Kernel L0 table (TTBR1)
330 * - The identity (PA = VA) L1 table
331 * - The identity (PA = VA) L0 table (TTBR0)
332 * - The DMAP L1 tables
335 /* Save the Link register */
/* Zero every page-table page (4 pairs = 64 bytes per loop iteration). */
338 /* Clean the page table */
341 adr x27, pagetable_end
343 stp xzr, xzr, [x6], #16
344 stp xzr, xzr, [x6], #16
345 stp xzr, xzr, [x6], #16
346 stp xzr, xzr, [x6], #16
351 * Build the TTBR1 maps.
354 /* Find the size of the kernel */
356 /* Find modulep - begin */
358 /* Add two 2MiB pages for the module data and round up */
359 ldr x7, =(3 * L2_SIZE - 1)
361 /* Get the number of l2 pages to allocate, rounded down */
362 lsr x10, x8, #(L2_SHIFT)
364 /* Create the kernel space L2 table */
367 mov x8, #(KERNBASE & L2_BLOCK_MASK)
369 bl build_l2_block_pagetable
371 /* Move to the l1 table */
372 add x26, x26, #PAGE_SIZE
374 /* Link the l1 -> l2 table */
379 /* Move to the l0 table */
380 add x24, x26, #PAGE_SIZE
382 /* Link the l0 -> l1 table */
388 /* Link the DMAP tables */
389 ldr x8, =DMAP_MIN_ADDRESS
/* NOTE(review): trailing ';' is a statement separator in GNU as for AArch64, not a comment — harmless but stray. */
390 adr x9, pagetable_dmap;
391 mov x10, #DMAP_TABLES
395 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_nG.
396 * They are only needed early on, so the VA = PA map is uncached.
398 add x27, x24, #PAGE_SIZE
400 mov x6, x27 /* The initial page table */
401 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
/* Optional early-console mapping: one 1GiB Device-memory block for the SoC UART. */
402 /* Create a table for the UART */
403 mov x7, #(ATTR_nG | ATTR_IDX(DEVICE_MEM))
404 mov x8, #(SOCDEV_VA) /* VA start */
405 mov x9, #(SOCDEV_PA) /* PA start */
407 bl build_l1_block_pagetable
410 /* Create the VA = PA map */
411 mov x7, #(ATTR_nG | ATTR_IDX(NORMAL_UNCACHED))
413 mov x8, x9 /* VA start (== PA start) */
415 bl build_l1_block_pagetable
417 /* Move to the l0 table */
418 add x27, x27, #PAGE_SIZE
420 /* Link the l0 -> l1 table */
426 /* Restore the Link register */
431 * Builds an L0 -> L1 table descriptor
433 * This is a link for a 512GiB block of memory with up to 1GiB regions mapped
434 * within it by build_l1_block_pagetable.
437 * x8 = Virtual Address
438 * x9 = L1 PA (trashed)
440 * x11, x12 and x13 are trashed
444 * Link an L0 -> L1 table entry.
446 /* Find the table index */
/* index = (VA >> L0_SHIFT) & L0_ADDR_MASK */
447 lsr x11, x8, #L0_SHIFT
448 and x11, x11, #L0_ADDR_MASK
450 /* Build the L0 block entry */
453 /* Only use the output address bits */
/* Clear the low bits of the L1 table PA so only page-aligned output-address bits remain. */
454 lsr x9, x9, #PAGE_SHIFT
455 1: orr x13, x12, x9, lsl #PAGE_SHIFT
457 /* Store the entry */
/* Each descriptor is 8 bytes, hence the index is scaled by 8 (lsl #3). */
458 str x13, [x6, x11, lsl #3]
468 * Builds an L1 -> L2 table descriptor
470 * This is a link for a 1GiB block of memory with up to 2MiB regions mapped
471 * within it by build_l2_block_pagetable.
474 * x8 = Virtual Address
475 * x9 = L2 PA (trashed)
476 * x11, x12 and x13 are trashed
480 * Link an L1 -> L2 table entry.
482 /* Find the table index */
/* index = (VA >> L1_SHIFT) & Ln_ADDR_MASK */
483 lsr x11, x8, #L1_SHIFT
484 and x11, x11, #Ln_ADDR_MASK
486 /* Build the L1 block entry */
489 /* Only use the output address bits */
/* Clear the low bits of the L2 table PA so only page-aligned output-address bits remain. */
490 lsr x9, x9, #PAGE_SHIFT
491 orr x13, x12, x9, lsl #PAGE_SHIFT
493 /* Store the entry */
/* Each descriptor is 8 bytes, hence the index is scaled by 8 (lsl #3). */
494 str x13, [x6, x11, lsl #3]
499 * Builds count 1 GiB page table entries
501 * x7 = Variable lower block attributes
503 * x9 = PA start (trashed)
505 * x11, x12 and x13 are trashed
/* NOTE(review): excerpt — the loop counter (presumably x10 = count, cf. callers) and the branch back to label 1 are not visible here. */
507 build_l1_block_pagetable:
509 * Build the L1 table entry.
511 /* Find the table index */
/* index = (VA >> L1_SHIFT) & Ln_ADDR_MASK */
512 lsr x11, x8, #L1_SHIFT
513 and x11, x11, #Ln_ADDR_MASK
515 /* Build the L1 block entry */
/* Descriptor = caller attributes | block type | Access Flag | Inner Shareable. */
516 orr x12, x7, #L1_BLOCK
517 orr x12, x12, #(ATTR_AF)
519 orr x12, x12, ATTR_SH(ATTR_SH_IS)
522 /* Only use the output address bits */
523 lsr x9, x9, #L1_SHIFT
525 /* Set the physical address for this virtual address */
526 1: orr x13, x12, x9, lsl #L1_SHIFT
528 /* Store the entry */
/* Each descriptor is 8 bytes, hence the index is scaled by 8 (lsl #3). */
529 str x13, [x6, x11, lsl #3]
539 * Builds count 2 MiB page table entries
541 * x7 = Type (0 = Device, 1 = Normal)
543 * x9 = PA start (trashed)
545 * x11, x12 and x13 are trashed
/* NOTE(review): excerpt — the loop counter (presumably x10 = count, cf. callers) and the branch back to label 1 are not visible here. */
547 build_l2_block_pagetable:
549 * Build the L2 table entry.
551 /* Find the table index */
/* index = (VA >> L2_SHIFT) & Ln_ADDR_MASK */
552 lsr x11, x8, #L2_SHIFT
553 and x11, x11, #Ln_ADDR_MASK
555 /* Build the L2 block entry */
/* Descriptor gains: block type | Access Flag | EL0 execute-never | Inner Shareable. */
557 orr x12, x12, #L2_BLOCK
558 orr x12, x12, #(ATTR_AF)
559 orr x12, x12, #(ATTR_UXN)
561 orr x12, x12, ATTR_SH(ATTR_SH_IS)
564 /* Only use the output address bits */
565 lsr x9, x9, #L2_SHIFT
567 /* Set the physical address for this virtual address */
568 1: orr x13, x12, x9, lsl #L2_SHIFT
570 /* Store the entry */
/* Each descriptor is 8 bytes, hence the index is scaled by 8 (lsl #3). */
571 str x13, [x6, x11, lsl #3]
/*
 * MMU enable sequence (excerpt): install vectors and translation table
 * bases, invalidate stale TLB entries, then program TCR_EL1 from the
 * CPU's reported capabilities.
 */
583 /* Load the exception vectors */
584 ldr x2, =exception_vectors
587 /* Load ttbr0 and ttbr1 */
592 /* Clear the Monitor Debug System control register */
595 /* Invalidate the TLB */
602 * Setup TCR according to the PARange and ASIDBits fields
603 * from ID_AA64MMFR0_EL1. More precisely, set TCR_EL1.AS
604 * to 1 only if the ASIDBits field equals 0b0010.
607 mrs x3, id_aa64mmfr0_el1
619 bic x1, x1, x3 /* Clear the required bits */
620 orr x1, x1, x2 /* Set the required bits */
/* mair_el1 value: memory-attribute indices 0=Device-nGnRnE, 1=Normal-NC, 2=Normal-WB, 3=Normal-WT. */
628 .quad MAIR_ATTR(MAIR_DEVICE_nGnRnE, 0) | \
629 MAIR_ATTR(MAIR_NORMAL_NC, 1) | \
630 MAIR_ATTR(MAIR_NORMAL_WB, 2) | \
631 MAIR_ATTR(MAIR_NORMAL_WT, 3)
/* Base tcr_el1 value: VA size, 4K granule for TTBR1, cacheability/shareability attributes. */
633 .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG1_4K | \
634 TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
/* sctlr_el1 bits to set: caches, MMU, alignment/stack checks, etc. */
637 .quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
638 SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
639 SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
640 SCTLR_M | SCTLR_CP15BEN)
/* sctlr_el1 bits to clear (list continues past this excerpt). */
643 .quad (SCTLR_EE | SCTLR_EOE | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
650 //.section .init_pagetable
651 .align 12 /* 4KiB aligned */
653 * 3 initial tables (in the following order):
654 * L2 for kernel (High addresses)
656 * L1 for user (Low addresses)
/* Storage for the DMAP L1 tables, sized by the DMAP_TABLES macro above. */
669 .globl pagetable_dmap
671 .space PAGE_SIZE * DMAP_TABLES
679 .quad pagetable /* XXX: Keep page tables VA */
/* Bootstrap kernel stack for the boot CPU. */
683 .space (PAGE_SIZE * KSTACK_PAGES)
/* 64-bit signal trampoline (excerpt): invoke sigreturn, exit if it returns. */
692 mov x8, #SYS_sigreturn
695 /* sigreturn failed, exit */
701 /* This may be copied to the stack, keep it 16-byte aligned */
/* Size of the 64-bit trampoline, exported for copying onto user stacks. */
709 .quad esigcode - sigcode
/*
 * 32-bit ARM (AArch32) signal trampoline. Emitted as raw .word opcodes
 * because this file is assembled for AArch64; the decoded instruction is
 * shown in each line's comment. It calls sigreturn via swi, then spins on
 * a second swi / branch-back loop if that fails.
 */
711 ENTRY(aarch32_sigcode)
712 .word 0xe1a0000d // mov r0, sp
713 .word 0xe2800040 // add r0, r0, #SIGF_UC
714 .word 0xe59f700c // ldr r7, [pc, #12]
715 .word 0xef000000 // swi #0
716 .word 0xe59f7008 // ldr r7, [pc, #8]
717 .word 0xef000000 // swi #0
718 .word 0xeafffffa // b . - 16
/* Size of the 32-bit trampoline, exported for copying onto user stacks. */
725 .global sz_aarch32_sigcode
727 .quad aarch32_esigcode - aarch32_sigcode