2 * Copyright (c) 2015-2018 Ruslan Bukin <br@bsdpad.com>
5 * Portions of this software were developed by SRI International and the
6 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
7 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
9 * Portions of this software were developed by the University of Cambridge
10 * Computer Laboratory as part of the CTSRD Project, with support from the
11 * UK Higher Education Innovation Fund (HEIF).
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/syscall.h>
38 #include <machine/asm.h>
39 #include <machine/param.h>
40 #include <machine/trap.h>
41 #include <machine/riscvreg.h>
42 #include <machine/pte.h>
/* Assembler-visible alias for the kernel's virtual base (from machine/param.h). */
45 .set kernbase, KERNBASE
49 * Alternate entry point. Used when booting via SBI firmware. It must be placed
50 * at the beginning of the .text section. Arguments are as follows:
54 * Multiple CPUs might enter from this point, so we perform a hart lottery and
55 * send the losers to mpentry.
/* NOTE(review): the entry label itself is not visible in this excerpt. */
59 /* Set the global pointer */
62 lla gp, __global_pointer$
65 /* Pick a hart to run the boot process. */
/*
 * amoadd.w atomically fetch-adds t1 into the word at (t0) and returns the
 * old value in t0; the hart that reads 0 wins the lottery and becomes the
 * BSP. The instructions loading t0 (lottery word address) and t1 (increment)
 * are not visible in this excerpt.
 */
68 amoadd.w t0, t1, 0(t0)
71 * We must jump to mpentry in the non-BSP case because the offset is
72 * too large to fit in a 12-bit branch immediate.
77 /* Store the boot hart */
81 /* Load zero as modulep */
86 * Main entry point. This routine is marked as the ELF entry, and is where
87 * loader(8) will enter the kernel. Arguments are as follows:
91 * It is expected that only a single CPU will enter here.
95 /* Set the global pointer */
/* lla is PC-relative, so gp is correct while still running at the physical load address. */
98 lla gp, __global_pointer$
102 * Zero a1 to indicate that we have no DTB pointer. It is already
103 * included in the loader(8) metadata.
108 * Set up page tables: map a 1GB region starting at KERNBASE using 2MB
109 * superpages, starting from the first 2MB physical page into which the
110 * kernel was loaded. Also reserve an L2 page for the early device map
111 * and map the DTB, if any, using the second-last entry of that L2
112 * page. This is hopefully enough to get us to pmap_bootstrap().
114 * Implementations are required to provide SV39 mode, so we use that
115 * initially and will optionally enable SV48 mode during kernel pmap
118 * a0 - modulep or zero
122 /* Get the kernel's load address */
/* NOTE(review): per the helper below, the load address comes back in s9. */
125 /* Add L1 entry for kernel */
/* An L1 entry holds the page number (PN) of the next-level (L2) table. */
127 lla s2, pagetable_l2 /* Link to next level PN */
128 srli s2, s2, PAGE_SHIFT
/* Compute the L1 index of KERNBASE (a5 setup not visible in this excerpt). */
131 srli a5, a5, L1_SHIFT /* >> L1_SHIFT */
132 andi a5, a5, Ln_ADDR_MASK /* & Ln_ADDR_MASK */
134 slli t5, s2, PTE_PPN0_S /* (s2 << PTE_PPN0_S) */
137 /* Store L1 PTE entry to position */
143 /* Level 2 superpages (512 x 2MiB) */
/* t4 = PPN of the first 2 MiB superpage covering the kernel's physical base. */
145 srli t4, s9, L2_SHIFT /* Div physmem base by 2 MiB */
146 li t2, 512 /* Build 512 entries */
/* Leaf-PTE permission bits: kernel RW plus execute. */
148 li t0, (PTE_KERN | PTE_X)
/* Loop body (loop label/branch not visible): build one 2 MiB leaf PTE per iteration. */
150 slli t2, t4, PTE_PPN1_S /* << PTE_PPN1_S */
152 sd t5, (s1) /* Store PTE entry to position */
153 addi s1, s1, PTE_SIZE
158 /* Create an L1 table entry for early devmap */
160 lla s2, pagetable_l2_devmap /* Link to next level PN */
161 srli s2, s2, PAGE_SHIFT
/* Index the L1 slot that covers the top-of-kernel-VA early devmap window. */
163 li a5, (VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE)
164 srli a5, a5, L1_SHIFT /* >> L1_SHIFT */
165 andi a5, a5, Ln_ADDR_MASK /* & Ln_ADDR_MASK */
167 slli t5, s2, PTE_PPN0_S /* (s2 << PTE_PPN0_S) */
170 /* Store the L1 table entry */
176 /* Check if we have a DTB that needs to be mapped */
179 /* Create an L2 mapping for the DTB */
180 lla s1, pagetable_l2_devmap
182 srli s2, s2, PAGE_SHIFT
183 /* Mask off any bits that aren't aligned */
/* Round the DTB PPN down to a 2 MiB (L2 superpage) boundary. */
184 andi s2, s2, ~((1 << (PTE_PPN1_S - PTE_PPN0_S)) - 1)
187 slli t2, s2, PTE_PPN0_S /* << PTE_PPN0_S */
190 /* Store the L2 table entry for the DTB */
192 li a5, VM_EARLY_DTB_ADDRESS
193 srli a5, a5, L2_SHIFT /* >> L2_SHIFT */
194 andi a5, a5, Ln_ADDR_MASK /* & Ln_ADDR_MASK */
199 /* Page tables END */
201 /* Setup supervisor trap vector */
209 /* Set page tables base register */
/* satp takes the root table's PPN; OR in the mode bits (writes not visible here). */
211 srli s2, s2, PAGE_SHIFT
212 li t0, SATP_MODE_SV39
219 /* Set the global pointer again, this time with the virtual address. */
222 lla gp, __global_pointer$
225 /* Setup supervisor trap vector */
226 la t0, cpu_exception_handler
229 /* Ensure sscratch is zero */
233 /* Initialize stack pointer */
236 /* Clear frame pointer */
239 /* Allocate space for thread0 PCB and riscv_bootparams */
/* Keep sp 16-byte aligned per the RISC-V psABI while carving out both structs. */
240 addi sp, sp, -(PCB_SIZE + RISCV_BOOTPARAMS_SIZE) & ~STACKALIGNBYTES
/* BSS-clearing bounds (the clear loop itself is not visible in this excerpt). */
243 la t0, _C_LABEL(__bss_start)
244 la t1, _C_LABEL(_end)
250 /* Fill riscv_bootparams */
252 sd t0, RISCV_BOOTPARAMS_KERN_L1PT(sp)
/* s9 = kernel's physical load base, computed by the helper below. */
253 sd s9, RISCV_BOOTPARAMS_KERN_PHYS(sp)
256 sd t0, RISCV_BOOTPARAMS_KERN_STACK(sp)
258 li t0, (VM_EARLY_DTB_ADDRESS)
259 /* Add offset of DTB within superpage */
263 sd t0, RISCV_BOOTPARAMS_DTBP_VIRT(sp)
264 sd a1, RISCV_BOOTPARAMS_DTBP_PHYS(sp)
266 sd a0, RISCV_BOOTPARAMS_MODULEP(sp)
/* Hand off to C: early MD init, then the kernel's SYSINIT-driven startup. */
269 call _C_LABEL(initriscv) /* Off we go */
270 call _C_LABEL(mi_startup)
272 /* We should never reach here, but if so just hang. */
278 * Get the physical address the kernel is loaded to. Returned in s9.
/*
 * Technique: virt_map stores its own link-time (virtual) address. Comparing
 * that with its PC-relative (physical) address yields the phys->virt delta,
 * which is then subtracted from a virtual reference (t2 setup not visible).
 */
281 lla t0, virt_map /* physical address of virt_map */
282 ld t1, 0(t0) /* virtual address of virt_map */
283 sub t1, t1, t0 /* calculate phys->virt delta */
285 sub s9, t2, t1 /* s9 = physmem base */
/* Static bootstrap stack for thread0 (label not visible in this excerpt). */
290 .space (PAGE_SIZE * KSTACK_PAGES)
301 /* sigreturn failed, exit */
307 /* This may be copied to the stack, keep it 16-byte aligned */
315 .quad esigcode - sigcode
333 .quad pagetable_l2 /* XXX: Keep page tables VA */
343 * mpentry(unsigned long)
345 * Called by a core when it is being brought online.
/* NOTE(review): a0 = cpuid; this mirrors the BSP's virtual-switch sequence above. */
349 * Calculate the offset to __riscv_boot_ap
350 * for the current core, cpuid is in a0.
354 /* Get the pointer */
355 lla t0, __riscv_boot_ap
/* Spin until the BSP flags this AP's __riscv_boot_ap slot (loop not visible). */
359 /* Wait the kernel to be ready */
363 /* Setup stack pointer */
367 /* Get the kernel's load address */
370 /* Setup supervisor trap vector */
377 /* Set page tables base register */
381 /* satp wants the root table PPN plus the Sv39 mode bits. */
379 srli s2, s2, PAGE_SHIFT
380 li t0, SATP_MODE_SV39
387 /* Set the global pointer again, this time with the virtual address. */
390 lla gp, __global_pointer$
393 /* Setup supervisor trap vector */
394 la t0, cpu_exception_handler
397 /* Ensure sscratch is zero */