/* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */

/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Updated : 18/04/01 updated for new wscons
 */
#include "opt_compat.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"
#include "opt_sched.h"
#include "opt_timer.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/devmap.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <machine/debug_monitor.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/physmem.h>
#include <machine/platform.h>
#include <machine/sysarch.h>
#include <machine/undefined.h>
#include <machine/vfp.h>
#include <machine/vmparam.h>

#include <dev/fdt/fdt_common.h>
#include <machine/ofw_machdep.h>
#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \
    defined(COMPAT_FREEBSD9)
#error FreeBSD/arm doesn't provide compatibility with releases prior to 10
#endif

#if __ARM_ARCH >= 6 && !defined(INTRNG)
#error armv6 requires INTRNG
#endif
struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;

vm_offset_t vector_page;

int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;

#if __ARM_ARCH >= 6
vm_offset_t systempage;
vm_offset_t irqstack;
vm_offset_t undstack;
vm_offset_t abtstack;
#else
/*
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks, etc.), rounded up to be divisible by 4.
 */
#define KERNEL_PT_MAX	78

static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];
struct pv_addr systempage;
static struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
static struct pv_addr kernelstack;
#endif /* __ARM_ARCH >= 6 */
static delay_func *delay_impl;
static void *delay_arg;

struct kva_md_info kmi;
/*
 * Initialize the vector page, and select whether or not to
 * relocate the vectors.
 *
 * NOTE: We expect the vector page to be mapped at its expected
 * destination.
 */
extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (unsigned int *)va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;
	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));
	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Enable high vectors in the system control reg (SCTLR).
		 *
		 * Assume the MD caller knows what it's doing here, and really
		 * does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}
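
/*
 * Illustrative sketch, not part of the original file: the 'which' argument
 * is a bitmask selecting which of the ARM_NVEC vectors to take over, as the
 * (which & (1 << vec)) test above shows.  initarm() below installs the whole
 * set with ARM_VEC_ALL; a hypothetical caller could take over a single
 * vector by passing one bit instead.
 */
#if 0
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);	/* all vectors */
	arm_vector_init(ARM_VECTORS_HIGH, 1 << 1);	/* a single vector */
#endif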
static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;
#if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE)
	vm_page_t m;
#endif
	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
	 */
	printf("real memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count),
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte);
	if (bootverbose) {
		arm_physmem_print_tables();
		devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	pmap_set_pcb_pagedir(kernel_pmap, pcb);
#if __ARM_ARCH < 6
	vector_page_setprot(VM_PROT_READ);

#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
	*(uint32_t *)ARM_RAS_START = 0;
	*(uint32_t *)ARM_RAS_END = 0xffffffff;
#endif
}
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
}
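
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * that stores instructions through the data side flushes the D-cache first,
 * so that a later icache_sync() of the same range observes the new contents.
 * 'dst', 'code' and 'len' are assumed names.
 */
#if 0
	memcpy(dst, code, len);		/* write instructions via the D-side */
	cpu_flush_dcache(dst, len);	/* push them to the point of coherency */
#endif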
/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	return (ENXIO);
}

void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
	spinlock_enter();
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_idleclock();
#endif
	if (!sched_runnable())
		cpu_sleep(0);
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_activeclock();
#endif
	spinlock_exit();
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
}
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

#ifdef NO_EVENTTIMERS
/*
 * Most ARM platforms don't need to do anything special to init their clocks
 * (they get initialized during normal device attachment), and by not defining
 * a cpu_initclocks() function they get this generic one.  Any platform that
 * needs to do something special can provide its own implementation, which
 * will override this one due to the weak linkage.
 */
void
arm_generic_initclocks(void)
{
}
__weak_reference(arm_generic_initclocks, cpu_initclocks);
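
/*
 * Illustrative sketch, not part of the original file: because the alias
 * above is weak, a platform overrides it simply by defining its own strong
 * cpu_initclocks(); the linker then prefers the strong definition.
 */
#if 0
void
cpu_initclocks(void)
{
	/* platform-specific clock/timer initialization would go here */
}
#endif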
#else
void
cpu_initclocks(void)
{

#ifdef SMP
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
}
#endif
void
arm_set_delay(delay_func *impl, void *arg)
{

	KASSERT(impl != NULL, ("No DELAY implementation"));
	delay_impl = impl;
	delay_arg = arg;
}

void
DELAY(int usec)
{

	delay_impl(usec, delay_arg);
}
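
/*
 * Illustrative sketch, not part of the original file: a platform timer
 * driver is expected to register its calibrated busy-wait routine once
 * during attach, after which DELAY() dispatches through the stored pointer.
 * The driver names here are hypothetical.
 */
#if 0
static void
my_timer_delay(int usec, void *arg)
{
	struct my_timer_softc *sc = arg;

	/* spin on the timer's counter until 'usec' microseconds elapse */
}

	/* ...in the driver's attach routine: */
	arm_set_delay(my_timer_delay, sc);
#endif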
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}
void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(PSR_I | PSR_F);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
	} else
		td->td_md.md_spinlock_count++;
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
}
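
/*
 * Illustrative sketch, not part of the original file: enter/exit calls
 * nest, so the saved CPSR is only restored when the outermost exit drops
 * the count back to zero.
 */
#if 0
	spinlock_enter();	/* count 0 -> 1, PSR_I|PSR_F masked, CPSR saved */
	spinlock_enter();	/* count 1 -> 2, no state change */
	spinlock_exit();	/* count 2 -> 1, interrupts stay masked */
	spinlock_exit();	/* count 1 -> 0, saved CPSR restored */
#endif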
/*
 * Clear registers on exec
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(*tf));
	tf->tf_usr_sp = stack;
	tf->tf_usr_lr = imgp->entry_addr;
	tf->tf_svc_lr = 0x77777777;	/* recognizable sentinel, never a valid return address */
	tf->tf_pc = imgp->entry_addr;
	tf->tf_spsr = PSR_USR32_MODE;
}
/*
 * Get machine VFP context.
 */
void
get_vfpcontext(struct thread *td, mcontext_vfp_t *vfp)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (td == curthread) {
		critical_enter();
		vfp_store(&pcb->pcb_vfpstate, false);
		critical_exit();
	} else
		MPASS(TD_IS_SUSPENDED(td));
	memcpy(vfp->mcv_reg, pcb->pcb_vfpstate.reg,
	    sizeof(vfp->mcv_reg));
	vfp->mcv_fpscr = pcb->pcb_vfpstate.fpscr;
}
/*
 * Set machine VFP context.
 */
void
set_vfpcontext(struct thread *td, mcontext_vfp_t *vfp)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (td == curthread) {
		critical_enter();
		vfp_discard(td);
		critical_exit();
	} else
		MPASS(TD_IS_SUSPENDED(td));
	memcpy(pcb->pcb_vfpstate.reg, vfp->mcv_reg,
	    sizeof(pcb->pcb_vfpstate.reg));
	pcb->pcb_vfpstate.fpscr = vfp->mcv_fpscr;
}
/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;

	if (clear_ret & GET_MC_CLEAR_RET) {
		gr[_REG_R0] = 0;
		gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C;
	} else {
		gr[_REG_R0] = tf->tf_r0;
		gr[_REG_CPSR] = tf->tf_spsr;
	}
	gr[_REG_R1] = tf->tf_r1;
	gr[_REG_R2] = tf->tf_r2;
	gr[_REG_R3] = tf->tf_r3;
	gr[_REG_R4] = tf->tf_r4;
	gr[_REG_R5] = tf->tf_r5;
	gr[_REG_R6] = tf->tf_r6;
	gr[_REG_R7] = tf->tf_r7;
	gr[_REG_R8] = tf->tf_r8;
	gr[_REG_R9] = tf->tf_r9;
	gr[_REG_R10] = tf->tf_r10;
	gr[_REG_R11] = tf->tf_r11;
	gr[_REG_R12] = tf->tf_r12;
	gr[_REG_SP] = tf->tf_usr_sp;
	gr[_REG_LR] = tf->tf_usr_lr;
	gr[_REG_PC] = tf->tf_pc;

	mcp->mc_vfp_size = 0;
	mcp->mc_vfp_ptr = NULL;
	memset(&mcp->mc_spare, 0, sizeof(mcp->mc_spare));

	return (0);
}
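
/*
 * Illustrative sketch, not part of the original file: a caller with
 * getcontext()-style semantics passes GET_MC_CLEAR_RET so that a context
 * resumed later appears to return success (r0 = 0 and PSR_C clear, since
 * the carry flag signals a syscall error on FreeBSD/arm).
 */
#if 0
	get_mcontext(td, &mcp, GET_MC_CLEAR_RET);	/* getcontext()-style */
	get_mcontext(td, &mcp, 0);			/* raw register snapshot */
#endif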
/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the privileged bits of the CPSR.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	mcontext_vfp_t mc_vfp, *vfp;
	struct trapframe *tf = td->td_frame;
	const __greg_t *gr = mcp->__gregs;

	if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_size != sizeof(mc_vfp)) {
		printf("%s: %s: Malformed mc_vfp_size: %d (0x%08X)\n",
		    td->td_proc->p_comm, __func__,
		    mcp->mc_vfp_size, mcp->mc_vfp_size);
	} else if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_ptr == NULL) {
		printf("%s: %s: mc_vfp_size != 0 but mc_vfp_ptr == NULL\n",
		    td->td_proc->p_comm, __func__);
	}

	if (mcp->mc_vfp_size == sizeof(mc_vfp) && mcp->mc_vfp_ptr != NULL) {
		if (copyin(mcp->mc_vfp_ptr, &mc_vfp, sizeof(mc_vfp)) != 0)
			return (EFAULT);
		vfp = &mc_vfp;
	} else {
		vfp = NULL;
	}
	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_r12 = gr[_REG_R12];
	tf->tf_usr_sp = gr[_REG_SP];
	tf->tf_usr_lr = gr[_REG_LR];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_spsr = gr[_REG_CPSR];

	if (vfp != NULL)
		set_vfpcontext(td, vfp);
	return (0);
}
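
/*
 * Illustrative sketch, not part of the original file: a userland context
 * that wants its VFP state restored must set both fields consistently,
 * or the checks above will ignore (or warn about) it.  'uc' and
 * 'vfp_state' are hypothetical user-space objects.
 */
#if 0
	uc.uc_mcontext.mc_vfp_size = sizeof(mcontext_vfp_t);
	uc.uc_mcontext.mc_vfp_ptr = &vfp_state;	/* user-space buffer */
#endif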
void
sendsig(catcher, ksi, mask)
	sig_t catcher;
	ksiginfo_t *ksi;
	sigset_t *mask;
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	struct sysentvec *sysent;
	int onstack;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	onstack = sigonstack(tf->tf_usr_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct sigframe *)td->td_frame->tf_usr_sp;

	/* make room on the stack */
	fp--;

	/* make the stack aligned */
	fp = (struct sigframe *)STACKALIGN(fp);
	/* Populate the siginfo frame. */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
#ifdef VFP
	get_vfpcontext(td, &frame.sf_vfp);
	frame.sf_uc.uc_mcontext.mc_vfp_size = sizeof(fp->sf_vfp);
	frame.sf_uc.uc_mcontext.mc_vfp_ptr = &fp->sf_vfp;
#else
	frame.sf_uc.uc_mcontext.mc_vfp_size = 0;
	frame.sf_uc.uc_mcontext.mc_vfp_ptr = NULL;
#endif
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}
	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.  Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */
	tf->tf_r0 = sig;
	tf->tf_r1 = (register_t)&fp->sf_si;
	tf->tf_r2 = (register_t)&fp->sf_uc;

	/* the trampoline uses r5 as the uc address */
	tf->tf_r5 = (register_t)&fp->sf_uc;
	tf->tf_pc = (register_t)catcher;
	tf->tf_usr_sp = (register_t)fp;
	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_usr_lr = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));
	/* Set the mode to enter in the signal handler */
	if ((register_t)catcher & 1)
		tf->tf_spsr |= PSR_T;
	else
		tf->tf_spsr &= ~PSR_T;

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
	    tf->tf_usr_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
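
/*
 * Illustrative summary, not part of the original file: the register
 * contract established above, as the handler sees it on entry:
 *
 *	r0 = signal number	r1 = &fp->sf_si		r2 = &fp->sf_uc
 *	r5 = &fp->sf_uc (trampoline)	sp = fp		lr = sigcode trampoline
 */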
int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	int spsr;

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (PSR_I | PSR_F)) != 0)
		return (EINVAL);

	/* Restore register context. */
	set_mcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
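
/*
 * Illustrative sketch, not part of the original file: forged contexts that
 * try to escalate out of user mode or mask interrupts fail the CPSR check
 * above.
 */
#if 0
	uc.uc_mcontext.__gregs[_REG_CPSR] = PSR_SVC32_MODE;		/* EINVAL */
	uc.uc_mcontext.__gregs[_REG_CPSR] = PSR_USR32_MODE | PSR_I;	/* EINVAL */
#endif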
/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	pcb->pcb_regs.sf_r4 = tf->tf_r4;
	pcb->pcb_regs.sf_r5 = tf->tf_r5;
	pcb->pcb_regs.sf_r6 = tf->tf_r6;
	pcb->pcb_regs.sf_r7 = tf->tf_r7;
	pcb->pcb_regs.sf_r8 = tf->tf_r8;
	pcb->pcb_regs.sf_r9 = tf->tf_r9;
	pcb->pcb_regs.sf_r10 = tf->tf_r10;
	pcb->pcb_regs.sf_r11 = tf->tf_r11;
	pcb->pcb_regs.sf_r12 = tf->tf_r12;
	pcb->pcb_regs.sf_pc = tf->tf_pc;
	pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
	pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
}
void
pcpu0_init(void)
{
#if __ARM_ARCH >= 6
	set_curthread(&thread0);
#endif
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);
}
/*
 * Initialize proc0
 */
void
init_proc0(vm_offset_t kstack)
{
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_vfpcpu = -1;
	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}
#if __ARM_ARCH >= 6
void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#else
void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#endif
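
/*
 * Illustrative arithmetic, not part of the original file: with
 * IRQ_STACK_SIZE pages per CPU, CPU n's IRQ stack occupies pages
 * [n, n + 1) of the irqstack allocation.  ARM's STMFD pushes with
 * pre-decrement, so r13 is set to the *end* of the region, i.e. the
 * (n + 1) page boundary; e.g. IRQ_STACK_SIZE == 1, PAGE_SIZE == 4096
 * and cpu == 2 gives sp = irqstack + 4096 * 3.
 */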
#if __ARM_ARCH < 6
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	vm_offset_t l1pagetable;
	uint64_t memsize;
	uint32_t l2size;
	char *env;
	void *kmdp;
	int i, j, err_devmap, mem_regions_sz;

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;
	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp != NULL)
		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	else
		dtbp = (vm_offset_t)NULL;

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
		panic("Cannot get physical memory regions");
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/* Platform-specific initialisation */
	platform_probe_and_attach();
	/* Do basic tuning, hz etc */
	init_param1();

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for end of kernel map, one for stacks, msgbuf and
	 * L1 and L2 tables map, one for vectors map and two for
	 * l2 structures from pmap_bootstrap.
	 */
	l2size += 5;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;
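
	/*
	 * Illustrative arithmetic, not part of the original file: the
	 * expression (x + 3) & ~3 rounds up to the next multiple of 4,
	 * e.g. 5 -> 8 and 8 -> 8.  Four 1 KiB coarse L2 tables fit in one
	 * 4 KiB page, so padding l2size to a multiple of 4 lets the
	 * allocation loop below hand out whole pages.
	 */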
	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)						\
	alloc_pages((var).pv_va, (np));					\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += (np * PAGE_SIZE);					\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	for (i = 0, j = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    abp->abp_physaddr;
		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
	valloc_pages(kernelstack, kstack_pages * MAXCPU);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of kernel text and data using
	 * 1MB section mappings, and use L2 coarse tables for the rest of
	 * the initial kernel address space.
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures.
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0 ; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;
	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);
	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	devmap_bootstrap(l1pagetable, NULL);
	vm_max_kernel_address = platform_lastaddr();

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	cpu_setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/*
	 * Only after the SoC register block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();
	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are bootstrapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();
	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	pmap_bootstrap(freemempos, &kernel_l1pt);
	mutex_init();

	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	/*
	 * Exclude the kernel (and all the things we allocated which
	 * immediately follow the kernel) from the VM allocation pool but
	 * not from crash dumps. virtual_avail is a global variable which
	 * tracks the kva we've "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
#else /* __ARM_ARCH < 6 */
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	vm_paddr_t lastaddr;
	vm_offset_t dtbp, kernelstack, dpcpu;
	char *env;
	void *kmdp;
	int err_devmap, mem_regions_sz;
#ifdef EFI
	struct efi_map_header *efihdr;
#endif

	/* get last allocated physical address */
	arm_physmem_kernaddr = abp->abp_physaddr;
	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;
	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
#if defined(LINUX_BOOT_ABI)
	arm_parse_fdt_bootargs();
#endif

#ifdef EFI
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL) {
		arm_add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz);
	} else
#endif
	{
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0)
			panic("Cannot get physical memory regions");
	}
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
	/*
	 * Set TEX remapping registers.
	 * Setup kernel page tables and switch to kernel L1 page table.
	 */
	pmap_set_tex();
	pmap_bootstrap_prepare(lastaddr);

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();
	/* Platform-specific initialisation */
	platform_probe_and_attach();

	/* Do basic tuning, hz etc */
	init_param1();
	/*
	 * Allocate a page for the system page mapped to 0xffff0000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	systempage = pmap_preboot_get_pages(1);

	/* Map the vector page. */
	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1);
	if (virtual_end >= ARM_VECTORS_HIGH)
		virtual_end = ARM_VECTORS_HIGH - 1;
	/* Allocate dynamic per-cpu area. */
	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);

	/* Allocate stacks for all modes */
	irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
	abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
	undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
	kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU);

	/* Allocate message buffer. */
	msgbufp = (void *)pmap_preboot_get_vpages(
	    round_page(msgbufsize) / PAGE_SIZE);
	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);
	mutex_init();

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	devmap_bootstrap(0, NULL);
	vm_max_kernel_address = platform_lastaddr();
	/*
	 * Only after the SoC register block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	debugf(" lastaddr1: 0x%08x\n", lastaddr);
	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();
	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are bootstrapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	/* Set stack for exception handlers */
	init_proc0(kernelstack);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	enable_interrupts(PSR_A);
	/*
	 * Exclude the kernel (and all the things we allocated which
	 * immediately follow the kernel) from the VM allocation pool but
	 * not from crash dumps. virtual_avail is a global variable which
	 * tracks the kva we've "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();
	init_param2(physmem);
	/* Init message buffer. */
	msgbufinit(msgbufp, msgbufsize);
	kdb_init();

	return ((void *)STACKALIGN(thread0.td_pcb));
}
#endif /* __ARM_ARCH < 6 */