/*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/

/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Updated	: 18/04/01 updated for new wscons
 */
#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_platform.h"
#include "opt_sched.h"
#include "opt_timer.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <machine/armreg.h>
#include <machine/atags.h>
#include <machine/cpu.h>
#include <machine/devmap.h>
#include <machine/frame.h>
#include <machine/intr.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/physmem.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/undefined.h>
#include <machine/vmparam.h>
#include <machine/sysarch.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif
struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;
extern int *end;

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

/*
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks, etc.), rounded up to be divisible by 4.
 */
#define KERNEL_PT_MAX	78
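
/*
 * Rough sanity check of that bound (a sketch, not an exact computation;
 * sizeof(struct vm_page) varies with kernel options): 4GB of 4KB pages is
 * 2^20 vm_page structures, i.e. on the order of 70MB of vm_page_array,
 * and each L2 table covers 1MB of KVA, so roughly 70 tables plus a few
 * more for the fixed kernel mappings, rounded up, stays within 78.
 */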
static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;

struct pv_addr systempage;
static struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
static struct pv_addr kernelstack;
#if defined(LINUX_BOOT_ABI)
#define LBABI_MAX_BANKS		10
#define LBABI_MAX_COMMAND_LINE	512
uint32_t board_id;
struct arm_lbabi_tag *atag_list;
char linux_command_line[LBABI_MAX_COMMAND_LINE + 1];
char atags[LBABI_MAX_COMMAND_LINE * 2];
uint32_t memstart[LBABI_MAX_BANKS];
uint32_t memsize[LBABI_MAX_BANKS];
#endif
static uint32_t board_revision;
/* hex representation of uint64_t */
static char board_serial[32];

SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes");
SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD,
    &board_revision, 0, "Board revision");
SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD,
    board_serial, 0, "Board serial");

extern int vfp_exists;
SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &vfp_exists, 0, "Floating point support enabled");
void
board_set_serial(uint64_t serial)
{

	snprintf(board_serial, sizeof(board_serial) - 1,
	    "%016jx", (uintmax_t)serial);
}

void
board_set_revision(uint32_t revision)
{

	board_revision = revision;
}
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	int onstack;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	onstack = sigonstack(tf->tf_usr_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct sigframe *)td->td_frame->tf_usr_sp;

	/* Make room on the stack. */
	fp--;

	/* Align the stack. */
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Populate the siginfo frame. */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	/* Copy td_sigstk first so the ss_flags set below are not clobbered. */
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.  Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */
	tf->tf_r0 = sig;
	tf->tf_r1 = (register_t)&fp->sf_si;
	tf->tf_r2 = (register_t)&fp->sf_uc;

	/* The trampoline uses r5 as the uc address. */
	tf->tf_r5 = (register_t)&fp->sf_uc;
	tf->tf_pc = (register_t)catcher;
	tf->tf_usr_sp = (register_t)fp;
	tf->tf_usr_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
	    tf->tf_usr_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
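
/*
 * For reference, the register contract sendsig() establishes on handler
 * entry (the userland half lives in libc's signal trampoline):
 *
 *	r0 = signal number (possibly translated via sv_sigtbl)
 *	r1 = &fp->sf_si (siginfo_t *)
 *	r2 = &fp->sf_uc (ucontext_t *)
 *	r5 = &fp->sf_uc (the trampoline's copy of the ucontext pointer)
 *	sp = fp, the aligned sigframe
 *	lr = trampoline entry, so returning from the handler enters the
 *	     trampoline, which in turn calls sigreturn(2).
 */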
struct kva_md_info kmi;

/*
 * Initialize the vector page, and select whether or not to
 * relocate the vectors.
 *
 * NOTE: We expect the vector page to be mapped at its expected
 * destination.
 */

extern unsigned int page0[], page0_data[];

void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (int *)va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}
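
/*
 * Typical use, as done from initarm() below: take over all vectors and
 * relocate the vector page to the high address:
 *
 *	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 */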
static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;
#ifdef ARM_TP_ADDRESS
#ifndef ARM_CACHE_LOCK_ENABLE
	vm_page_t m;
#endif
#endif

	identify_arm_cpu();

	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
	 */
	printf("real memory  = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(cnt.v_free_count),
	    (uintmax_t)arm32_ptob(cnt.v_free_count) / mbyte);
	if (bootverbose) {
		arm_physmem_print_tables();
		arm_devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	pcb->un_32.pcb32_und_sp = (u_int)thread0.td_kstack +
	    USPACE_UNDEF_STACK_TOP;
	pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	vector_page_setprot(VM_PROT_READ);
	pmap_set_pcb_pagedir(pmap_kernel(), pcb);
	pmap_postinit();
#ifdef ARM_TP_ADDRESS
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
	*(uint32_t *)ARM_RAS_START = 0;
	*(uint32_t *)ARM_RAS_END = 0xffffffff;
#endif
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	cpu_dcache_wb_range((uintptr_t)ptr, len);
#ifdef L2_PIPT
	cpu_l2cache_wb_range((uintptr_t)vtophys(ptr), len);
#else
	cpu_l2cache_wb_range((uintptr_t)ptr, len);
#endif
}
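
/*
 * The two L2 variants above differ because a physically-indexed (PIPT)
 * outer cache must be handed a physical address, hence the vtophys()
 * translation, while a virtually-indexed one takes the VA directly.
 */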
/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	return (ENXIO);
}

void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#ifndef NO_EVENTTIMERS
	if (!busy) {
		critical_enter();
		cpu_idleclock();
	}
#endif
	if (!sched_runnable())
		cpu_sleep(0);
#ifndef NO_EVENTTIMERS
	if (!busy) {
		critical_exit();
		cpu_activeclock();
	}
#endif
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}
/*
 * Most ARM platforms don't need to do anything special to init their clocks
 * (they get initialized during normal device attachment), and by not defining
 * a cpu_initclocks() function they get this generic one.  Any platform that
 * needs to do something special can just provide its own implementation,
 * which will override this one due to the weak linkage.
 */
void
arm_generic_initclocks(void)
{

#ifndef NO_EVENTTIMERS
#ifdef SMP
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
#endif
}
__weak_reference(arm_generic_initclocks, cpu_initclocks);
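
/*
 * A platform override is simply a strong definition of cpu_initclocks().
 * A hedged sketch (my_board_timer_init() is hypothetical, not a real
 * function) of what a board needing special setup might provide:
 *
 *	void
 *	cpu_initclocks(void)
 *	{
 *		my_board_timer_init();
 *		arm_generic_initclocks();
 *	}
 */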
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
	regs->r_sp = tf->tf_usr_sp;
	regs->r_lr = tf->tf_usr_lr;
	regs->r_pc = tf->tf_pc;
	regs->r_cpsr = tf->tf_spsr;
	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{

	bzero(regs, sizeof(*regs));
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
	tf->tf_usr_sp = regs->r_sp;
	tf->tf_usr_lr = regs->r_lr;
	tf->tf_pc = regs->r_pc;
	/* Only let the debugger change the user-modifiable flags. */
	tf->tf_spsr &= ~PSR_FLAGS;
	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{

	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}
static int
ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t)v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;
	return proc_rwmem(td->td_proc, &uio);
}

static int
ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t)&v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	return proc_rwmem(td->td_proc, &uio);
}
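
/*
 * Usage sketch, mirroring what ptrace_single_step() below does: fetch the
 * word after the current pc, then replace it with a breakpoint:
 *
 *	uint32_t insn;
 *	error = ptrace_read_int(td, td->td_frame->tf_pc + 4, &insn);
 *	if (error == 0)
 *		error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
 *		    PTRACE_BREAKPOINT);
 */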
int
ptrace_single_step(struct thread *td)
{
	struct proc *p;
	int error;

	KASSERT(td->td_md.md_ptrace_instr == 0,
	    ("Didn't clear single step"));
	p = td->td_proc;
	PROC_UNLOCK(p);
	error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
	    &td->td_md.md_ptrace_instr);
	if (error)
		goto out;
	error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
	    PTRACE_BREAKPOINT);
	if (error)
		td->td_md.md_ptrace_instr = 0;
	td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
out:
	PROC_LOCK(p);
	return (error);
}
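
/*
 * Single-stepping is emulated in software: these CPUs have no hardware
 * single-step, so a breakpoint is planted in the slot following the
 * current pc and the displaced instruction is stashed in md_ptrace_instr
 * until ptrace_clear_single_step() puts it back.  Note the pc + 4
 * arithmetic assumes a 4-byte ARM-mode instruction, not Thumb.
 */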
int
ptrace_clear_single_step(struct thread *td)
{
	struct proc *p;

	if (td->td_md.md_ptrace_instr) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr = 0;
	}
	return (0);
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{

	td->td_frame->tf_pc = addr;
	return (0);
}
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(I32_bit | F32_bit);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	critical_exit();
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
}
/*
 * Clear registers on exec
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(*tf));
	tf->tf_usr_sp = stack;
	tf->tf_usr_lr = imgp->entry_addr;
	tf->tf_svc_lr = 0x77777777;	/* recognizably bogus sentinel */
	tf->tf_pc = imgp->entry_addr;
	tf->tf_spsr = PSR_USR32_MODE;
}
/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;

	if (clear_ret & GET_MC_CLEAR_RET)
		gr[_REG_R0] = 0;
	else
		gr[_REG_R0] = tf->tf_r0;
	gr[_REG_R1]   = tf->tf_r1;
	gr[_REG_R2]   = tf->tf_r2;
	gr[_REG_R3]   = tf->tf_r3;
	gr[_REG_R4]   = tf->tf_r4;
	gr[_REG_R5]   = tf->tf_r5;
	gr[_REG_R6]   = tf->tf_r6;
	gr[_REG_R7]   = tf->tf_r7;
	gr[_REG_R8]   = tf->tf_r8;
	gr[_REG_R9]   = tf->tf_r9;
	gr[_REG_R10]  = tf->tf_r10;
	gr[_REG_R11]  = tf->tf_r11;
	gr[_REG_R12]  = tf->tf_r12;
	gr[_REG_SP]   = tf->tf_usr_sp;
	gr[_REG_LR]   = tf->tf_usr_lr;
	gr[_REG_PC]   = tf->tf_pc;
	gr[_REG_CPSR] = tf->tf_spsr;

	return (0);
}
/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	const __greg_t *gr = mcp->__gregs;

	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_r12 = gr[_REG_R12];
	tf->tf_usr_sp = gr[_REG_SP];
	tf->tf_usr_lr = gr[_REG_LR];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_spsr = gr[_REG_CPSR];

	return (0);
}
int
sys_sigreturn(struct thread *td, struct sigreturn_args /* {
	const struct __ucontext *sigcntxp;
} */ *uap)
{
	ucontext_t uc;
	int spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);
	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (I32_bit | F32_bit)) != 0)
		return (EINVAL);

	/* Restore register context. */
	set_mcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->un_32.pcb32_r8 = tf->tf_r8;
	pcb->un_32.pcb32_r9 = tf->tf_r9;
	pcb->un_32.pcb32_r10 = tf->tf_r10;
	pcb->un_32.pcb32_r11 = tf->tf_r11;
	pcb->un_32.pcb32_r12 = tf->tf_r12;
	pcb->un_32.pcb32_pc = tf->tf_pc;
	pcb->un_32.pcb32_lr = tf->tf_usr_lr;
	pcb->un_32.pcb32_sp = tf->tf_usr_sp;
}
/*
 * Fake up a boot descriptor table
 */
vm_offset_t
fake_preload_metadata(struct arm_boot_params *abp __unused)
{
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i = 0;
	static uint32_t fake_preload[35];

	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char *)&fake_preload[i++], "kernel");
	i += 1;
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char *)&fake_preload[i++], "elf kernel");
	i += 2;
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		ksym_start = zstart;
		ksym_end = zend;
	} else
#endif
		lastaddr = (vm_offset_t)&end;
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	return (lastaddr);
}
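
/*
 * The resulting fake_preload[] is a flat list of (key, length, value)
 * records terminated by a zero key, mimicking what loader(8) would have
 * passed:
 *
 *	MODINFO_NAME, 7, "kernel"
 *	MODINFO_TYPE, 11, "elf kernel"
 *	MODINFO_ADDR, 4, KERNVIRTADDR
 *	MODINFO_SIZE, 4, &end - KERNVIRTADDR
 *	0
 */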
void
pcpu0_init(void)
{
#if ARM_ARCH_6 || ARM_ARCH_7A || defined(CPU_MV_PJ4B)
	set_curthread(&thread0);
#endif
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);
}
#if defined(LINUX_BOOT_ABI)
static vm_offset_t
linux_parse_boot_param(struct arm_boot_params *abp)
{
	struct arm_lbabi_tag *walker;
	uint32_t revision;
	uint64_t serial;

	/*
	 * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2
	 * is atags or dtb pointer.  If all of these aren't satisfied,
	 * then punt.
	 */
	if (!(abp->abp_r0 == 0 && abp->abp_r1 != 0 && abp->abp_r2 != 0))
		return 0;

	board_id = abp->abp_r1;
	walker = (struct arm_lbabi_tag *)
	    (abp->abp_r2 + KERNVIRTADDR - abp->abp_physaddr);

	/* xxx - Need to also look for binary device tree */
	if (ATAG_TAG(walker) != ATAG_CORE)
		return 0;

	atag_list = walker;
	while (ATAG_TAG(walker) != ATAG_NONE) {
		switch (ATAG_TAG(walker)) {
		case ATAG_CORE:
			break;
		case ATAG_MEM:
			arm_physmem_hardware_region(walker->u.tag_mem.start,
			    walker->u.tag_mem.size);
			break;
		case ATAG_INITRD2:
			break;
		case ATAG_SERIAL:
			serial = walker->u.tag_sn.low |
			    ((uint64_t)walker->u.tag_sn.high << 32);
			board_set_serial(serial);
			break;
		case ATAG_REVISION:
			revision = walker->u.tag_rev.rev;
			board_set_revision(revision);
			break;
		case ATAG_CMDLINE:
			/* XXX open question: Parse this for boothowto? */
			bcopy(walker->u.tag_cmd.command, linux_command_line,
			    ATAG_SIZE(walker));
			break;
		default:
			break;
		}
		walker = ATAG_NEXT(walker);
	}

	/* Save a copy for later */
	bcopy(atag_list, atags,
	    (char *)walker - (char *)atag_list + ATAG_SIZE(walker));

	return fake_preload_metadata(abp);
}
#endif
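
/*
 * Each ATAG record is a { size-in-words, tag } header followed by a
 * tag-specific payload, walked with ATAG_NEXT() until ATAG_NONE.  A
 * minimal list from a Linux-style loader is ATAG_CORE, one ATAG_MEM per
 * memory bank, optionally ATAG_CMDLINE / ATAG_SERIAL / ATAG_REVISION,
 * then the ATAG_NONE terminator.
 */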
#if defined(FREEBSD_BOOT_LOADER)
static vm_offset_t
freebsd_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr = 0;
	void *mdp;
	void *kmdp;

	/*
	 * Mask metadata pointer: it is supposed to be on page boundary. If
	 * the first argument (mdp) doesn't point to a valid address the
	 * bootloader must have passed us something other than the metadata
	 * ptr, so we give up.  Also give up if we cannot find the metadata
	 * section the loader creates that we get all this data out of.
	 */
	if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL)
		return 0;
	preload_metadata = mdp;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		return 0;

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
	preload_addr_relocate = KERNVIRTADDR - abp->abp_physaddr;
	return lastaddr;
}
#endif
vm_offset_t
default_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr;

#if defined(LINUX_BOOT_ABI)
	if ((lastaddr = linux_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
#if defined(FREEBSD_BOOT_LOADER)
	if ((lastaddr = freebsd_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
	/* Fall back to hardcoded metadata. */
	lastaddr = fake_preload_metadata(abp);

	return lastaddr;
}
/*
 * Stub version of the boot parameter parsing routine.  We are
 * called early in initarm, before even VM has been initialized.
 * This routine needs to preserve any data that the boot loader
 * has passed in before the kernel starts to grow past the end
 * of the BSS, traditionally the place boot-loaders put this data.
 *
 * Since this is called so early, and things that depend on the VM
 * system being set up are unavailable (including access to some SoCs'
 * serial ports), about all that can be done in this routine is to
 * copy the arguments.
 *
 * This is the default boot parameter parsing routine.  Individual
 * kernels/boards can override this weak function with one of their
 * own.  We just fake metadata...
 */
__weak_reference(default_parse_boot_param, parse_boot_param);
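
/*
 * A board that receives its boot data some other way can supply a strong
 * definition instead.  A hedged sketch (board_stash_bootinfo() is
 * hypothetical, not a real function):
 *
 *	vm_offset_t
 *	parse_boot_param(struct arm_boot_params *abp)
 *	{
 *		board_stash_bootinfo(abp->abp_r0, abp->abp_r1);
 *		return (fake_preload_metadata(abp));
 *	}
 */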
void
init_proc0(vm_offset_t kstack)
{

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}
void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
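
/*
 * The (cpu + 1) factor yields the *top* of each per-CPU stack, as the
 * descending ARM stacks require: CPU n's IRQ stack occupies
 * [pv_va + n * IRQ_STACK_SIZE * PAGE_SIZE, pv_va + (n + 1) *
 * IRQ_STACK_SIZE * PAGE_SIZE), and its initial r13 is that upper bound.
 */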
static char *
kenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

static void
print_kenv(void)
{
	char *cp;

	debugf("loader passed (static) kenv:\n");
	if (kern_envp == NULL) {
		debugf(" no env, null ptr\n");
		return;
	}
	debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp);

	for (cp = kern_envp; cp != NULL; cp = kenv_next(cp))
		debugf(" %x %s\n", (uint32_t)cp, cp);
}
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint32_t memsize, l2size;
	char *env;
	void *kmdp;
	u_int l1pagetable;
	int i, j, err_devmap, mem_regions_sz;

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;

	memsize = 0;
	set_cpufuncs();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp != NULL)
		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	else
		dtbp = (vm_offset_t)NULL;

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
		panic("Cannot get physical memory regions");
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/* Platform-specific initialisation */
	initarm_early_init();
	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for end of kernel map, one for stacks, msgbuf and
	 * L1 and L2 tables map and one for vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;
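
	/*
	 * Worked example, assuming a 512MB board and a struct vm_page of
	 * roughly 70-80 bytes (the real size depends on kernel options):
	 * 512MB / 4KB = 128K pages, so vm_page_array needs 9-10MB; each
	 * L2 table maps 1MB, giving 10-11 tables, 13-14 after the three
	 * extra tables above, and 16 once rounded up to a multiple of 4.
	 */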
	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)						\
	alloc_pages((var).pv_va, (np));					\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += ((np) * PAGE_SIZE);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));
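
	/*
	 * Note that valloc_pages() hands out VA straight from freemempos,
	 * which is still physically contiguous with the kernel image, and
	 * derives the PA from the constant load offset; nothing is mapped
	 * via the page tables yet.  E.g. valloc_pages(systempage, 1) below
	 * carves out one zeroed page for the vector page.
	 */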
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	for (i = 0, j = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    abp->abp_physaddr;
		}
	}

	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
	valloc_pages(kernelstack, KSTACK_PAGES * MAXCPU);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of kernel text and data using
	 * 1MB section mapping and for the rest of initial kernel address
	 * space use L2 coarse tables.
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0 ; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);
	/* Establish static device mappings. */
	err_devmap = initarm_devmap_init();
	arm_devmap_bootstrap(l1pagetable, NULL);
	vm_max_kernel_address = initarm_lastaddr();

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup("");

	/*
	 * Only after the SoC register block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);
	initarm_gpio_init();

	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kenv();

	env = getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	initarm_late_init();

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync.  A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();
	/* Set stack for exception handlers */
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_intrnames_init();
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Exclude the kernel (and all the things we allocated which
	 * immediately follow the kernel) from the VM allocation pool but not
	 * from crash dumps.  virtual_avail is a global variable which tracks
	 * the kva we've "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}