/* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * This code is derived from software written for Brini by Mark Brinicombe
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * Machine dependent functions for kernel setup
 * Updated : 18/04/01 updated for new wscons
45 #include "opt_compat.h"
47 #include "opt_kstack_pages.h"
48 #include "opt_platform.h"
49 #include "opt_sched.h"
50 #include "opt_timer.h"
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
55 #include <sys/param.h>
57 #include <sys/systm.h>
65 #include <sys/imgact.h>
67 #include <sys/kernel.h>
69 #include <sys/linker.h>
71 #include <sys/malloc.h>
72 #include <sys/msgbuf.h>
73 #include <sys/mutex.h>
75 #include <sys/ptrace.h>
76 #include <sys/reboot.h>
77 #include <sys/rwlock.h>
78 #include <sys/sched.h>
79 #include <sys/signalvar.h>
80 #include <sys/syscallsubr.h>
81 #include <sys/sysctl.h>
82 #include <sys/sysent.h>
83 #include <sys/sysproto.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_page.h>
92 #include <vm/vm_pager.h>
94 #include <machine/acle-compat.h>
95 #include <machine/armreg.h>
96 #include <machine/atags.h>
97 #include <machine/cpu.h>
98 #include <machine/cpuinfo.h>
99 #include <machine/debug_monitor.h>
100 #include <machine/db_machdep.h>
101 #include <machine/devmap.h>
102 #include <machine/frame.h>
103 #include <machine/intr.h>
104 #include <machine/machdep.h>
105 #include <machine/md_var.h>
106 #include <machine/metadata.h>
107 #include <machine/pcb.h>
108 #include <machine/physmem.h>
109 #include <machine/platform.h>
110 #include <machine/reg.h>
111 #include <machine/trap.h>
112 #include <machine/undefined.h>
113 #include <machine/vfp.h>
114 #include <machine/vmparam.h>
115 #include <machine/sysarch.h>
118 #include <dev/fdt/fdt_common.h>
119 #include <dev/ofw/openfirm.h>
DB_SHOW_COMMAND(cp15, db_show_cp15)
	reg = cp15_midr_get();
	db_printf("Cpu ID: 0x%08x\n", reg);
	reg = cp15_ctr_get();
	db_printf("Current Cache Lvl ID: 0x%08x\n", reg);
	reg = cp15_sctlr_get();
	db_printf("Ctrl: 0x%08x\n", reg);
	reg = cp15_actlr_get();
	db_printf("Aux Ctrl: 0x%08x\n", reg);
	reg = cp15_id_pfr0_get();
	db_printf("Processor Feat 0: 0x%08x\n", reg);
	reg = cp15_id_pfr1_get();
	db_printf("Processor Feat 1: 0x%08x\n", reg);
	reg = cp15_id_dfr0_get();
	db_printf("Debug Feat 0: 0x%08x\n", reg);
	reg = cp15_id_afr0_get();
	db_printf("Auxiliary Feat 0: 0x%08x\n", reg);
	reg = cp15_id_mmfr0_get();
	db_printf("Memory Model Feat 0: 0x%08x\n", reg);
	reg = cp15_id_mmfr1_get();
	db_printf("Memory Model Feat 1: 0x%08x\n", reg);
	reg = cp15_id_mmfr2_get();
	db_printf("Memory Model Feat 2: 0x%08x\n", reg);
	reg = cp15_id_mmfr3_get();
	db_printf("Memory Model Feat 3: 0x%08x\n", reg);
	reg = cp15_ttbr_get();
	db_printf("TTB0: 0x%08x\n", reg);
DB_SHOW_COMMAND(vtop, db_show_vtop)
	cp15_ats1cpr_set(addr);
	reg = cp15_par_get();
	db_printf("Physical address reg: 0x%08x\n", reg);
	db_printf("show vtop <virt_addr>\n");
#endif /* __ARM_ARCH >= 6 */
#define debugf(fmt, args...) printf(fmt, ##args)
#define debugf(fmt, args...)
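/*
 * debugf() expands to printf() when DEBUG is defined and to nothing
 * otherwise; "args..." together with "##args" is the GNU named variadic
 * macro extension, which also swallows the trailing comma when the
 * argument list is empty.
 */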
struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];
static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
vm_offset_t vector_page;
int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;
static char *loader_envp;
vm_offset_t systempage;
vm_offset_t irqstack;
vm_offset_t undstack;
vm_offset_t abtstack;
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks, etc.), rounded up to be divisible by 4.
#define KERNEL_PT_MAX	78
static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];
struct pv_addr systempage;
static struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
static struct pv_addr kernelstack;
#if defined(LINUX_BOOT_ABI)
#define LBABI_MAX_BANKS	10
struct arm_lbabi_tag *atag_list;
char linux_command_line[LBABI_MAX_COMMAND_LINE + 1];
char atags[LBABI_MAX_COMMAND_LINE * 2];
uint32_t memstart[LBABI_MAX_BANKS];
uint32_t memsize[LBABI_MAX_BANKS];
static uint32_t board_revision;
/* hex representation of uint64_t */
static char board_serial[32];
SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes");
SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD,
    &board_revision, 0, "Board revision");
SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD,
    board_serial, 0, "Board serial");
SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &vfp_exists, 0, "Floating point support enabled");
board_set_serial(uint64_t serial)
	snprintf(board_serial, sizeof(board_serial)-1,
board_set_revision(uint32_t revision)
	board_revision = revision;
sendsig(catcher, ksi, mask)
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sysentvec *sysent;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	onstack = sigonstack(tf->tf_usr_sp);
	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
		fp = (struct sigframe *)td->td_frame->tf_usr_sp;
	/* make room on the stack */
	/* make the stack aligned */
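	/*
	 * STACKALIGN keeps the frame address suitably aligned; the AAPCS
	 * requires an 8-byte-aligned stack pointer at every public
	 * interface, so the handler must not be entered on a misaligned sp.
	 */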
	fp = (struct sigframe *)STACKALIGN(fp);
	/* Populate the siginfo frame. */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	frame.sf_uc.uc_stack = td->td_sigstk;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);
	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.  Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	tf->tf_r1 = (register_t)&fp->sf_si;
	tf->tf_r2 = (register_t)&fp->sf_uc;
	/* the trampoline uses r5 as the uc address */
	tf->tf_r5 = (register_t)&fp->sf_uc;
	tf->tf_pc = (register_t)catcher;
	tf->tf_usr_sp = (register_t)fp;
	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base;
		tf->tf_usr_lr = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));
	/* Set the mode to enter in the signal handler */
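	/*
	 * Under the ARM/Thumb interworking convention, bit 0 of a code
	 * address selects the instruction set: an odd catcher address
	 * means a Thumb handler, so the T bit is set in the saved PSR
	 * below; otherwise it is cleared for ARM mode.
	 */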
	if ((register_t)catcher & 1)
		tf->tf_spsr |= PSR_T;
		tf->tf_spsr &= ~PSR_T;
	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
	mtx_lock(&psp->ps_mtx);
struct kva_md_info kmi;
 * Initialize the vector page, and select whether or not to
 * relocate the vectors.
 * NOTE: We expect the vector page to be mapped at its expected
extern unsigned int page0[], page0_data[];
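/*
 * page0 holds the exception-vector instructions and page0_data the table
 * of handler addresses they load via pc-relative ldr, which is why
 * arm_vector_init() below copies both an insn word and its data word for
 * every vector it takes over.
 */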
arm_vector_init(vm_offset_t va, int which)
	unsigned int *vectors = (unsigned int *)va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	/* Now sync the vectors. */
	icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));
	if (va == ARM_VECTORS_HIGH) {
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
cpu_startup(void *dummy)
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;
#if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE)
	vm_ksubmap_init(&kmi);
	 * Display the RAM layout.
	printf("real memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count),
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte);
	arm_physmem_print_tables();
	arm_devmap_print_table();
	vm_pager_bufferinit();
	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	pmap_set_pcb_pagedir(kernel_pmap, pcb);
	vector_page_setprot(VM_PROT_READ);
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
	*(uint32_t *)ARM_RAS_START = 0;
	*(uint32_t *)ARM_RAS_END = 0xffffffff;
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
cpu_flush_dcache(void *ptr, size_t len)
	dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
/* Get current clock frequency for the given cpu id. */
cpu_est_clockrate(int cpu_id, uint64_t *rate)
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
#ifndef NO_EVENTTIMERS
	if (!sched_runnable())
#ifndef NO_EVENTTIMERS
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
cpu_idle_wakeup(int cpu)
 * Most ARM platforms don't need to do anything special to init their clocks
 * (they get initialized during normal device attachment), and by not defining a
 * cpu_initclocks() function they get this generic one.  Any platform that needs
 * to do something special can just provide its own implementation, which will
 * override this one due to the weak linkage.
arm_generic_initclocks(void)
#ifndef NO_EVENTTIMERS
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	cpu_initclocks_bsp();
__weak_reference(arm_generic_initclocks, cpu_initclocks);
fill_regs(struct thread *td, struct reg *regs)
	struct trapframe *tf = td->td_frame;
	bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
	regs->r_sp = tf->tf_usr_sp;
	regs->r_lr = tf->tf_usr_lr;
	regs->r_pc = tf->tf_pc;
	regs->r_cpsr = tf->tf_spsr;
fill_fpregs(struct thread *td, struct fpreg *regs)
	bzero(regs, sizeof(*regs));
set_regs(struct thread *td, struct reg *regs)
	struct trapframe *tf = td->td_frame;
	bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
	tf->tf_usr_sp = regs->r_sp;
	tf->tf_usr_lr = regs->r_lr;
	tf->tf_pc = regs->r_pc;
	tf->tf_spsr &= ~PSR_FLAGS;
	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
set_fpregs(struct thread *td, struct fpreg *regs)
fill_dbregs(struct thread *td, struct dbreg *regs)
set_dbregs(struct thread *td, struct dbreg *regs)
ptrace_read_int(struct thread *td, vm_offset_t addr, uint32_t *v)
	if (proc_readmem(td, td->td_proc, addr, v, sizeof(*v)) != sizeof(*v))
ptrace_write_int(struct thread *td, vm_offset_t addr, uint32_t v)
	if (proc_writemem(td, td->td_proc, addr, &v, sizeof(v)) != sizeof(v))
ptrace_get_usr_reg(void *cookie, int reg)
	struct thread *td = cookie;
	KASSERT(((reg >= 0) && (reg <= ARM_REG_NUM_PC)),
	    ("reg is outside range"));
		ret = td->td_frame->tf_pc;
		ret = td->td_frame->tf_usr_lr;
		ret = td->td_frame->tf_usr_sp;
		ret = *((register_t *)&td->td_frame->tf_r0 + reg);
ptrace_get_usr_int(void *cookie, vm_offset_t offset, u_int *val)
	struct thread *td = cookie;
	error = ptrace_read_int(td, offset, val);
 * This function parses the current instruction opcode and decodes
 * any possible jump (change in PC) that might occur after
 * the instruction is executed.
 * @param td Thread structure of analysed task
 * @param cur_instr Currently executed instruction
 * @param alt_next_address Pointer to the variable where
 *                         the destination address of the
 *                         jump instruction shall be stored.
 * @return <0> when jump is possible
ptrace_get_alternative_next(struct thread *td, uint32_t cur_instr,
    uint32_t *alt_next_address)
	if (inst_branch(cur_instr) || inst_call(cur_instr) ||
	    inst_return(cur_instr)) {
		error = arm_predict_branch(td, cur_instr, td->td_frame->tf_pc,
		    alt_next_address, ptrace_get_usr_reg, ptrace_get_usr_int);
ptrace_single_step(struct thread *td)
	int error, error_alt;
	uint32_t cur_instr, alt_next = 0;
	/* TODO: This needs to be updated for Thumb-2 */
	if ((td->td_frame->tf_spsr & PSR_T) != 0)
	KASSERT(td->td_md.md_ptrace_instr == 0,
	    ("Didn't clear single step"));
	KASSERT(td->td_md.md_ptrace_instr_alt == 0,
	    ("Didn't clear alternative single step"));
	error = ptrace_read_int(td, td->td_frame->tf_pc,
	error = ptrace_read_int(td, td->td_frame->tf_pc + INSN_SIZE,
	    &td->td_md.md_ptrace_instr);
	error = ptrace_write_int(td, td->td_frame->tf_pc + INSN_SIZE,
		td->td_md.md_ptrace_instr = 0;
	td->td_md.md_ptrace_addr = td->td_frame->tf_pc +
	error_alt = ptrace_get_alternative_next(td, cur_instr, &alt_next);
	if (error_alt == 0) {
		error_alt = ptrace_read_int(td, alt_next,
		    &td->td_md.md_ptrace_instr_alt);
			td->td_md.md_ptrace_instr_alt = 0;
			error_alt = ptrace_write_int(td, alt_next,
			td->td_md.md_ptrace_instr_alt = 0;
		td->td_md.md_ptrace_addr_alt = alt_next;
	return ((error != 0) && (error_alt != 0));
ptrace_clear_single_step(struct thread *td)
	/* TODO: This needs to be updated for Thumb-2 */
	if ((td->td_frame->tf_spsr & PSR_T) != 0)
	if (td->td_md.md_ptrace_instr != 0) {
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		td->td_md.md_ptrace_instr = 0;
	if (td->td_md.md_ptrace_instr_alt != 0) {
		ptrace_write_int(td, td->td_md.md_ptrace_addr_alt,
		    td->td_md.md_ptrace_instr_alt);
		td->td_md.md_ptrace_instr_alt = 0;
ptrace_set_pc(struct thread *td, unsigned long addr)
	td->td_frame->tf_pc = addr;
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(PSR_I | PSR_F);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
		td->td_md.md_spinlock_count++;
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
 * Clear registers on exec
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
	struct trapframe *tf = td->td_frame;
	memset(tf, 0, sizeof(*tf));
	tf->tf_usr_sp = stack;
	tf->tf_usr_lr = imgp->entry_addr;
	tf->tf_svc_lr = 0x77777777;
	tf->tf_pc = imgp->entry_addr;
	tf->tf_spsr = PSR_USR32_MODE;
 * Get machine context.
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;
	if (clear_ret & GET_MC_CLEAR_RET) {
		gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C;
		gr[_REG_R0] = tf->tf_r0;
		gr[_REG_CPSR] = tf->tf_spsr;
	gr[_REG_R1] = tf->tf_r1;
	gr[_REG_R2] = tf->tf_r2;
	gr[_REG_R3] = tf->tf_r3;
	gr[_REG_R4] = tf->tf_r4;
	gr[_REG_R5] = tf->tf_r5;
	gr[_REG_R6] = tf->tf_r6;
	gr[_REG_R7] = tf->tf_r7;
	gr[_REG_R8] = tf->tf_r8;
	gr[_REG_R9] = tf->tf_r9;
	gr[_REG_R10] = tf->tf_r10;
	gr[_REG_R11] = tf->tf_r11;
	gr[_REG_R12] = tf->tf_r12;
	gr[_REG_SP] = tf->tf_usr_sp;
	gr[_REG_LR] = tf->tf_usr_lr;
	gr[_REG_PC] = tf->tf_pc;
 * Set machine context.
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the privileged bits of the status register.
set_mcontext(struct thread *td, mcontext_t *mcp)
	struct trapframe *tf = td->td_frame;
	const __greg_t *gr = mcp->__gregs;
	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_r12 = gr[_REG_R12];
	tf->tf_usr_sp = gr[_REG_SP];
	tf->tf_usr_lr = gr[_REG_LR];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_spsr = gr[_REG_CPSR];
sys_sigreturn(td, uap)
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (PSR_I | PSR_F)) != 0)
	/* Restore register context. */
	set_mcontext(td, &uc.uc_mcontext);
	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
makectx(struct trapframe *tf, struct pcb *pcb)
	pcb->pcb_regs.sf_r4 = tf->tf_r4;
	pcb->pcb_regs.sf_r5 = tf->tf_r5;
	pcb->pcb_regs.sf_r6 = tf->tf_r6;
	pcb->pcb_regs.sf_r7 = tf->tf_r7;
	pcb->pcb_regs.sf_r8 = tf->tf_r8;
	pcb->pcb_regs.sf_r9 = tf->tf_r9;
	pcb->pcb_regs.sf_r10 = tf->tf_r10;
	pcb->pcb_regs.sf_r11 = tf->tf_r11;
	pcb->pcb_regs.sf_r12 = tf->tf_r12;
	pcb->pcb_regs.sf_pc = tf->tf_pc;
	pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
	pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
 * Fake up a boot descriptor table
fake_preload_metadata(struct arm_boot_params *abp __unused)
	vm_offset_t zstart = 0, zend = 0;
	vm_offset_t lastaddr;
	static uint32_t fake_preload[35];
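	/*
	 * Build (tag, length, payload) records in the same layout loader(8)
	 * produces, terminated by a zero tag, so the generic preload_*
	 * lookup routines keep working when no real loader metadata exists.
	 */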
	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char *)&fake_preload[i++], "kernel");
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char *)&fake_preload[i++], "elf kernel");
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		db_fetch_ksymtab(zstart, zend);
		lastaddr = (vm_offset_t)&end;
	fake_preload[i++] = 0;
	preload_metadata = (void *)fake_preload;
	init_static_kenv(NULL, 0);
	set_curthread(&thread0);
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);
#if defined(LINUX_BOOT_ABI)
linux_parse_boot_param(struct arm_boot_params *abp)
	struct arm_lbabi_tag *walker;
	 * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2
	 * is atags or dtb pointer.  If all of these aren't satisfied,
	if (!(abp->abp_r0 == 0 && abp->abp_r1 != 0 && abp->abp_r2 != 0))
	board_id = abp->abp_r1;
	walker = (struct arm_lbabi_tag *)
	    (abp->abp_r2 + KERNVIRTADDR - abp->abp_physaddr);
	/* xxx - Need to also look for binary device tree */
	if (ATAG_TAG(walker) != ATAG_CORE)
	while (ATAG_TAG(walker) != ATAG_NONE) {
		switch (ATAG_TAG(walker)) {
			arm_physmem_hardware_region(walker->u.tag_mem.start,
			    walker->u.tag_mem.size);
			serial = walker->u.tag_sn.low |
			    ((uint64_t)walker->u.tag_sn.high << 32);
			board_set_serial(serial);
			revision = walker->u.tag_rev.rev;
			board_set_revision(revision);
			/* XXX open question: Parse this for boothowto? */
			bcopy(walker->u.tag_cmd.command, linux_command_line,
		walker = ATAG_NEXT(walker);
	/* Save a copy for later */
	bcopy(atag_list, atags,
	    (char *)walker - (char *)atag_list + ATAG_SIZE(walker));
	init_static_kenv(NULL, 0);
	return fake_preload_metadata(abp);
#if defined(FREEBSD_BOOT_LOADER)
freebsd_parse_boot_param(struct arm_boot_params *abp)
	vm_offset_t lastaddr = 0;
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
	 * Mask metadata pointer: it is supposed to be on a page boundary.  If
	 * the first argument (mdp) doesn't point to a valid address the
	 * bootloader must have passed us something other than the metadata
	 * pointer, so we give up.  Also give up if we cannot find the metadata
	 * section the loader creates that we get all this data out of.
	if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL)
	preload_metadata = mdp;
	kmdp = preload_search_by_type("elf kernel");
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	loader_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	init_static_kenv(loader_envp, 0);
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
	db_fetch_ksymtab(ksym_start, ksym_end);
default_parse_boot_param(struct arm_boot_params *abp)
	vm_offset_t lastaddr;
#if defined(LINUX_BOOT_ABI)
	if ((lastaddr = linux_parse_boot_param(abp)) != 0)
#if defined(FREEBSD_BOOT_LOADER)
	if ((lastaddr = freebsd_parse_boot_param(abp)) != 0)
	/* Fall back to hardcoded metadata. */
	lastaddr = fake_preload_metadata(abp);
 * Stub version of the boot parameter parsing routine.  We are
 * called early in initarm, before even VM has been initialized.
 * This routine needs to preserve any data that the boot loader
 * has passed in before the kernel starts to grow past the end
 * of the BSS, traditionally the place boot-loaders put this data.
 * Since this is called so early, before the things that depend on the
 * vm system being set up (including access to some SoCs' serial ports)
 * are available, about all that can be done in this routine is to copy
 * the arguments.
 * This is the default boot parameter parsing routine.  Individual
 * kernels/boards can override this weak function with one of their
 * own.  We just fake metadata...
__weak_reference(default_parse_boot_param, parse_boot_param);
init_proc0(vm_offset_t kstack)
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_vfpcpu = -1;
	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
arm_predict_branch(void *cookie, u_int insn, register_t pc, register_t *new_pc,
    u_int (*fetch_reg)(void *, int), u_int (*read_int)(void *, vm_offset_t, u_int *))
	u_int addr, nregs, offset = 0;
	switch ((insn >> 24) & 0xf) {
	case 0x2:	/* add pc, reg1, #value */
	case 0x0:	/* add pc, reg1, reg2, lsl #offset */
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		if (((insn >> 16) & 0xf) == 15)
		if (insn & 0x0200000) {
1185 offset = (insn >> 7) & 0x1e;
1186 offset = (insn & 0xff) << (32 - offset) |
1187 (insn & 0xff) >> offset;
1190 offset = fetch_reg(cookie, insn & 0x0f);
1191 if ((insn & 0x0000ff0) != 0x00000000) {
1193 nregs = fetch_reg(cookie,
1196 nregs = (insn >> 7) & 0x1f;
1197 switch ((insn >> 5) & 3) {
1200 offset = offset << nregs;
1204 offset = offset >> nregs;
1211 *new_pc = addr + offset;
1216 case 0xa: /* b ... */
1217 case 0xb: /* bl ... */
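		/*
		 * b/bl carry a 24-bit signed word offset: shift left by 2
		 * for a byte offset, sign-extend from bit 25, and add to
		 * pc + 8 to account for the ARM two-instruction prefetch.
		 */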
		addr = ((insn << 2) & 0x03ffffff);
		if (addr & 0x02000000)
		*new_pc = (pc + 8 + addr);
	case 0x7:	/* ldr pc, [pc, reg, lsl #2] */
		addr = fetch_reg(cookie, insn & 0xf);
		addr = pc + 8 + (addr << 2);
		error = read_int(cookie, addr, &addr);
	case 0x1:	/* mov pc, reg */
		*new_pc = fetch_reg(cookie, insn & 0xf);
	case 0x5:	/* ldr pc, [reg] */
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		/* ldr pc, [reg, #offset] */
		if (insn & (1 << 24))
			offset = insn & 0xfff;
		if (insn & 0x00800000)
		error = read_int(cookie, addr, &addr);
	case 0x8:	/* ldmxx reg, {..., pc} */
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
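		/*
		 * SWAR population count of the 16-bit register list (pairs,
		 * then nibbles, then bytes) gives the number of registers
		 * the ldm transfers, which determines where pc sits in the
		 * loaded block for each addressing mode below.
		 */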
1249 nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555);
1250 nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333);
1251 nregs = (nregs + (nregs >> 4)) & 0x0f0f;
1252 nregs = (nregs + (nregs >> 8)) & 0x001f;
1253 switch ((insn >> 23) & 0x3) {
1254 case 0x0: /* ldmda */
1257 case 0x1: /* ldmia */
1258 addr = addr + 0 + ((nregs - 1) << 2);
1260 case 0x2: /* ldmdb */
1263 case 0x3: /* ldmib */
1264 addr = addr + 4 + ((nregs - 1) << 2);
1267 error = read_int(cookie, addr, &addr);
1278 set_stackptrs(int cpu)
1281 set_stackptr(PSR_IRQ32_MODE,
1282 irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1283 set_stackptr(PSR_ABT32_MODE,
1284 abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1285 set_stackptr(PSR_UND32_MODE,
1286 undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1290 set_stackptrs(int cpu)
1293 set_stackptr(PSR_IRQ32_MODE,
1294 irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1295 set_stackptr(PSR_ABT32_MODE,
1296 abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1297 set_stackptr(PSR_UND32_MODE,
1298 undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1303 #define efi_next_descriptor(ptr, size) \
1304 ((struct efi_md *)(((uint8_t *) ptr) + size))
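/*
 * The firmware is allowed to use a descriptor stride larger than
 * sizeof(struct efi_md), so the memory map must be walked in byte units
 * using the reported descriptor_size rather than by array indexing.
 */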
add_efi_map_entries(struct efi_map_header *efihdr, struct mem_region *mr,
    int *mrcnt, uint32_t *memsize)
	struct efi_md *map, *p;
	size_t efisz, memory_size;
	static const char *types[] = {
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"ACPIReclaimMemory",
		"MemoryMappedIOPortSpace",
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	efisz = roundup2(sizeof(struct efi_map_header), 0x10);
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);
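	/*
	 * The header's memory_size field is the total byte length of the
	 * map, so dividing by the per-entry stride gives the entry count.
	 */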
	if (efihdr->descriptor_size == 0)
	ndesc = efihdr->memory_size / efihdr->descriptor_size;
	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");
	for (i = 0, j = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type <= EFI_MD_TYPE_PALCODE)
				type = types[p->md_type];
			printf("%23s %012llx %12p %08llx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
			if (p->md_attr & EFI_MD_ATTR_WC)
			if (p->md_attr & EFI_MD_ATTR_WT)
			if (p->md_attr & EFI_MD_ATTR_WB)
			if (p->md_attr & EFI_MD_ATTR_UCE)
			if (p->md_attr & EFI_MD_ATTR_WP)
			if (p->md_attr & EFI_MD_ATTR_RP)
			if (p->md_attr & EFI_MD_ATTR_XP)
			if (p->md_attr & EFI_MD_ATTR_RT)
		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			 * We're allowed to use any entry with these types.
		if (j >= FDT_MEM_REGIONS)
		mr[j].mr_start = p->md_phys;
		mr[j].mr_size = p->md_pages * PAGE_SIZE;
		memory_size += mr[j].mr_size;
	*memsize = memory_size;
1429 debugf("loader passed (static) kenv:\n");
1430 if (loader_envp == NULL) {
1431 debugf(" no env, null ptr\n");
1434 debugf(" loader_envp = 0x%08x\n", (uint32_t)loader_envp);
1436 for (cp = loader_envp; cp != NULL; cp = kenv_next(cp))
1437 debugf(" %x %s\n", (uint32_t)cp, cp);
initarm(struct arm_boot_params *abp)
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint32_t memsize, l2size;
	int i, j, err_devmap, mem_regions_sz;
	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;
	 * Find the dtb passed in by the boot loader.
	kmdp = preload_search_by_type("elf kernel");
		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
		dtbp = (vm_offset_t)NULL;
#if defined(FDT_DTB_STATIC)
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");
	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
		panic("Cannot get physical memory regions");
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);
	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
	/* Platform-specific initialisation */
	platform_probe_and_attach();
	/* Do basic tuning, hz etc */
	/* Calculate number of L2 tables needed for mapping vm_page_array */
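	/*
	 * (Each L2 table maps one 1 MB L1 section, L1_S_SHIFT == 20, so the
	 * byte size of vm_page_array converts to a count of 1 MB sections,
	 * plus one for the partial section at the end.)
	 */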
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;
	 * Add one table for the end of the kernel map, one for the stacks,
	 * msgbuf, and L1/L2 table mappings, and one for the vectors map.
	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;
	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;
	/* Define a macro to simplify memory allocation */
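/*
 * valloc_pages() carves np pages of kernel VA out of freemempos and
 * derives the matching physical address from the kernel's physical load
 * offset; alloc_pages() zeroes everything it hands out.
 */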
#define valloc_pages(var, np)					\
	alloc_pages((var).pv_va, (np));				\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);
#define alloc_pages(var, np)					\
	(var) = freemempos;					\
	freemempos += (np * PAGE_SIZE);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));
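	/*
	 * The hardware L1 translation table is 16 KB and must be 16 KB
	 * aligned; the loop below advances freemempos until the allocation
	 * that follows lands on such a boundary.
	 */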
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	for (i = 0, j = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000.  This page will just contain the system vectors
	 * and can be shared by all processes.
	valloc_pages(systempage, 1);
	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);
	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
	valloc_pages(kernelstack, kstack_pages * MAXCPU);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	l1pagetable = kernel_l1pt.pv_va;
	 * Try to map as much as possible of the kernel text and data using
	 * 1MB section mappings, and for the rest of the initial kernel
	 * address space use L2 coarse tables.
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);
	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;
	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);
	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	arm_devmap_bootstrap(l1pagetable, NULL);
	vm_max_kernel_address = platform_lastaddr();
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	cpu_setttb(kernel_l1pt.pv_pa);
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));
	 * Now that proper page tables are installed, call cpu_setup() to
	 * enable instruction and data caches and other chip-specific features.
	 * Only after the SoC registers block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	env = kern_getenv("kernelname");
		strlcpy(kernelname, env, sizeof(kernelname));
	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
	platform_late_init();
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache.  This will have happened in cpu_setttb()
	 * but since we are bootstrapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync.  A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	cpu_idcache_wbinv_all();
	init_proc0(kernelstack.pv_va);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	 * Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 * Prepare the list of physical memory available to the vm subsystem.
	arm_physmem_exclude_region(abp->abp_physaddr,
	    (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();
	init_param2(physmem);
	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
#else /* __ARM_ARCH < 6 */
initarm(struct arm_boot_params *abp)
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	vm_paddr_t lastaddr;
	vm_offset_t dtbp, kernelstack, dpcpu;
	int err_devmap, mem_regions_sz;
	struct efi_map_header *efihdr;
	/* get last allocated physical address */
	arm_physmem_kernaddr = abp->abp_physaddr;
	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;
	 * Find the dtb passed in by the boot loader.
	kmdp = preload_search_by_type("elf kernel");
	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");
	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL) {
		add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz,
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
			panic("Cannot get physical memory regions");
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);
	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
	 * Set TEX remapping registers.
	 * Set up kernel page tables and switch to the kernel L1 page table.
	pmap_bootstrap_prepare(lastaddr);
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	/* Platform-specific initialisation */
	platform_probe_and_attach();
	/* Do basic tuning, hz etc */
	 * Allocate a page for the system page mapped to 0xffff0000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	systempage = pmap_preboot_get_pages(1);
	/* Map the vector page. */
	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1);
	if (virtual_end >= ARM_VECTORS_HIGH)
		virtual_end = ARM_VECTORS_HIGH - 1;
	/* Allocate dynamic per-cpu area. */
	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);
	/* Allocate stacks for all modes */
	irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
	abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
	undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
	kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU);
	/* Allocate message buffer. */
	msgbufp = (void *)pmap_preboot_get_vpages(
	    round_page(msgbufsize) / PAGE_SIZE);
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	arm_devmap_bootstrap(0, NULL);
	vm_max_kernel_address = platform_lastaddr();
	 * Only after the SoC registers block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	debugf(" lastaddr1: 0x%08x\n", lastaddr);
	env = kern_getenv("kernelname");
		strlcpy(kernelname, env, sizeof(kernelname));
	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
	platform_late_init();
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache.  This will have happened in cpu_setttb()
	 * but since we are bootstrapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync.  A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	/* Set stack for exception handlers */
	init_proc0(kernelstack);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	enable_interrupts(PSR_A);
	/* Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 * Prepare the list of physical memory available to the vm subsystem.
	arm_physmem_exclude_region(abp->abp_physaddr,
	    pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();
	init_param2(physmem);
	/* Init message buffer. */
	msgbufinit(msgbufp, msgbufsize);
	return ((void *)STACKALIGN(thread0.td_pcb));
#endif /* __ARM_ARCH < 6 */
uint32_t (*arm_cpu_fill_vdso_timehands)(struct vdso_timehands *,
    struct timecounter *);
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
	return (arm_cpu_fill_vdso_timehands != NULL ?
	    arm_cpu_fill_vdso_timehands(vdso_th, tc) : 0);