1 /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */
4 * Copyright (c) 2004 Olivier Houchard
5 * Copyright (c) 1994-1998 Mark Brinicombe.
6 * Copyright (c) 1994 Brini.
9 * This code is derived from software written for Brini by Mark Brinicombe
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Mark Brinicombe
22 * for the NetBSD Project.
23 * 4. The name of the company nor the name of the author may be used to
24 * endorse or promote products derived from this software without specific
25 * prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
28 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
29 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * Machine dependent functions for kernel setup
42 * Updated : 18/04/01 updated for new wscons
45 #include "opt_compat.h"
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
49 #include <sys/param.h>
51 #include <sys/systm.h>
58 #include <sys/imgact.h>
59 #include <sys/kernel.h>
61 #include <sys/linker.h>
63 #include <sys/malloc.h>
64 #include <sys/mutex.h>
66 #include <sys/ptrace.h>
67 #include <sys/signalvar.h>
68 #include <sys/sysent.h>
69 #include <sys/sysproto.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_object.h>
76 #include <vm/vm_page.h>
77 #include <vm/vm_pager.h>
78 #include <vm/vnode_pager.h>
80 #include <machine/armreg.h>
81 #include <machine/cpu.h>
82 #include <machine/machdep.h>
83 #include <machine/md_var.h>
84 #include <machine/metadata.h>
85 #include <machine/pcb.h>
86 #include <machine/pmap.h>
87 #include <machine/reg.h>
88 #include <machine/trap.h>
89 #include <machine/undefined.h>
90 #include <machine/vmparam.h>
91 #include <machine/sysarch.h>
/* Physical/virtual address jumped to on CPU reset; 0 until platform code sets it. */
93 uint32_t cpu_reset_address = 0;
/* Kernel VA where the ARM exception vector page is mapped (see arm_vector_init()). */
95 vector_page;
/*
 * Optional fast block-copy/zero hooks, NULL unless installed elsewhere
 * (presumably by platform-specific DMA/accelerator code -- not visible here).
 * The _min_*_size thresholds are the minimum byte counts for which the
 * hooks are worth using; 0 disables the fast path.
 */
99 int (*_arm_memcpy)(void *, void *, int, int) = NULL;
100 int (*_arm_bzero)(void *, int, int) = NULL;
101 int _min_memcpy_size = 0;
102 int _min_bzero_size = 0;
/*
 * sendsig -- deliver signal 'sig' to the current thread by building a
 * struct sigframe (machine context + siginfo + ucontext) on the user
 * stack and rewriting the trapframe so the thread resumes in 'catcher'.
 *
 * NOTE(review): interior lines of this function are elided in this copy
 * (the embedded line numbers skip); local declarations, the opening
 * brace, and several statements are missing.  Treat as an annotated
 * excerpt, not compilable code.
 */
105 sendsig(catcher, ksi, mask)
112 struct trapframe *tf;
113 struct sigframe *fp, frame;
/* Caller must hold the proc lock and the sigacts mutex. */
121 PROC_LOCK_ASSERT(p, MA_OWNED);
122 sig = ksi->ksi_signo;
123 code = ksi->ksi_code;
125 mtx_assert(&psp->ps_mtx, MA_OWNED);
/* Remember whether we are already executing on the alternate stack. */
127 onstack = sigonstack(tf->tf_usr_sp);
129 CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
132 /* Allocate and validate space for the signal handler context. */
/*
 * Use the alternate signal stack when one is enabled, we are not already
 * on it, and this signal is marked for on-stack delivery.
 * NOTE(review): the altstack flag is tested in td_flags here but in
 * td_pflags further below (uc_stack setup) -- looks inconsistent;
 * confirm against the upstream revision of this file.
 */
133 if ((td->td_flags & TDP_ALTSTACK) != 0 && !(onstack) &&
134 SIGISMEMBER(psp->ps_sigonstack, sig)) {
/* Frame is carved from the top (highest address) of the altstack. */
135 fp = (struct sigframe *)(td->td_sigstk.ss_sp +
136 td->td_sigstk.ss_size);
137 #if defined(COMPAT_43)
138 td->td_sigstk.ss_flags |= SS_ONSTACK;
/* Otherwise deliver on the thread's current user stack. */
141 fp = (struct sigframe *)td->td_frame->tf_usr_sp;
143 /* make room on the stack */
146 /* make the stack aligned */
147 fp = (struct sigframe *)STACKALIGN(fp);
148 /* Populate the siginfo frame. */
149 get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
150 frame.sf_si = ksi->ksi_info;
151 frame.sf_uc.uc_sigmask = *mask;
152 frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK )
153 ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
/*
 * NOTE(review): this whole-struct assignment overwrites the ss_flags
 * value computed on the previous two lines -- verify intent upstream.
 */
154 frame.sf_uc.uc_stack = td->td_sigstk;
/* Drop locks around copyout(); it may fault and sleep. */
155 mtx_unlock(&psp->ps_mtx);
156 PROC_UNLOCK(td->td_proc);
158 /* Copy the sigframe out to the user's stack. */
159 if (copyout(&frame, fp, sizeof(*fp)) != 0) {
160 /* Process has trashed its stack. Kill it. */
161 CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
166 /* Translate the signal if appropriate. */
167 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
168 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
171 * Build context to run handler in. We invoke the handler
172 * directly, only returning via the trampoline. Note the
173 * trampoline version numbers are coordinated with machine-
174 * dependent code in libc.
/* r1/r2 carry the siginfo and ucontext pointers to the handler. */
178 tf->tf_r1 = (register_t)&fp->sf_si;
179 tf->tf_r2 = (register_t)&fp->sf_uc;
181 /* the trampoline uses r5 as the uc address */
182 tf->tf_r5 = (register_t)&fp->sf_uc;
/* Enter the handler directly; sp points at the frame we just wrote. */
183 tf->tf_pc = (register_t)catcher;
184 tf->tf_usr_sp = (register_t)fp;
/* Return address: the signal trampoline copied just below PS_STRINGS. */
185 tf->tf_usr_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));
187 CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
/* Re-acquire the sigacts mutex dropped before the copyout. */
191 mtx_lock(&psp->ps_mtx);
/* Kernel VA submap layout info; filled in by vm_ksubmap_init() in cpu_startup(). */
194 struct kva_md_info kmi;
199 * Initialize the vector page, and select whether or not to
200 * relocate the vectors.
202 * NOTE: We expect the vector page to be mapped at its expected
/* Template vector page (instructions + data words) defined in assembly. */
206 extern unsigned int page0[], page0_data[];
/*
 * arm_vector_init -- copy the exception vectors selected by the 'which'
 * bitmask from the page0 template into the vector page mapped at 'va',
 * then sync the icache and (for high vectors) enable vector relocation.
 *
 * NOTE(review): interior lines are elided in this copy (loop-variable
 * declaration, continue/closing braces); annotated excerpt only.
 */
208 arm_vector_init(vm_offset_t va, int which)
210 unsigned int *vectors = (int *) va;
/* Data words live at the same offset within the page as in the template. */
211 unsigned int *vectors_data = vectors + (page0_data - page0);
215 * Loop through the vectors we're taking over, and copy the
216 * vector's insn and data word.
218 for (vec = 0; vec < ARM_NVEC; vec++) {
219 if ((which & (1 << vec)) == 0) {
220 /* Don't want to take over this vector. */
/* Copy both the vector instruction and its associated data word. */
223 vectors[vec] = page0[vec];
224 vectors_data[vec] = page0_data[vec];
/* 227: Now sync the vectors -- insn + data word per vector, hence *2. */
227 /* Now sync the vectors. */
228 cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));
/* High vectors (0xffff0000) require the CP15 control VECRELOC bit. */
232 if (va == ARM_VECTORS_HIGH) {
234 * Assume the MD caller knows what it's doing here, and
235 * really does want the vector page relocated.
237 * Note: This has to be done here (and not just in
238 * cpu_setup()) because the vector page needs to be
239 * accessible *before* cpu_startup() is called.
242 * NOTE: If the CPU control register is not readable,
243 * this will totally fail! We'll just assume that
244 * any system that has high vector support has a
245 * readable CPU control register, for now. If we
246 * ever encounter one that does not, we'll have to
249 cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
/*
 * cpu_startup -- late MD initialization run from SYSINIT (see below):
 * prints the memory layout, initializes kernel VA submaps and the
 * pager, and finishes thread0's PCB / trapframe setup.
 *
 * NOTE(review): interior lines are elided in this copy (local decls,
 * #else/#endif pairs, closing braces); annotated excerpt only.
 */
254 cpu_startup(void *dummy)
256 struct pcb *pcb = thread0.td_pcb;
257 #ifndef ARM_CACHE_LOCK_ENABLE
/* 1048576 == bytes per MB. */
264 printf("real memory = %ju (%ju MB)\n", (uintmax_t)ptoa(physmem),
265 (uintmax_t)ptoa(physmem) / 1048576);
269 * Display the RAM layout.
274 printf("Physical memory chunk(s):\n");
/* phys_avail[] holds start/end pairs, terminated by a zero end address. */
275 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
278 size = phys_avail[indx + 1] - phys_avail[indx];
279 printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
280 (uintmax_t)phys_avail[indx],
281 (uintmax_t)phys_avail[indx + 1] - 1,
282 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
/* Set up kernel VA submaps (exec, pipe, buffer space) recorded in kmi. */
286 vm_ksubmap_init(&kmi);
288 printf("avail memory = %ju (%ju MB)\n",
289 (uintmax_t)ptoa(cnt.v_free_count),
290 (uintmax_t)ptoa(cnt.v_free_count) / 1048576);
293 vm_pager_bufferinit();
/* Point thread0's UND and SVC mode stacks into its kernel stack. */
294 pcb->un_32.pcb32_und_sp = (u_int)thread0.td_kstack +
295 USPACE_UNDEF_STACK_TOP;
296 pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
297 USPACE_SVC_STACK_TOP;
/* Make the vector page read-only for userland. */
298 vector_page_setprot(VM_PROT_READ);
299 pmap_set_pcb_pagedir(pmap_kernel(), pcb);
/* Trapframe sits immediately below the SVC stack top. */
300 thread0.td_frame = (struct trapframe *)pcb->un_32.pcb32_sp - 1;
/*
 * Back ARM_TP_ADDRESS (per-thread pointer page, presumably -- confirm)
 * either with a locked cache line or with a freshly allocated page.
 */
302 #ifdef ARM_CACHE_LOCK_ENABLE
303 pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
304 arm_lock_cache_line(ARM_TP_ADDRESS);
306 m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
307 pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
/* Run cpu_startup() first thing at the SI_SUB_CPU stage. */
311 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
313 /* Get current clock frequency for the given cpu id. */
/* NOTE(review): function body is elided in this copy; signature only. */
315 cpu_est_clockrate(int cpu_id, uint64_t *rate)
/*
 * fill_regs -- copy the thread's user register state (r0-r12, sp, lr,
 * pc, cpsr) out of its trapframe into 'regs' for ptrace/core dumps.
 * NOTE(review): return type/statement and closing brace elided here.
 */
328 fill_regs(struct thread *td, struct reg *regs)
330 struct trapframe *tf = td->td_frame;
/* r0-r12 are contiguous in the trapframe, so one bcopy covers them all. */
331 bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
332 regs->r_sp = tf->tf_usr_sp;
333 regs->r_lr = tf->tf_usr_lr;
334 regs->r_pc = tf->tf_pc;
335 regs->r_cpsr = tf->tf_spsr;
/*
 * fill_fpregs -- report floating-point register state; this version
 * simply returns an all-zero structure (no FP context is copied here).
 * NOTE(review): return statement and closing brace elided in this copy.
 */
339 fill_fpregs(struct thread *td, struct fpreg *regs)
341 bzero(regs, sizeof(*regs));
/*
 * set_regs -- install user register state from 'regs' into the thread's
 * trapframe (inverse of fill_regs).
 * NOTE(review): return/closing brace elided in this copy.
 */
346 set_regs(struct thread *td, struct reg *regs)
348 struct trapframe *tf = td->td_frame;
350 bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
351 tf->tf_usr_sp = regs->r_sp;
352 tf->tf_usr_lr = regs->r_lr;
353 tf->tf_pc = regs->r_pc;
/*
 * Only the PSR_FLAGS bits of cpsr are taken from userland; mode and
 * interrupt-mask bits are preserved (prevents privilege escalation
 * via ptrace -- presumably; PSR_FLAGS definition not visible here).
 */
354 tf->tf_spsr &= ~PSR_FLAGS;
355 tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
/*
 * set_fpregs / fill_dbregs / set_dbregs -- FP and debug-register
 * accessors.  NOTE(review): all three bodies are elided in this copy;
 * only the signatures remain.
 */
360 set_fpregs(struct thread *td, struct fpreg *regs)
366 fill_dbregs(struct thread *td, struct dbreg *regs)
371 set_dbregs(struct thread *td, struct dbreg *regs)
/*
 * ptrace_read_int -- read one 32-bit word from the traced process's
 * address space at 'addr' into *v, via proc_rwmem() with a
 * kernel-space (UIO_SYSSPACE) iovec.
 * NOTE(review): some uio field assignments are elided in this copy.
 */
378 ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
/* proc_rwmem() may sleep; the proc lock must not be held. */
383 PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
384 iov.iov_base = (caddr_t) v;
385 iov.iov_len = sizeof(u_int32_t);
388 uio.uio_offset = (off_t)addr;
389 uio.uio_resid = sizeof(u_int32_t);
390 uio.uio_segflg = UIO_SYSSPACE;
391 uio.uio_rw = UIO_READ;
393 return proc_rwmem(td->td_proc, &uio);
/*
 * ptrace_write_int -- write the 32-bit word 'v' into the traced
 * process's address space at 'addr' (mirror of ptrace_read_int).
 * NOTE(review): some uio field assignments are elided in this copy.
 */
397 ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
402 PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
/* 'v' is passed by value, so the iovec points at our local copy. */
403 iov.iov_base = (caddr_t) &v;
404 iov.iov_len = sizeof(u_int32_t);
407 uio.uio_offset = (off_t)addr;
408 uio.uio_resid = sizeof(u_int32_t);
409 uio.uio_segflg = UIO_SYSSPACE;
410 uio.uio_rw = UIO_WRITE;
412 return proc_rwmem(td->td_proc, &uio);
/*
 * ptrace_single_step -- software single-step: save the instruction at
 * pc+4 (the next instruction) and overwrite it (with a breakpoint,
 * presumably -- the written value is elided in this copy) so the
 * traced thread traps after executing one instruction.
 * NOTE(review): error-handling branches and braces are elided here.
 */
416 ptrace_single_step(struct thread *td)
/* A previous single-step must have been cleared first. */
421 KASSERT(td->td_md.md_ptrace_instr == 0,
422 ("Didn't clear single step"));
/* Stash the original instruction so it can be restored later. */
425 error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
426 &td->td_md.md_ptrace_instr);
429 error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
432 td->td_md.md_ptrace_instr = 0;
/* Remember where the breakpoint lives for ptrace_clear_single_step(). */
433 td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
/*
 * ptrace_clear_single_step -- undo ptrace_single_step(): restore the
 * saved original instruction at the recorded address and clear the
 * saved state.  No-op if no single-step is pending.
 * NOTE(review): return and closing braces elided in this copy.
 */
440 ptrace_clear_single_step(struct thread *td)
444 if (td->td_md.md_ptrace_instr) {
447 ptrace_write_int(td, td->td_md.md_ptrace_addr,
448 td->td_md.md_ptrace_instr);
/* Mark the single-step slot free for the next ptrace_single_step(). */
450 td->td_md.md_ptrace_instr = 0;
/*
 * ptrace_set_pc -- point the traced thread's user pc at 'addr'.
 * NOTE(review): return statement elided in this copy.
 */
456 ptrace_set_pc(struct thread *td, unsigned long addr)
458 td->td_frame->tf_pc = addr;
/* cpu_pcpu_init -- MD per-CPU setup.  NOTE(review): body elided in this copy. */
463 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
/*
 * NOTE(review): function header elided in this copy -- this fragment is
 * presumably the body of spinlock_enter().  On the first (outermost)
 * nesting level it disables both IRQ (I32_bit) and FIQ (F32_bit),
 * saving the prior CPSR so the matching exit path can restore it.
 */
473 if (td->td_md.md_spinlock_count == 0)
474 td->td_md.md_saved_cspr = disable_interrupts(I32_bit | F32_bit);
475 td->td_md.md_spinlock_count++;
/*
 * NOTE(review): function header elided in this copy -- presumably the
 * body of spinlock_exit().  Re-enables interrupts (restores the saved
 * CPSR) only when the outermost nesting level is released.
 */
486 td->td_md.md_spinlock_count--;
487 if (td->td_md.md_spinlock_count == 0)
488 restore_interrupts(td->td_md.md_saved_cspr);
492 * Clear registers on exec
/*
 * exec_setregs -- reset the thread's trapframe for a fresh image:
 * zero everything, then set the user stack pointer and entry point.
 * NOTE(review): interior lines are elided in this copy (notably any
 * tf_pc assignment); annotated excerpt only.
 */
495 exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
497 struct trapframe *tf = td->td_frame;
499 memset(tf, 0, sizeof(*tf));
500 tf->tf_usr_sp = stack;
501 tf->tf_usr_lr = entry;
/* Poison value -- svc lr should never be used from a user trapframe. */
502 tf->tf_svc_lr = 0x77777777;
/* Start in unprivileged USR32 mode. */
504 tf->tf_spsr = PSR_USR32_MODE;
508 * Get machine context.
/*
 * get_mcontext -- export the thread's trapframe into the __gregs array
 * of 'mcp' (used by sendsig() above and by getcontext(2)).
 * 'clear_ret' requests that the syscall return registers be cleared
 * (the clearing statements themselves are elided in this copy).
 * NOTE(review): return statement and closing brace elided here.
 */
511 get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
513 struct trapframe *tf = td->td_frame;
514 __greg_t *gr = mcp->__gregs;
516 if (clear_ret & GET_MC_CLEAR_RET)
519 gr[_REG_R0] = tf->tf_r0;
520 gr[_REG_R1] = tf->tf_r1;
521 gr[_REG_R2] = tf->tf_r2;
522 gr[_REG_R3] = tf->tf_r3;
523 gr[_REG_R4] = tf->tf_r4;
524 gr[_REG_R5] = tf->tf_r5;
525 gr[_REG_R6] = tf->tf_r6;
526 gr[_REG_R7] = tf->tf_r7;
527 gr[_REG_R8] = tf->tf_r8;
528 gr[_REG_R9] = tf->tf_r9;
529 gr[_REG_R10] = tf->tf_r10;
530 gr[_REG_R11] = tf->tf_r11;
531 gr[_REG_R12] = tf->tf_r12;
532 gr[_REG_SP] = tf->tf_usr_sp;
533 gr[_REG_LR] = tf->tf_usr_lr;
534 gr[_REG_PC] = tf->tf_pc;
535 gr[_REG_CPSR] = tf->tf_spsr;
541 * Set machine context.
543 * However, we don't set any but the user modifiable flags, and we won't
544 * touch the cs selector.
/*
 * set_mcontext -- install a machine context (inverse of get_mcontext).
 * Note: cpsr is copied verbatim here; sigreturn() below validates the
 * mode/interrupt bits before calling this, but other callers must do
 * their own validation.
 * NOTE(review): return statement and closing brace elided in this copy;
 * the stale "cs selector" wording in the comment above is inherited
 * from x86 and does not apply to ARM.
 */
547 set_mcontext(struct thread *td, const mcontext_t *mcp)
549 struct trapframe *tf = td->td_frame;
550 const __greg_t *gr = mcp->__gregs;
552 tf->tf_r0 = gr[_REG_R0];
553 tf->tf_r1 = gr[_REG_R1];
554 tf->tf_r2 = gr[_REG_R2];
555 tf->tf_r3 = gr[_REG_R3];
556 tf->tf_r4 = gr[_REG_R4];
557 tf->tf_r5 = gr[_REG_R5];
558 tf->tf_r6 = gr[_REG_R6];
559 tf->tf_r7 = gr[_REG_R7];
560 tf->tf_r8 = gr[_REG_R8];
561 tf->tf_r9 = gr[_REG_R9];
562 tf->tf_r10 = gr[_REG_R10];
563 tf->tf_r11 = gr[_REG_R11];
564 tf->tf_r12 = gr[_REG_R12];
565 tf->tf_usr_sp = gr[_REG_SP];
566 tf->tf_usr_lr = gr[_REG_LR];
567 tf->tf_pc = gr[_REG_PC];
568 tf->tf_spsr = gr[_REG_CPSR];
/*
 * sigreturn(2) -- return from a signal handler: copy the ucontext back
 * in from userland, validate it, and restore register and signal state.
 * NOTE(review): the function header and several interior lines are
 * elided in this copy; annotated excerpt only.
 */
579 struct sigreturn_args /* {
580 const struct __ucontext *sigcntxp;
583 struct proc *p = td->td_proc;
585 struct trapframe *tf;
/* Fetch the saved context from the user-supplied pointer. */
590 if (copyin(uap->sigcntxp, &sf, sizeof(sf)))
593 * Make sure the processor mode has not been tampered with and
594 * interrupts have not been disabled.
/* Reject anything but USR32 mode with IRQ/FIQ enabled. */
596 spsr = sf.sf_uc.uc_mcontext.__gregs[_REG_CPSR];
597 if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
598 (spsr & (I32_bit | F32_bit)) != 0)
600 /* Restore register context. */
602 set_mcontext(td, &sf.sf_uc.uc_mcontext);
604 /* Restore signal mask. */
606 td->td_sigmask = sf.sf_uc.uc_sigmask;
/* Never allow SIGKILL/SIGSTOP to be masked. */
607 SIG_CANTMASK(td->td_sigmask);
/* Registers were restored directly; do not clobber them on return. */
611 return (EJUSTRETURN);
616 * Construct a PCB from a trapframe. This is called from kdb_trap() where
617 * we want to start a backtrace from the function that caused us to enter
618 * the debugger. We have the context in the trapframe, but base the trace
619 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
620 * enough for a backtrace.
/*
 * makectx -- copy the callee-saved/backtrace-relevant registers
 * (r8-r12, pc, lr, sp) from 'tf' into 'pcb' for the debugger.
 * NOTE(review): closing brace not visible in this copy.
 */
623 makectx(struct trapframe *tf, struct pcb *pcb)
625 pcb->un_32.pcb32_r8 = tf->tf_r8;
626 pcb->un_32.pcb32_r9 = tf->tf_r9;
627 pcb->un_32.pcb32_r10 = tf->tf_r10;
628 pcb->un_32.pcb32_r11 = tf->tf_r11;
629 pcb->un_32.pcb32_r12 = tf->tf_r12;
630 pcb->un_32.pcb32_pc = tf->tf_pc;
631 pcb->un_32.pcb32_lr = tf->tf_usr_lr;
632 pcb->un_32.pcb32_sp = tf->tf_usr_sp;