2 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
3 * Copyright (C) 1995, 1996 TooLs GmbH.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by TooLs GmbH.
17 * 4. The name of TooLs GmbH may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * Copyright (C) 2001 Benno Rice
33 * All rights reserved.
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
44 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
49 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
50 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
51 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
52 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
53 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
57 #include <sys/cdefs.h>
58 __FBSDID("$FreeBSD$");
60 #include "opt_compat.h"
62 #include "opt_kstack_pages.h"
63 #include "opt_msgbuf.h"
65 #include <sys/param.h>
67 #include <sys/systm.h>
73 #include <sys/eventhandler.h>
75 #include <sys/imgact.h>
77 #include <sys/kernel.h>
79 #include <sys/linker.h>
81 #include <sys/malloc.h>
83 #include <sys/msgbuf.h>
84 #include <sys/mutex.h>
85 #include <sys/ptrace.h>
86 #include <sys/reboot.h>
87 #include <sys/signalvar.h>
88 #include <sys/sysctl.h>
89 #include <sys/sysent.h>
90 #include <sys/sysproto.h>
91 #include <sys/ucontext.h>
93 #include <sys/vmmeter.h>
94 #include <sys/vnode.h>
96 #include <net/netisr.h>
99 #include <vm/vm_extern.h>
100 #include <vm/vm_kern.h>
101 #include <vm/vm_page.h>
102 #include <vm/vm_map.h>
103 #include <vm/vm_object.h>
104 #include <vm/vm_pager.h>
106 #include <machine/altivec.h>
107 #include <machine/bat.h>
108 #include <machine/cpu.h>
109 #include <machine/elf.h>
110 #include <machine/fpu.h>
111 #include <machine/hid.h>
112 #include <machine/kdb.h>
113 #include <machine/md_var.h>
114 #include <machine/metadata.h>
115 #include <machine/mmuvar.h>
116 #include <machine/pcb.h>
117 #include <machine/reg.h>
118 #include <machine/sigframe.h>
119 #include <machine/spr.h>
120 #include <machine/trap.h>
121 #include <machine/vmparam.h>
125 #include <dev/ofw/openfirm.h>
128 extern vm_offset_t ksym_start, ksym_end;
132 int cacheline_size = 32;
133 int hw_direct_map = 1;
135 struct pcpu __pcpu[MAXCPU];
137 static struct trapframe frame0;
139 char machine[] = "powerpc";
140 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
142 static void cpu_startup(void *);
143 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
145 SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
146 CTLFLAG_RD, &cacheline_size, 0, "");
148 u_int powerpc_init(u_int, u_int, u_int, void *);
150 int save_ofw_mapping(void);
151 int restore_ofw_mapping(void);
153 void install_extint(void (*)(void));
155 int setfault(faultbuf); /* defined in locore.S */
157 static int grab_mcontext(struct thread *, mcontext_t *, int);
159 void asm_panic(char *);
164 struct pmap ofw_pmap;
167 struct bat battable[16];
169 struct kva_md_info kmi;
/*
 * Final-shutdown event handler (registered via EVENTHANDLER_REGISTER in
 * cpu_startup() below).  On RB_HALT it presumably returns control to Open
 * Firmware -- TODO confirm against the original file.
 * NOTE(review): the return type, braces and body are missing from this
 * extract; restore from the pristine source before building.
 */
172 powerpc_ofw_shutdown(void *junk, int howto)
174 	if (howto & RB_HALT) {
/*
 * SYSINIT hook (SI_SUB_CPU, see above): late CPU startup.  Announces the
 * CPU, prints the physical/available memory map, initializes the kernel VA
 * submaps and pager buffers, and registers the Open Firmware shutdown
 * handler.
 * NOTE(review): many lines (braces, declarations, several statements) are
 * elided in this extract.
 */
181 cpu_startup(void *dummy)
185 	 * Initialise the decrementer-based clock.
190 	 * Good {morning,afternoon,evening,night}.
192 	cpu_setup(PCPU_GET(cpuid));
	/* Report physical memory; ptoa() converts page counts to bytes. */
197 	printf("real memory  = %ld (%ld MB)\n", ptoa(physmem),
198 	    ptoa(physmem) / 1048576);
202 		printf("available KVA = %zd (%zd MB)\n",
203 		    virtual_end - virtual_avail,
204 		    (virtual_end - virtual_avail) / 1048576);
207 	 * Display any holes after the first chunk of extended memory.
	/* phys_avail[] is a zero-terminated list of (start, end) pairs. */
212 		printf("Physical memory chunk(s):\n");
213 		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
214 			int size1 = phys_avail[indx + 1] - phys_avail[indx];
216 			printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
217 			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
222 	vm_ksubmap_init(&kmi);
224 	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
225 	    ptoa(cnt.v_free_count) / 1048576);
228 	 * Set up buffers, so they can be used to read disk labels.
231 	vm_pager_bufferinit();
	/* Hook powerpc_ofw_shutdown (above) into the final shutdown chain. */
233 	EVENTHANDLER_REGISTER(shutdown_final, powerpc_ofw_shutdown, 0,
237 extern char kernel_text[], _end[];
239 extern void *testppc64, *testppc64size;
240 extern void *restorebridge, *restorebridgesize;
241 extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
243 extern void *rstcode, *rstsize;
245 extern void *trapcode, *trapcode64, *trapsize;
246 extern void *alitrap, *alisize;
247 extern void *dsitrap, *dsisize;
248 extern void *decrint, *decrsize;
249 extern void *extint, *extsize;
250 extern void *dblow, *dbsize;
/*
 * Early machine-dependent initialization for 32-bit PowerPC (AIM), called
 * from locore before mi_startup().  Visible responsibilities in this
 * extract: parse loader metadata, set up pcpu/thread0, disable translation,
 * measure the cache line size with dcbz, probe 64-bit (bridge-mode)
 * capability, install the trap vectors, pick a pmap module, bootstrap the
 * pmap, and return an aligned address just below thread0's pcb (used as the
 * initial stack pointer).
 * NOTE(review): declarations, braces and many statements are elided in this
 * extract; do not build from this copy.
 */
253 powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
261 	uint32_t	msr, scratch;
262 	uint8_t	*cache_check;
270 	 * Parse metadata if present and fetch parameters.  Must be done
271 	 * before console is inited so cninit gets the right value of
275 	preload_metadata = mdp;
276 	kmdp = preload_search_by_type("elf kernel");
278 		boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
279 		kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
280 		end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
282 		ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
283 		ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
289 	 * Init params/tunables that can be overridden by the loader
294 	 * Start initializing proc0 and thread0.
296 	proc_linkup0(&proc0, &thread0);
297 	thread0.td_frame = &frame0;
300 	 * Set up per-cpu data.
303 	pcpu_init(pc, 0, sizeof(struct pcpu));
304 	pc->pc_curthread = &thread0;
	/* SPRG0 holds the pcpu pointer on PowerPC. */
307 	__asm __volatile("mtsprg 0, %0" :: "r"(pc));
310 	 * Init mutexes, which we use heavily in PMAP
316 	 * Install the OF client interface
322 	 * Initialize the console before printing anything.
327 	 * Complain if there is no metadata.
329 	if (mdp == NULL || kmdp == NULL) {
330 		printf("powerpc_init: no loader metadata.\n");
340 	 * PowerPC 970 CPUs have a misfeature requested by Apple that makes
341 	 * them pretend they have a 32-byte cacheline. Turn this off
342 	 * before we measure the cacheline size.
345 	switch (mfpvr() >> 16) {
350 		scratch = mfspr64upper(SPR_HID5,msr);
351 		scratch &= ~HID5_970_DCBZ_SIZE_HI;
352 		mtspr64(SPR_HID5, scratch, mfspr(SPR_HID5), msr);
357 	 * Initialize the interrupt tables and figure out our cache line
358 	 * size and whether or not we need the 64-bit bridge code.
362 	 * Disable translation in case the vector area hasn't been
367 	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);
371 	 * Measure the cacheline size using dcbz
373 	 * Use EXC_PGM as a playground. We are about to overwrite it
374 	 * anyway, we know it exists, and we know it is cache-aligned.
377 	cache_check = (void *)EXC_PGM;
	/* Fill 256 bytes with a non-zero pattern, then dcbz one line. */
379 	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
380 		cache_check[cacheline_size] = 0xff;
382 	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");
384 	/* Find the first byte dcbz did not zero to get the cache line size */
385 	for (cacheline_size = 0; cacheline_size < 0x100 &&
386 	    cache_check[cacheline_size] == 0; cacheline_size++);
388 	/* Work around psim bug */
389 	if (cacheline_size == 0) {
390 		printf("WARNING: cacheline size undetermined, setting to 32\n");
395 	 * Figure out whether we need to use the 64 bit PMAP. This works by
396 	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
397 	 * and setting ppc64 = 0 if that causes a trap.
402 	bcopy(&testppc64, (void *)EXC_PGM,  (size_t)&testppc64size);
403 	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);
411 	    : "=r"(scratch), "=r"(ppc64));
414 		cpu_features |= PPC_FEATURE_64;
417 	 * Now copy restorebridge into all the handlers, if necessary,
418 	 * and set up the trap tables.
421 	if (cpu_features & PPC_FEATURE_64) {
422 		/* Patch the two instances of rfi -> rfid */
423 		bcopy(&rfid_patch,&rfi_patch1,4);
425 		/* rfi_patch2 is at the end of dbleave */
426 		bcopy(&rfid_patch,&rfi_patch2,4);
430 		 * Copy a code snippet to restore 32-bit bridge mode
431 		 * to the top of every non-generic trap handler
434 		trap_offset += (size_t)&restorebridgesize;
435 		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
436 		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
437 		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
438 		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
439 		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
440 		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
441 		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
444 		 * Set the common trap entry point to the one that
445 		 * knows to restore 32-bit operation on execution.
448 		generictrap = &trapcode64;
450 		generictrap = &trapcode;
	/* Install the individual trap vectors, offset past the bridge stub. */
454 	bcopy(&rstcode, (void *)(EXC_RST + trap_offset),  (size_t)&rstsize);
456 	bcopy(generictrap, (void *)EXC_RST,  (size_t)&trapsize);
460 	bcopy(&dblow,	(void *)(EXC_MCHK + trap_offset), (size_t)&dbsize);
461 	bcopy(&dblow,   (void *)(EXC_PGM + trap_offset),  (size_t)&dbsize);
462 	bcopy(&dblow,   (void *)(EXC_TRC + trap_offset),  (size_t)&dbsize);
463 	bcopy(&dblow,   (void *)(EXC_BPT + trap_offset),  (size_t)&dbsize);
465 	bcopy(generictrap, (void *)EXC_MCHK, (size_t)&trapsize);
466 	bcopy(generictrap, (void *)EXC_PGM,  (size_t)&trapsize);
467 	bcopy(generictrap, (void *)EXC_TRC,  (size_t)&trapsize);
468 	bcopy(generictrap, (void *)EXC_BPT,  (size_t)&trapsize);
470 	bcopy(&dsitrap,  (void *)(EXC_DSI + trap_offset),  (size_t)&dsisize);
471 	bcopy(&alitrap,  (void *)(EXC_ALI + trap_offset),  (size_t)&alisize);
472 	bcopy(generictrap, (void *)EXC_ISI,  (size_t)&trapsize);
473 	bcopy(generictrap, (void *)EXC_EXI,  (size_t)&trapsize);
474 	bcopy(generictrap, (void *)EXC_FPU,  (size_t)&trapsize);
475 	bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
476 	bcopy(generictrap, (void *)EXC_SC,   (size_t)&trapsize);
477 	bcopy(generictrap, (void *)EXC_FPA,  (size_t)&trapsize);
478 	bcopy(generictrap, (void *)EXC_VEC,  (size_t)&trapsize);
479 	bcopy(generictrap, (void *)EXC_VECAST, (size_t)&trapsize);
480 	bcopy(generictrap, (void *)EXC_THRM, (size_t)&trapsize);
	/* Make the freshly copied vectors visible to instruction fetch. */
481 	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);
490 	 * Choose a platform module so we can get the physical memory map.
493 	platform_probe_and_attach();
496 	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
497 	 * in case the platform module had a better idea of what we
500 	if (cpu_features & PPC_FEATURE_64)
501 		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
503 		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
505 	pmap_bootstrap(startkernel, endkernel);
	/* Re-enable translation/machine-check now that the pmap is up. */
506 	mtmsr(mfmsr() | PSL_IR|PSL_DR|PSL_ME|PSL_RI);
510 	 * Initialize params/tunables that are derived from memsize
512 	init_param2(physmem);
515 	 * Grab booted kernel's name
517 	env = getenv("kernelname");
519 		strlcpy(kernelname, env, sizeof(kernelname));
524 	 * Finish setting up thread0.
526 	thread0.td_pcb = (struct pcb *)
527 	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
528 	    sizeof(struct pcb)) & ~15);
529 	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
530 	pc->pc_curpcb = thread0.td_pcb;
532 	/* Initialise the message buffer. */
533 	msgbufinit(msgbufp, MSGBUF_SIZE);
536 	if (boothowto & RB_KDB)
537 		kdb_enter(KDB_WHY_BOOTFLAGS,
538 		    "Boot flags requested debugger");
	/* 16-byte-aligned initial stack, just below thread0's pcb. */
541 	return (((uintptr_t)thread0.td_pcb - 16) & ~15);
/*
 * Word-at-a-time bzero: align the pointer byte-by-byte, clear eight u_longs
 * per iteration while possible, then single u_longs; a byte-granularity tail
 * loop is presumably elided from this extract along with the declarations
 * and closing braces -- confirm against the original file.
 */
545 bzero(void *buf, size_t len)
	/* Align p to a u_long boundary (byte stores, elided here). */
551 	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
	/* Unrolled: clear 8 u_longs per pass. */
556 	while (len >= sizeof(u_long) * 8) {
558 		*((u_long*) p + 1) = 0;
559 		*((u_long*) p + 2) = 0;
560 		*((u_long*) p + 3) = 0;
561 		len -= sizeof(u_long) * 8;
562 		*((u_long*) p + 4) = 0;
563 		*((u_long*) p + 5) = 0;
564 		*((u_long*) p + 6) = 0;
565 		*((u_long*) p + 7) = 0;
566 		p += sizeof(u_long) * 8;
	/* Remaining whole words. */
569 	while (len >= sizeof(u_long)) {
571 		len -= sizeof(u_long);
/*
 * Deliver a signal: build a struct sigframe (saved machine context plus
 * siginfo) on the user stack -- the alternate signal stack if requested and
 * not already in use -- point the trapframe at the handler and the signal
 * trampoline, and copy the frame out to userland.  Called with the proc
 * locked and ps_mtx held (asserted below).
 * NOTE(review): declarations, braces and several statements are elided in
 * this extract.
 */
582 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
584 	struct trapframe *tf;
585 	struct sigframe *sfp;
590 	int oonstack, rndfsize;
596 	PROC_LOCK_ASSERT(p, MA_OWNED);
597 	sig = ksi->ksi_signo;
598 	code = ksi->ksi_code;
600 	mtx_assert(&psp->ps_mtx, MA_OWNED);
	/* fixreg[1] is the user stack pointer on PowerPC. */
602 	oonstack = sigonstack(tf->fixreg[1]);
	/* Keep the user stack 16-byte aligned. */
604 	rndfsize = ((sizeof(sf) + 15) / 16) * 16;
606 	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	/* Snapshot the current machine state into the local sigframe. */
612 	memset(&sf, 0, sizeof(sf));
613 	grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
614 	sf.sf_uc.uc_sigmask = *mask;
615 	sf.sf_uc.uc_stack = td->td_sigstk;
616 	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
617 	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
619 	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
622 	 * Allocate and validate space for the signal handler context.
624 	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
625 	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
626 		sfp = (struct sigframe *)(td->td_sigstk.ss_sp +
627 		    td->td_sigstk.ss_size - rndfsize);
629 		sfp = (struct sigframe *)(tf->fixreg[1] - rndfsize);
633 	 * Translate the signal if appropriate (Linux emu ?)
635 	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
636 		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
639 	 * Save the floating-point state, if necessary, then copy it.
644 	 * Set up the registers to return to sigcode.
646 	 *   r1/sp - sigframe ptr
647 	 *   lr    - sig function, dispatched to by blrl in trampoline
649 	 *   r4    - SIGINFO ? &siginfo : exception code
651 	 *   srr0  - trampoline function addr
653 	tf->lr = (register_t)catcher;
654 	tf->fixreg[1] = (register_t)sfp;
655 	tf->fixreg[FIRSTARG] = sig;
656 	tf->fixreg[FIRSTARG+2] = (register_t)&sfp->sf_uc;
657 	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
659 		 * Signal handler installed with SA_SIGINFO.
661 		tf->fixreg[FIRSTARG+1] = (register_t)&sfp->sf_si;
664 		 * Fill siginfo structure.
666 		sf.sf_si = ksi->ksi_info;
667 		sf.sf_si.si_signo = sig;
		/* Fault address for DSI traps comes from the DAR register. */
668 		sf.sf_si.si_addr = (void *)((tf->exc == EXC_DSI) ?
669 		    tf->cpu.aim.dar : tf->srr0);
671 		/* Old FreeBSD-style arguments. */
672 		tf->fixreg[FIRSTARG+1] = code;
673 		tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ?
674 		    tf->cpu.aim.dar : tf->srr0;
676 	mtx_unlock(&psp->ps_mtx);
	/* Resume at the signal trampoline, located just below PS_STRINGS. */
679 	tf->srr0 = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));
682 	 * copy the frame out to userland.
684 	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
686 		 * Process has trashed its stack. Kill it.
688 		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
693 	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
694 	    tf->srr0, tf->fixreg[1]);
697 	mtx_lock(&psp->ps_mtx);
/*
 * sigreturn(2): copy the ucontext back in from user space, restore the
 * machine context (set_mcontext validates it), and restore the signal mask.
 * Returns EJUSTRETURN so the syscall return path leaves the restored
 * registers untouched.
 * NOTE(review): braces and error-path lines are elided in this extract.
 */
701 sigreturn(struct thread *td, struct sigreturn_args *uap)
706 	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
708 	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
709 		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
713 	error = set_mcontext(td, &uc.uc_mcontext);
717 	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
719 	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
720 	    td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
722 	return (EJUSTRETURN);
725 #ifdef COMPAT_FREEBSD4
/*
 * COMPAT_FREEBSD4 sigreturn: the FreeBSD 4.x context layout is handled by
 * the native sigreturn here, so just forward with a cast.
 * NOTE(review): return type and braces are elided in this extract.
 */
727 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
730 	return sigreturn(td, (struct sigreturn_args *)uap);
735  * Construct a PCB from a trapframe. This is called from kdb_trap() where
736  * we want to start a backtrace from the function that caused us to enter
737  * the debugger. We have the context in the trapframe, but base the trace
738  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
739  * enough for a backtrace.
/* NOTE(review): return type, braces and any other field copies are elided. */
742 makectx(struct trapframe *tf, struct pcb *pcb)
	/* srr0 is the interrupted PC; fixreg[1] is the stack pointer. */
745 	pcb->pcb_lr = tf->srr0;
746 	pcb->pcb_sp = tf->fixreg[1];
750  * get_mcontext/sendsig helper routine that doesn't touch the
/*
 * Fill *mcp from td's trapframe plus any live FPU/AltiVec state.  With
 * GET_MC_CLEAR_RET the (elided) code presumably zeroes the return-value
 * registers -- confirm against the original file.  Always returns with
 * mc_len set so set_mcontext() can validate round-trips.
 * NOTE(review): braces and several statements are elided in this extract.
 */
754 grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
760 	memset(mcp, 0, sizeof(mcontext_t));
762 	mcp->mc_vers = _MC_VERSION;
764 	memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
765 	if (flags & GET_MC_CLEAR_RET) {
771 	 * This assumes that floating-point context is *not* lazy,
772 	 * so if the thread has used FP there would have been a
773 	 * FP-unavailable exception that would have set things up
776 	if (pcb->pcb_flags & PCB_FPU) {
777 		KASSERT(td == curthread,
778 		    ("get_mcontext: fp save not curthread"));
782 		mcp->mc_flags |= _MC_FP_VALID;
783 		memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
784 		memcpy(mcp->mc_fpreg, pcb->pcb_fpu.fpr, 32*sizeof(double));
788 	 * Repeat for Altivec context
791 	if (pcb->pcb_flags & PCB_VEC) {
792 		KASSERT(td == curthread,
793 		    ("get_mcontext: fp save not curthread"));
797 		mcp->mc_flags |= _MC_AV_VALID;
798 		mcp->mc_vscr  = pcb->pcb_vec.vscr;
799 		mcp->mc_vrsave =  pcb->pcb_vec.vrsave;
800 		memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec));
803 	mcp->mc_len = sizeof(*mcp);
/*
 * Public get_mcontext: grab the context via grab_mcontext() and additionally
 * record whether the thread is on its alternate signal stack (this part
 * needs the proc lock, which grab_mcontext deliberately avoids).
 * NOTE(review): braces and the return statement are elided in this extract.
 */
809 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
813 	error = grab_mcontext(td, mcp, flags);
815 		PROC_LOCK(curthread->td_proc);
816 		mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
817 		PROC_UNLOCK(curthread->td_proc);
/*
 * Install a user-supplied mcontext into the thread: validate version/length,
 * refuse attempts to change privileged MSR bits, then copy the trapframe and
 * any valid FP/AltiVec state into the pcb.
 * NOTE(review): braces, error returns and some statements are elided in this
 * extract.
 */
824 set_mcontext(struct thread *td, const mcontext_t *mcp)
827 	struct trapframe *tf;
832 	if (mcp->mc_vers != _MC_VERSION ||
833 	    mcp->mc_len != sizeof(*mcp))
837 	 * Don't let the user set privileged MSR bits
839 	if ((mcp->mc_srr1 & PSL_USERSTATIC) != (tf->srr1 & PSL_USERSTATIC)) {
843 	memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));
845 	if (mcp->mc_flags & _MC_FP_VALID) {
		/* Elided code presumably enables FP in the pcb first. */
846 		if ((pcb->pcb_flags & PCB_FPU) != PCB_FPU) {
851 		memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
852 		memcpy(pcb->pcb_fpu.fpr, mcp->mc_fpreg, 32*sizeof(double));
855 	if (mcp->mc_flags & _MC_AV_VALID) {
856 		if ((pcb->pcb_flags & PCB_VEC) != PCB_VEC) {
861 		pcb->pcb_vec.vscr = mcp->mc_vscr;
862 		pcb->pcb_vec.vrsave = mcp->mc_vrsave;
863 		memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec));
876  * Flush the D-cache for non-DMA I/O so that the I-cache can
877  * be made coherent later.
/* NOTE(review): return type, braces and body are elided in this extract. */
880 cpu_flush_dcache(void *ptr, size_t len)
895  * Shutdown the CPU as much as possible.
/*
 * NOTE(review): the comment above likely belongs to an elided cpu_halt();
 * the fragment below looks like cpu_idle(): sanity-check that external
 * interrupts are enabled, then, if power-saving is supported, set PSL_POW
 * to doze until the next interrupt.  Function headers, declarations and
 * braces are missing from this extract -- confirm against the original.
 */
912 	if ((msr & PSL_EE) != PSL_EE) {
913 		struct thread *td = curthread;
914 		printf("td msr %x\n", td->td_md.md_saved_msr);
915 		panic("ints disabled in idleproc!");
918 	if (powerpc_pow_enabled) {
920 		mtmsr(msr | PSL_POW);
/*
 * Wake a CPU out of cpu_idle().
 * NOTE(review): return type, braces and body are elided in this extract.
 */
926 cpu_idle_wakeup(int cpu)
933  * Set up registers on exec.
936 exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
938 	struct trapframe *tf;
939 	struct ps_strings arginfo;
	/* Fresh trapframe; fixreg[1] (sp) rounded down to 16-byte alignment. */
942 	bzero(tf, sizeof *tf);
943 	tf->fixreg[1] = -roundup(-stack + 8, 16);
946 	 * XXX Machine-independent code has already copied arguments and
947 	 * XXX environment to userland.  Get them back here.
949 	(void)copyin((char *)PS_STRINGS, &arginfo, sizeof(arginfo));
952 	 * Set up arguments for _start():
953 	 *	_start(argc, argv, envp, obj, cleanup, ps_strings);
956 	 * 	- obj and cleanup are the auxiliary and termination
957 	 *	  vectors.  They are fixed up by ld.elf_so.
958 	 *	- ps_strings is a NetBSD extension, and will be
959 	 * 	  ignored by executables which are strictly
960 	 *	  compliant with the SVR4 ABI.
962 	 * XXX We have to set both regs and retval here due to different
963 	 * XXX calling convention in trap.c and init_main.c.
966 	 * XXX PG: these get overwritten in the syscall return code.
967 	 * execve() should return EJUSTRETURN, like it does on NetBSD.
968 	 * Emulate by setting the syscall return value cells. The
969 	 * registers still have to be set for init's fork trampoline.
971 	td->td_retval[0] = arginfo.ps_nargvstr;
972 	td->td_retval[1] = (register_t)arginfo.ps_argvstr;
973 	tf->fixreg[3] = arginfo.ps_nargvstr;
974 	tf->fixreg[4] = (register_t)arginfo.ps_argvstr;
975 	tf->fixreg[5] = (register_t)arginfo.ps_envstr;
976 	tf->fixreg[6] = 0;			/* auxiliary vector */
977 	tf->fixreg[7] = 0;			/* termination vector */
978 	tf->fixreg[8] = (register_t)PS_STRINGS;	/* NetBSD extension */
	/* User-mode MSR with the default FP-exception mode. */
981 	tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
982 	td->td_pcb->pcb_flags = 0;
986 fill_regs(struct thread *td, struct reg *regs)
988 struct trapframe *tf;
991 memcpy(regs, tf, sizeof(struct reg));
/*
 * ptrace support: PowerPC exposes no debug registers, so this is a stub.
 * NOTE(review): braces and the return value are elided in this extract.
 */
997 fill_dbregs(struct thread *td, struct dbreg *dbregs)
999 	/* No debug registers on PowerPC */
/*
 * ptrace support: export the FPU state.  Zeroed if the thread never used
 * the FPU (PCB_FPU clear), otherwise copied straight from the pcb.
 * NOTE(review): braces and the return are elided in this extract.
 */
1004 fill_fpregs(struct thread *td, struct fpreg *fpregs)
1010 	if ((pcb->pcb_flags & PCB_FPU) == 0)
1011 		memset(fpregs, 0, sizeof(struct fpreg));
1013 		memcpy(fpregs, &pcb->pcb_fpu, sizeof(struct fpreg));
/*
 * ptrace support: overwrite the thread's trapframe from a struct reg
 * (inverse of fill_regs above).
 * NOTE(review): braces and the return are elided in this extract.
 */
1019 set_regs(struct thread *td, struct reg *regs)
1021 	struct trapframe *tf;
1024 	memcpy(tf, regs, sizeof(struct reg));
/*
 * ptrace support: debug-register stub, matching fill_dbregs.
 * NOTE(review): braces and the return value are elided in this extract.
 */
1030 set_dbregs(struct thread *td, struct dbreg *dbregs)
1032 	/* No debug registers on PowerPC */
/*
 * ptrace support: install FPU state into the pcb.  The elided branch when
 * PCB_FPU is clear presumably enables FP first -- confirm against the
 * original file.
 */
1037 set_fpregs(struct thread *td, struct fpreg *fpregs)
1042 	if ((pcb->pcb_flags & PCB_FPU) == 0)
1044 	memcpy(&pcb->pcb_fpu, fpregs, sizeof(struct fpreg));
/*
 * ptrace support: set the thread's resume PC (srr0 holds the PC at
 * trap/interrupt time on PowerPC).
 * NOTE(review): braces and the return are elided in this extract.
 */
1050 ptrace_set_pc(struct thread *td, unsigned long addr)
1052 	struct trapframe *tf;
1055 	tf->srr0 = (register_t)addr;
/*
 * ptrace support: enable single-step for the thread, presumably by setting
 * PSL_SE in srr1 (the setting line is elided; cf. ptrace_clear_single_step).
 */
1061 ptrace_single_step(struct thread *td)
1063 	struct trapframe *tf;
/*
 * ptrace support: disable single-step by clearing PSL_SE (the MSR
 * single-step enable bit) in the saved srr1.
 * NOTE(review): braces and the return are elided in this extract.
 */
1072 ptrace_clear_single_step(struct thread *td)
1074 	struct trapframe *tf;
1077 	tf->srr1 &= ~PSL_SE;
/*
 * Debugger support: clear single-step in the frame the debugger entered on.
 * NOTE(review): return type and braces are elided in this extract.
 */
1083 kdb_cpu_clear_singlestep(void)
1086 	kdb_frame->srr1 &= ~PSL_SE;
/*
 * Debugger support: request single-step on return from the debugger by
 * setting PSL_SE in the saved srr1.
 * NOTE(review): return type and braces are elided in this extract.
 */
1090 kdb_cpu_set_singlestep(void)
1093 	kdb_frame->srr1 |= PSL_SE;
1097  * Initialise a struct pcpu.
/* NOTE(review): return type, braces and body are elided in this extract. */
1100 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
/*
 * Enter a spin-lock critical section: disable interrupts on the first
 * (outermost) acquisition, saving the old MSR so spinlock_exit can restore
 * it; nested acquisitions just bump the per-thread count.
 * NOTE(review): return type, braces and declarations are elided.
 */
1106 spinlock_enter(void)
1111 	if (td->td_md.md_spinlock_count == 0)
1112 		td->td_md.md_saved_msr = intr_disable();
1113 	td->td_md.md_spinlock_count++;
/*
 * NOTE(review): fragment of spinlock_exit (header and braces elided):
 * drop the nesting count and restore the saved MSR -- re-enabling
 * interrupts -- when the outermost section is left.
 */
1124 	td->td_md.md_spinlock_count--;
1125 	if (td->td_md.md_spinlock_count == 0)
1126 		intr_restore(td->td_md.md_saved_msr);
1130  * kcopy(const void *src, void *dst, size_t len);
1132  * Copy len bytes from src to dst, aborting if we encounter a fatal
1135  * kcopy() _must_ save and restore the old fault handler since it is
1136  * called by uiomove(), which may be in the path of servicing a non-fatal
/*
 * NOTE(review): braces, the success return and the setfault bookkeeping
 * between these lines are elided in this extract.
 */
1140 kcopy(const void *src, void *dst, size_t len)
1143 	faultbuf	env, *oldfault;
1146 	td = PCPU_GET(curthread);
	/* Save the current onfault handler; setfault() installs ours. */
1147 	oldfault = td->td_pcb->pcb_onfault;
1148 	if ((rv = setfault(env)) != 0) {
		/* Fault path: restore the old handler before bailing out. */
1149 		td->td_pcb->pcb_onfault = oldfault;
1153 	memcpy(dst, src, len);
1155 	td->td_pcb->pcb_onfault = oldfault;
/*
 * Panic entry point callable from assembly (prototype above); presumably
 * just forwards pstr to panic() -- body elided in this extract.
 */
1160 asm_panic(char *pstr)
1165 int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */
1168 db_trap_glue(struct trapframe *frame)
1170 if (!(frame->srr1 & PSL_PR)
1171 && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
1172 || (frame->exc == EXC_PGM
1173 && (frame->srr1 & 0x20000))
1174 || frame->exc == EXC_BPT
1175 || frame->exc == EXC_DSI)) {
1176 int type = frame->exc;
1177 if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
1178 type = T_BREAKPOINT;
1180 return (kdb_trap(type, 0, frame));