2 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
3 * Copyright (C) 1995, 1996 TooLs GmbH.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by TooLs GmbH.
17 * 4. The name of TooLs GmbH may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * Copyright (C) 2001 Benno Rice
33 * All rights reserved.
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
44 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
49 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
50 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
51 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
52 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
53 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
57 #include <sys/cdefs.h>
58 __FBSDID("$FreeBSD$");
60 #include "opt_compat.h"
62 #include "opt_kstack_pages.h"
63 #include "opt_msgbuf.h"
65 #include <sys/param.h>
67 #include <sys/systm.h>
73 #include <sys/eventhandler.h>
75 #include <sys/imgact.h>
77 #include <sys/kernel.h>
79 #include <sys/linker.h>
81 #include <sys/malloc.h>
83 #include <sys/msgbuf.h>
84 #include <sys/mutex.h>
85 #include <sys/ptrace.h>
86 #include <sys/reboot.h>
87 #include <sys/signalvar.h>
88 #include <sys/sysctl.h>
89 #include <sys/sysent.h>
90 #include <sys/sysproto.h>
91 #include <sys/ucontext.h>
93 #include <sys/vmmeter.h>
94 #include <sys/vnode.h>
96 #include <net/netisr.h>
99 #include <vm/vm_extern.h>
100 #include <vm/vm_kern.h>
101 #include <vm/vm_page.h>
102 #include <vm/vm_map.h>
103 #include <vm/vm_object.h>
104 #include <vm/vm_pager.h>
106 #include <machine/altivec.h>
107 #include <machine/bat.h>
108 #include <machine/cpu.h>
109 #include <machine/elf.h>
110 #include <machine/fpu.h>
111 #include <machine/hid.h>
112 #include <machine/kdb.h>
113 #include <machine/md_var.h>
114 #include <machine/metadata.h>
115 #include <machine/mmuvar.h>
116 #include <machine/pcb.h>
117 #include <machine/reg.h>
118 #include <machine/sigframe.h>
119 #include <machine/spr.h>
120 #include <machine/trap.h>
121 #include <machine/vmparam.h>
125 #include <dev/ofw/openfirm.h>
128 extern vm_offset_t ksym_start, ksym_end;
132 int cacheline_size = 32;
134 int hw_direct_map = 1;
136 struct pcpu __pcpu[MAXCPU];
138 static struct trapframe frame0;
140 char machine[] = "powerpc";
141 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
143 static void cpu_startup(void *);
144 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
146 SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
147 CTLFLAG_RD, &cacheline_size, 0, "");
149 u_int powerpc_init(u_int, u_int, u_int, void *);
151 int save_ofw_mapping(void);
152 int restore_ofw_mapping(void);
154 void install_extint(void (*)(void));
156 int setfault(faultbuf); /* defined in locore.S */
158 static int grab_mcontext(struct thread *, mcontext_t *, int);
160 void asm_panic(char *);
165 struct pmap ofw_pmap;
168 struct bat battable[16];
170 struct kva_md_info kmi;
173 powerpc_ofw_shutdown(void *junk, int howto)
175 if (howto & RB_HALT) {
182 cpu_startup(void *dummy)
186 * Initialise the decrementer-based clock.
191 * Good {morning,afternoon,evening,night}.
193 cpu_setup(PCPU_GET(cpuid));
198 printf("real memory = %ld (%ld MB)\n", ptoa(physmem),
199 ptoa(physmem) / 1048576);
203 * Display any holes after the first chunk of extended memory.
208 printf("Physical memory chunk(s):\n");
209 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
210 int size1 = phys_avail[indx + 1] - phys_avail[indx];
212 printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
213 phys_avail[indx], phys_avail[indx + 1] - 1, size1,
218 vm_ksubmap_init(&kmi);
220 printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
221 ptoa(cnt.v_free_count) / 1048576);
224 * Set up buffers, so they can be used to read disk labels.
227 vm_pager_bufferinit();
229 EVENTHANDLER_REGISTER(shutdown_final, powerpc_ofw_shutdown, 0,
233 extern char kernel_text[], _end[];
235 extern void *testppc64, *testppc64size;
236 extern void *restorebridge, *restorebridgesize;
237 extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
239 extern void *rstcode, *rstsize;
241 extern void *trapcode, *trapcode64, *trapsize;
242 extern void *alitrap, *alisize;
243 extern void *dsitrap, *dsisize;
244 extern void *decrint, *decrsize;
245 extern void *extint, *extsize;
246 extern void *dblow, *dbsize;
249 powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
257 uint32_t msr, scratch;
258 uint8_t *cache_check;
265 * Parse metadata if present and fetch parameters. Must be done
266 * before console is inited so cninit gets the right value of
270 preload_metadata = mdp;
271 kmdp = preload_search_by_type("elf kernel");
273 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
274 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
275 end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
277 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
278 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
284 * Init params/tunables that can be overridden by the loader
289 * Start initializing proc0 and thread0.
291 proc_linkup0(&proc0, &thread0);
292 thread0.td_frame = &frame0;
295 * Set up per-cpu data.
298 pcpu_init(pc, 0, sizeof(struct pcpu));
299 pc->pc_curthread = &thread0;
302 __asm __volatile("mtsprg 0, %0" :: "r"(pc));
305 * Init mutexes, which we use heavily in PMAP
311 * Install the OF client interface
317 * Initialize the console before printing anything.
322 * Complain if there is no metadata.
324 if (mdp == NULL || kmdp == NULL) {
325 printf("powerpc_init: no loader metadata.\n");
335 * PowerPC 970 CPUs have a misfeature requested by Apple that makes
336 * them pretend they have a 32-byte cacheline. Turn this off
337 * before we measure the cacheline size.
340 switch (mfpvr() >> 16) {
345 scratch = mfspr64upper(SPR_HID5,msr);
346 scratch &= ~HID5_970_DCBZ_SIZE_HI;
347 mtspr64(SPR_HID5, scratch, mfspr(SPR_HID5), msr);
352 * Initialize the interrupt tables and figure out our cache line
353 * size and whether or not we need the 64-bit bridge code.
357 * Disable translation in case the vector area hasn't been
362 mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);
366 * Measure the cacheline size using dcbz
368 * Use EXC_PGM as a playground. We are about to overwrite it
369 * anyway, we know it exists, and we know it is cache-aligned.
372 cache_check = (void *)EXC_PGM;
374 for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
375 cache_check[cacheline_size] = 0xff;
377 __asm __volatile("dcbz %0,0":: "r" (cache_check) : "memory");
379 /* Find the first byte dcbz did not zero to get the cache line size */
380 for (cacheline_size = 0; cacheline_size < 0x100 &&
381 cache_check[cacheline_size] == 0; cacheline_size++);
383 /* Work around psim bug */
384 if (cacheline_size == 0) {
385 printf("WARNING: cacheline size undetermined, setting to 32\n");
390 * Figure out whether we need to use the 64 bit PMAP. This works by
391 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
392 * and setting ppc64 = 0 if that causes a trap.
397 bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
398 __syncicache((void *)EXC_PGM, (size_t)&testppc64size);
406 : "=r"(scratch), "=r"(ppc64));
409 * Now copy restorebridge into all the handlers, if necessary,
410 * and set up the trap tables.
414 /* Patch the two instances of rfi -> rfid */
415 bcopy(&rfid_patch,&rfi_patch1,4);
417 /* rfi_patch2 is at the end of dbleave */
418 bcopy(&rfid_patch,&rfi_patch2,4);
422 * Copy a code snippet to restore 32-bit bridge mode
423 * to the top of every non-generic trap handler
426 trap_offset += (size_t)&restorebridgesize;
427 bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
428 bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
429 bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
430 bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
431 bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
432 bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
433 bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
436 * Set the common trap entry point to the one that
437 * knows to restore 32-bit operation on execution.
440 generictrap = &trapcode64;
442 generictrap = &trapcode;
446 bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstsize);
448 bcopy(generictrap, (void *)EXC_RST, (size_t)&trapsize);
452 bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbsize);
453 bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbsize);
454 bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbsize);
455 bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbsize);
457 bcopy(generictrap, (void *)EXC_MCHK, (size_t)&trapsize);
458 bcopy(generictrap, (void *)EXC_PGM, (size_t)&trapsize);
459 bcopy(generictrap, (void *)EXC_TRC, (size_t)&trapsize);
460 bcopy(generictrap, (void *)EXC_BPT, (size_t)&trapsize);
462 bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsisize);
463 bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&alisize);
464 bcopy(generictrap, (void *)EXC_ISI, (size_t)&trapsize);
465 bcopy(generictrap, (void *)EXC_EXI, (size_t)&trapsize);
466 bcopy(generictrap, (void *)EXC_FPU, (size_t)&trapsize);
467 bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
468 bcopy(generictrap, (void *)EXC_SC, (size_t)&trapsize);
469 bcopy(generictrap, (void *)EXC_FPA, (size_t)&trapsize);
470 bcopy(generictrap, (void *)EXC_VEC, (size_t)&trapsize);
471 bcopy(generictrap, (void *)EXC_VECAST, (size_t)&trapsize);
472 bcopy(generictrap, (void *)EXC_THRM, (size_t)&trapsize);
473 __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);
482 * Choose a platform module so we can get the physical memory map.
485 platform_probe_and_attach();
488 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
489 * in case the platform module had a better idea of what we
493 pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
495 pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
497 pmap_bootstrap(startkernel, endkernel);
498 mtmsr(mfmsr() | PSL_IR|PSL_DR|PSL_ME|PSL_RI);
502 * Initialize params/tunables that are derived from memsize
504 init_param2(physmem);
507 * Grab booted kernel's name
509 env = getenv("kernelname");
511 strlcpy(kernelname, env, sizeof(kernelname));
516 * Finish setting up thread0.
518 thread0.td_pcb = (struct pcb *)
519 ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
520 sizeof(struct pcb)) & ~15);
521 bzero((void *)thread0.td_pcb, sizeof(struct pcb));
522 pc->pc_curpcb = thread0.td_pcb;
524 /* Initialise the message buffer. */
525 msgbufinit(msgbufp, MSGBUF_SIZE);
528 if (boothowto & RB_KDB)
529 kdb_enter(KDB_WHY_BOOTFLAGS,
530 "Boot flags requested debugger");
533 return (((uintptr_t)thread0.td_pcb - 16) & ~15);
537 bzero(void *buf, size_t len)
543 while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
548 while (len >= sizeof(u_long) * 8) {
550 *((u_long*) p + 1) = 0;
551 *((u_long*) p + 2) = 0;
552 *((u_long*) p + 3) = 0;
553 len -= sizeof(u_long) * 8;
554 *((u_long*) p + 4) = 0;
555 *((u_long*) p + 5) = 0;
556 *((u_long*) p + 6) = 0;
557 *((u_long*) p + 7) = 0;
558 p += sizeof(u_long) * 8;
561 while (len >= sizeof(u_long)) {
563 len -= sizeof(u_long);
574 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
576 struct trapframe *tf;
577 struct sigframe *sfp;
582 int oonstack, rndfsize;
588 PROC_LOCK_ASSERT(p, MA_OWNED);
589 sig = ksi->ksi_signo;
590 code = ksi->ksi_code;
592 mtx_assert(&psp->ps_mtx, MA_OWNED);
594 oonstack = sigonstack(tf->fixreg[1]);
596 rndfsize = ((sizeof(sf) + 15) / 16) * 16;
598 CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
604 memset(&sf, 0, sizeof(sf));
605 grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
606 sf.sf_uc.uc_sigmask = *mask;
607 sf.sf_uc.uc_stack = td->td_sigstk;
608 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
609 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
611 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
614 * Allocate and validate space for the signal handler context.
616 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
617 SIGISMEMBER(psp->ps_sigonstack, sig)) {
618 sfp = (struct sigframe *)(td->td_sigstk.ss_sp +
619 td->td_sigstk.ss_size - rndfsize);
621 sfp = (struct sigframe *)(tf->fixreg[1] - rndfsize);
625 * Translate the signal if appropriate (Linux emu ?)
627 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
628 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
631 * Save the floating-point state, if necessary, then copy it.
636 * Set up the registers to return to sigcode.
638 * r1/sp - sigframe ptr
639 * lr - sig function, dispatched to by blrl in trampoline
641 * r4 - SIGINFO ? &siginfo : exception code
643 * srr0 - trampoline function addr
645 tf->lr = (register_t)catcher;
646 tf->fixreg[1] = (register_t)sfp;
647 tf->fixreg[FIRSTARG] = sig;
648 tf->fixreg[FIRSTARG+2] = (register_t)&sfp->sf_uc;
649 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
651 * Signal handler installed with SA_SIGINFO.
653 tf->fixreg[FIRSTARG+1] = (register_t)&sfp->sf_si;
656 * Fill siginfo structure.
658 sf.sf_si = ksi->ksi_info;
659 sf.sf_si.si_signo = sig;
660 sf.sf_si.si_addr = (void *)((tf->exc == EXC_DSI) ?
661 tf->cpu.aim.dar : tf->srr0);
663 /* Old FreeBSD-style arguments. */
664 tf->fixreg[FIRSTARG+1] = code;
665 tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ?
666 tf->cpu.aim.dar : tf->srr0;
668 mtx_unlock(&psp->ps_mtx);
671 tf->srr0 = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));
674 * copy the frame out to userland.
676 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
678 * Process has trashed its stack. Kill it.
680 CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
685 CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
686 tf->srr0, tf->fixreg[1]);
689 mtx_lock(&psp->ps_mtx);
693 sigreturn(struct thread *td, struct sigreturn_args *uap)
699 CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
701 if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
702 CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
706 error = set_mcontext(td, &uc.uc_mcontext);
712 td->td_sigmask = uc.uc_sigmask;
713 SIG_CANTMASK(td->td_sigmask);
717 CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
718 td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
720 return (EJUSTRETURN);
723 #ifdef COMPAT_FREEBSD4
725 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
728 return sigreturn(td, (struct sigreturn_args *)uap);
733 * Construct a PCB from a trapframe. This is called from kdb_trap() where
734 * we want to start a backtrace from the function that caused us to enter
735 * the debugger. We have the context in the trapframe, but base the trace
736 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
737 * enough for a backtrace.
740 makectx(struct trapframe *tf, struct pcb *pcb)
743 pcb->pcb_lr = tf->srr0;
744 pcb->pcb_sp = tf->fixreg[1];
748 * get_mcontext/sendsig helper routine that doesn't touch the
752 grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
758 memset(mcp, 0, sizeof(mcontext_t));
760 mcp->mc_vers = _MC_VERSION;
762 memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
763 if (flags & GET_MC_CLEAR_RET) {
769 * This assumes that floating-point context is *not* lazy,
770 * so if the thread has used FP there would have been a
771 * FP-unavailable exception that would have set things up
774 if (pcb->pcb_flags & PCB_FPU) {
775 KASSERT(td == curthread,
776 ("get_mcontext: fp save not curthread"));
780 mcp->mc_flags |= _MC_FP_VALID;
781 memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
782 memcpy(mcp->mc_fpreg, pcb->pcb_fpu.fpr, 32*sizeof(double));
786 * Repeat for Altivec context
789 if (pcb->pcb_flags & PCB_VEC) {
790 KASSERT(td == curthread,
791 ("get_mcontext: fp save not curthread"));
795 mcp->mc_flags |= _MC_AV_VALID;
796 mcp->mc_vscr = pcb->pcb_vec.vscr;
797 mcp->mc_vrsave = pcb->pcb_vec.vrsave;
798 memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec));
801 mcp->mc_len = sizeof(*mcp);
807 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
811 error = grab_mcontext(td, mcp, flags);
813 PROC_LOCK(curthread->td_proc);
814 mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
815 PROC_UNLOCK(curthread->td_proc);
822 set_mcontext(struct thread *td, const mcontext_t *mcp)
825 struct trapframe *tf;
830 if (mcp->mc_vers != _MC_VERSION ||
831 mcp->mc_len != sizeof(*mcp))
835 * Don't let the user set privileged MSR bits
837 if ((mcp->mc_srr1 & PSL_USERSTATIC) != (tf->srr1 & PSL_USERSTATIC)) {
841 memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));
843 if (mcp->mc_flags & _MC_FP_VALID) {
844 if ((pcb->pcb_flags & PCB_FPU) != PCB_FPU) {
849 memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
850 memcpy(pcb->pcb_fpu.fpr, mcp->mc_fpreg, 32*sizeof(double));
853 if (mcp->mc_flags & _MC_AV_VALID) {
854 if ((pcb->pcb_flags & PCB_VEC) != PCB_VEC) {
859 pcb->pcb_vec.vscr = mcp->mc_vscr;
860 pcb->pcb_vec.vrsave = mcp->mc_vrsave;
861 memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec));
874 * Flush the D-cache for non-DMA I/O so that the I-cache can
875 * be made coherent later.
878 cpu_flush_dcache(void *ptr, size_t len)
891 * Shutdown the CPU as much as possible.
908 if ((msr & PSL_EE) != PSL_EE) {
909 struct thread *td = curthread;
910 printf("td msr %x\n", td->td_md.md_saved_msr);
911 panic("ints disabled in idleproc!");
914 if (powerpc_pow_enabled) {
916 mtmsr(msr | PSL_POW);
922 cpu_idle_wakeup(int cpu)
929 * Set set up registers on exec.
932 exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
934 struct trapframe *tf;
935 struct ps_strings arginfo;
938 bzero(tf, sizeof *tf);
939 tf->fixreg[1] = -roundup(-stack + 8, 16);
942 * XXX Machine-independent code has already copied arguments and
943 * XXX environment to userland. Get them back here.
945 (void)copyin((char *)PS_STRINGS, &arginfo, sizeof(arginfo));
948 * Set up arguments for _start():
949 * _start(argc, argv, envp, obj, cleanup, ps_strings);
952 * - obj and cleanup are the auxilliary and termination
953 * vectors. They are fixed up by ld.elf_so.
954 * - ps_strings is a NetBSD extention, and will be
955 * ignored by executables which are strictly
956 * compliant with the SVR4 ABI.
958 * XXX We have to set both regs and retval here due to different
959 * XXX calling convention in trap.c and init_main.c.
962 * XXX PG: these get overwritten in the syscall return code.
963 * execve() should return EJUSTRETURN, like it does on NetBSD.
964 * Emulate by setting the syscall return value cells. The
965 * registers still have to be set for init's fork trampoline.
967 td->td_retval[0] = arginfo.ps_nargvstr;
968 td->td_retval[1] = (register_t)arginfo.ps_argvstr;
969 tf->fixreg[3] = arginfo.ps_nargvstr;
970 tf->fixreg[4] = (register_t)arginfo.ps_argvstr;
971 tf->fixreg[5] = (register_t)arginfo.ps_envstr;
972 tf->fixreg[6] = 0; /* auxillary vector */
973 tf->fixreg[7] = 0; /* termination vector */
974 tf->fixreg[8] = (register_t)PS_STRINGS; /* NetBSD extension */
977 tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
978 td->td_pcb->pcb_flags = 0;
982 fill_regs(struct thread *td, struct reg *regs)
984 struct trapframe *tf;
987 memcpy(regs, tf, sizeof(struct reg));
993 fill_dbregs(struct thread *td, struct dbreg *dbregs)
995 /* No debug registers on PowerPC */
1000 fill_fpregs(struct thread *td, struct fpreg *fpregs)
1006 if ((pcb->pcb_flags & PCB_FPU) == 0)
1007 memset(fpregs, 0, sizeof(struct fpreg));
1009 memcpy(fpregs, &pcb->pcb_fpu, sizeof(struct fpreg));
1015 set_regs(struct thread *td, struct reg *regs)
1017 struct trapframe *tf;
1020 memcpy(tf, regs, sizeof(struct reg));
1026 set_dbregs(struct thread *td, struct dbreg *dbregs)
1028 /* No debug registers on PowerPC */
1033 set_fpregs(struct thread *td, struct fpreg *fpregs)
1038 if ((pcb->pcb_flags & PCB_FPU) == 0)
1040 memcpy(&pcb->pcb_fpu, fpregs, sizeof(struct fpreg));
1046 ptrace_set_pc(struct thread *td, unsigned long addr)
1048 struct trapframe *tf;
1051 tf->srr0 = (register_t)addr;
1057 ptrace_single_step(struct thread *td)
1059 struct trapframe *tf;
1068 ptrace_clear_single_step(struct thread *td)
1070 struct trapframe *tf;
1073 tf->srr1 &= ~PSL_SE;
1079 kdb_cpu_clear_singlestep(void)
1082 kdb_frame->srr1 &= ~PSL_SE;
1086 kdb_cpu_set_singlestep(void)
1089 kdb_frame->srr1 |= PSL_SE;
1093 * Initialise a struct pcpu.
1096 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
1102 spinlock_enter(void)
1107 if (td->td_md.md_spinlock_count == 0)
1108 td->td_md.md_saved_msr = intr_disable();
1109 td->td_md.md_spinlock_count++;
1120 td->td_md.md_spinlock_count--;
1121 if (td->td_md.md_spinlock_count == 0)
1122 intr_restore(td->td_md.md_saved_msr);
1126 * kcopy(const void *src, void *dst, size_t len);
1128 * Copy len bytes from src to dst, aborting if we encounter a fatal
1131 * kcopy() _must_ save and restore the old fault handler since it is
1132 * called by uiomove(), which may be in the path of servicing a non-fatal
1136 kcopy(const void *src, void *dst, size_t len)
1139 faultbuf env, *oldfault;
1142 td = PCPU_GET(curthread);
1143 oldfault = td->td_pcb->pcb_onfault;
1144 if ((rv = setfault(env)) != 0) {
1145 td->td_pcb->pcb_onfault = oldfault;
1149 memcpy(dst, src, len);
1151 td->td_pcb->pcb_onfault = oldfault;
1156 asm_panic(char *pstr)
1161 int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */
1164 db_trap_glue(struct trapframe *frame)
1166 if (!(frame->srr1 & PSL_PR)
1167 && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
1168 || (frame->exc == EXC_PGM
1169 && (frame->srr1 & 0x20000))
1170 || frame->exc == EXC_BPT
1171 || frame->exc == EXC_DSI)) {
1172 int type = frame->exc;
1173 if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
1174 type = T_BREAKPOINT;
1176 return (kdb_trap(type, 0, frame));