2 * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause-FreeBSD
4 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5 * Copyright (C) 1995, 1996 TooLs GmbH.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by TooLs GmbH.
19 * 4. The name of TooLs GmbH may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 * Copyright (C) 2001 Benno Rice
35 * All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
46 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
47 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
48 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
49 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
51 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
52 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
53 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
54 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
55 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
62 #include "opt_fpu_emu.h"
64 #include <sys/param.h>
66 #include <sys/systm.h>
73 #include <sys/imgact.h>
74 #include <sys/kernel.h>
77 #include <sys/malloc.h>
78 #include <sys/mutex.h>
80 #include <sys/signalvar.h>
81 #include <sys/syscallsubr.h>
82 #include <sys/syscall.h>
83 #include <sys/sysent.h>
84 #include <sys/sysproto.h>
85 #include <sys/ucontext.h>
88 #include <machine/altivec.h>
89 #include <machine/cpu.h>
90 #include <machine/elf.h>
91 #include <machine/fpu.h>
92 #include <machine/pcb.h>
93 #include <machine/sigframe.h>
94 #include <machine/trap.h>
95 #include <machine/vmparam.h>
98 #include <vm/vm_param.h>
100 #include <vm/vm_map.h>
103 #include <powerpc/fpu/fpu_extern.h>
106 #ifdef COMPAT_FREEBSD32
107 #include <compat/freebsd32/freebsd32_signal.h>
108 #include <compat/freebsd32/freebsd32_util.h>
109 #include <compat/freebsd32/freebsd32_proto.h>
/* 32-bit view of a ucontext, used when delivering signals to ILP32 processes. */
typedef struct __ucontext32 {
	mcontext32_t	uc_mcontext;
	struct sigaltstack32 uc_stack;
	uint32_t	__spare__[4];
	/* 32-bit siginfo embedded in the 32-bit sigframe. */
	struct siginfo32 sf_si;

/* Forward declarations for the local context-grab helpers below. */
static int	grab_mcontext32(struct thread *td, mcontext32_t *, int flags);
static int	grab_mcontext(struct thread *, mcontext_t *, int);
static void	cleanup_power_extras(struct thread *);

extern struct sysentvec elf64_freebsd_sysvec_v2;

/* Pin the userland-visible context ABI: these sizes must never change. */
_Static_assert(sizeof(mcontext_t) == 1392, "mcontext_t size incorrect");
_Static_assert(sizeof(ucontext_t) == 1472, "ucontext_t size incorrect");
_Static_assert(sizeof(siginfo_t) == 80, "siginfo_t size incorrect");
#ifdef COMPAT_FREEBSD32
_Static_assert(sizeof(mcontext32_t) == 1224, "mcontext32_t size incorrect");
_Static_assert(sizeof(ucontext32_t) == 1280, "ucontext32_t size incorrect");
_Static_assert(sizeof(struct siginfo32) == 64, "struct siginfo32 size incorrect");
#endif /* COMPAT_FREEBSD32 */
/*
 * NOTE(review): the repeated asserts below presumably belong to an #else
 * (32-bit kernel) branch whose conditional lines are not visible here —
 * confirm against the full file.
 */
_Static_assert(sizeof(mcontext_t) == 1224, "mcontext_t size incorrect");
_Static_assert(sizeof(ucontext_t) == 1280, "ucontext_t size incorrect");
_Static_assert(sizeof(siginfo_t) == 64, "siginfo_t size incorrect");
/*
 * Deliver a signal to the current thread: build a sigframe (32- or 64-bit
 * layout depending on the process ABI), copy it out to the user stack, and
 * redirect the trapframe so the thread resumes in the signal trampoline,
 * which dispatches to `catcher`.  Called with the proc lock and ps_mtx held.
 */
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
	struct trapframe *tf;
#ifdef COMPAT_FREEBSD32
	struct siginfo32 siginfo32;
	struct sigframe32 sf32;
	int oonstack, rndfsize;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	/*
	 * Fill siginfo structure.
	 */
	ksi->ksi_info.si_signo = ksi->ksi_signo;
	/* For data-storage faults (DSI/DSE), report the faulting data address. */
	ksi->ksi_info.si_addr =
	    (void *)((tf->exc == EXC_DSI || tf->exc == EXC_DSE) ?

#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(p, SV_ILP32)) {
		/* 32-bit process: translate siginfo and use the 32-bit frame. */
		siginfo_to_siginfo32(&ksi->ksi_info, &siginfo32);
		sig = siginfo32.si_signo;
		code = siginfo32.si_code;
		sfp = (caddr_t)&sf32;
		sfpsize = sizeof(sf32);
		/* Frame size rounded to 16-byte stack alignment. */
		rndfsize = roundup(sizeof(sf32), 16);
		sp = (uint32_t)tf->fixreg[1];
		oonstack = sigonstack(sp);

		/* Capture the current user context into the 32-bit frame. */
		memset(&sf32, 0, sizeof(sf32));
		grab_mcontext32(td, &sf32.sf_uc.uc_mcontext, 0);
		sf32.sf_uc.uc_sigmask = *mask;
		sf32.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp;
		sf32.sf_uc.uc_stack.ss_size = (uint32_t)td->td_sigstk.ss_size;
		sf32.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
		sf32.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;

		/* Native-ABI path. */
		sig = ksi->ksi_signo;
		code = ksi->ksi_code;
		sfpsize = sizeof(sf);
		/*
		 * 64-bit PPC defines a 288 byte scratch region
		 * (the two rndfsize initializations below come from
		 * alternative compile-time branches — 64- vs 32-bit).
		 */
		rndfsize = 288 + roundup(sizeof(sf), 48);
		rndfsize = roundup(sizeof(sf), 16);
		oonstack = sigonstack(sp);

		/* Capture the current user context into the native frame. */
		memset(&sf, 0, sizeof(sf));
		grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
		sf.sf_uc.uc_sigmask = *mask;
		sf.sf_uc.uc_stack = td->td_sigstk;
		sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
		sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
#ifdef COMPAT_FREEBSD32

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,

	/*
	 * Allocate and validate space for the signal handler context.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		/* Place the frame at the top of the alternate stack, 16-byte aligned. */
		usfp = (void *)(((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - rndfsize) & ~0xFul);
		usfp = (void *)((sp - rndfsize) & ~0xFul);

	/*
	 * Set Floating Point facility to "Ignore Exceptions Mode" so signal
	 * handler can run.
	 */
	if (td->td_pcb->pcb_flags & PCB_FPU)
		tf->srr1 = tf->srr1 & ~(PSL_FE0 | PSL_FE1);

	/*
	 * Set up the registers to return to sigcode.
	 *
	 * r1/sp - sigframe ptr
	 * lr - sig function, dispatched to by blrl in trampoline
	 * r4 - SIGINFO ? &siginfo : exception code
	 * srr0 - trampoline function addr
	 */
	tf->lr = (register_t)catcher;
	tf->fixreg[1] = (register_t)usfp;
	tf->fixreg[FIRSTARG] = sig;
#ifdef COMPAT_FREEBSD32
	/* r5: pointer to the ucontext inside the frame (ABI-dependent offset). */
	tf->fixreg[FIRSTARG+2] = (register_t)usfp +
	    ((SV_PROC_FLAG(p, SV_ILP32)) ?
	    offsetof(struct sigframe32, sf_uc) :
	    offsetof(struct sigframe, sf_uc));
	tf->fixreg[FIRSTARG+2] = (register_t)usfp +
	    offsetof(struct sigframe, sf_uc);

	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 */
#ifdef COMPAT_FREEBSD32
		if (SV_PROC_FLAG(p, SV_ILP32)) {
			sf32.sf_si = siginfo32;
			tf->fixreg[FIRSTARG+1] = (register_t)usfp +
			    offsetof(struct sigframe32, sf_si);
			/* NOTE(review): duplicates the assignment above; harmless but redundant. */
			sf32.sf_si = siginfo32;

			tf->fixreg[FIRSTARG+1] = (register_t)usfp +
			    offsetof(struct sigframe, sf_si);
			sf.sf_si = ksi->ksi_info;
#ifdef COMPAT_FREEBSD32

		/* Old FreeBSD-style arguments. */
		tf->fixreg[FIRSTARG+1] = code;
		tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ?

	mtx_unlock(&psp->ps_mtx);
	/* Resume at the signal trampoline mapped into the process. */
	tf->srr0 = (register_t)PROC_SIGCODE(p);

	/*
	 * copy the frame out to userland.
	 */
	if (copyout(sfp, usfp, sfpsize) != 0) {
		/*
		 * Process has trashed its stack. Kill it.
		 */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
	    tf->srr0, tf->fixreg[1]);

	mtx_lock(&psp->ps_mtx);
/*
 * sigreturn(2): restore the machine context saved by sendsig() from the
 * user-supplied ucontext and reinstate the saved signal mask.
 */
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);

	error = set_mcontext(td, &uc.uc_mcontext);

	/*
	 * Save FPU state if needed. User may have changed it on
	 * signal handler exit.
	 */
	if (uc.uc_mcontext.mc_srr1 & PSL_FP)

	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
	    td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);

	/* The trapframe already holds the full return state. */
	return (EJUSTRETURN);
#ifdef COMPAT_FREEBSD4
/* FreeBSD 4.x compatibility entry point: same frame layout, just forward. */
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
	return sys_sigreturn(td, (struct sigreturn_args *)uap);
/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
makectx(struct trapframe *tf, struct pcb *pcb)
	/* Use the interrupted PC (srr0) as the link register for the trace. */
	pcb->pcb_lr = tf->srr0;
	pcb->pcb_sp = tf->fixreg[1];
/*
 * get_mcontext/sendsig helper routine that doesn't touch the
 * proc lock.  Copies the trapframe and, when the thread has used them,
 * the FP/VSX and Altivec register state into *mcp.
 */
grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
	memset(mcp, 0, sizeof(mcontext_t));

	mcp->mc_vers = _MC_VERSION;

	memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
	if (flags & GET_MC_CLEAR_RET) {

	/*
	 * This assumes that floating-point context is *not* lazy,
	 * so if the thread has used FP there would have been a
	 * FP-unavailable exception that would have set things up
	 * correctly.
	 */
	if (pcb->pcb_flags & PCB_FPREGS) {
		if (pcb->pcb_flags & PCB_FPU) {
			KASSERT(td == curthread,
			    ("get_mcontext: fp save not curthread"));
		mcp->mc_flags |= _MC_FP_VALID;
		memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
		for (i = 0; i < 32; i++)
			memcpy(&mcp->mc_fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
		/* VSX: also save the second doubleword of each register. */
		if (pcb->pcb_flags & PCB_VSX) {
			for (i = 0; i < 32; i++)
				memcpy(&mcp->mc_vsxfpreg[i],
				    &pcb->pcb_fpu.fpr[i].vsr[2], sizeof(double));

	/*
	 * Repeat for Altivec context
	 */
	if (pcb->pcb_flags & PCB_VEC) {
		KASSERT(td == curthread,
		    ("get_mcontext: fp save not curthread"));
		mcp->mc_flags |= _MC_AV_VALID;
		mcp->mc_vscr = pcb->pcb_vec.vscr;
		mcp->mc_vrsave = pcb->pcb_vec.vrsave;
		memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec));

	mcp->mc_len = sizeof(*mcp);
/*
 * MD get_mcontext(): grab the context, then (under the proc lock) record
 * whether the thread is currently executing on the alternate signal stack.
 */
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
	error = grab_mcontext(td, mcp, flags);
	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
	PROC_UNLOCK(curthread->td_proc);
/*
 * Install a user-supplied machine context into the thread after validating
 * the context version/length and the privileged MSR bits.
 */
set_mcontext(struct thread *td, mcontext_t *mcp)
	struct trapframe *tf;

	if (mcp->mc_vers != _MC_VERSION || mcp->mc_len != sizeof(*mcp))

	/*
	 * Don't let the user change privileged MSR bits.
	 *
	 * psl_userstatic is used here to mask off any bits that can
	 * legitimately vary between user contexts (Floating point
	 * exception control and any facilities that we are using the
	 * "enable on first use" pattern with.)
	 *
	 * All other bits are required to match psl_userset(32).
	 *
	 * Remember to update the platform cpu_init code when implementing
	 * support for a new conditional facility!
	 */
	if ((mcp->mc_srr1 & psl_userstatic) != (tf->srr1 & psl_userstatic)) {

	/* Copy trapframe, preserving TLS pointer across context change */
	if (SV_PROC_FLAG(td->td_proc, SV_LP64))
		tls = tf->fixreg[13];
	memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));
	if (SV_PROC_FLAG(td->td_proc, SV_LP64))
		tf->fixreg[13] = tls;

	/*
	 * Force the FPU back off to ensure the new context will not bypass
	 * the enable_fpu() setup code accidentally.
	 *
	 * This prevents an issue where a process that uses floating point
	 * inside a signal handler could end up in a state where the MSR
	 * did not match pcb_flags.
	 *
	 * Additionally, ensure VSX is disabled as well, as it is illegal
	 * to leave it turned on when FP or VEC are off.
	 */
	tf->srr1 &= ~(PSL_FP | PSL_VSX);
	pcb->pcb_flags &= ~(PCB_FPU | PCB_VSX);

	if (mcp->mc_flags & _MC_FP_VALID) {
		/* enable_fpu() will happen lazily on a fault */
		pcb->pcb_flags |= PCB_FPREGS;
		memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
		bzero(pcb->pcb_fpu.fpr, sizeof(pcb->pcb_fpu.fpr));
		for (i = 0; i < 32; i++) {
			memcpy(&pcb->pcb_fpu.fpr[i].fpr, &mcp->mc_fpreg[i],
			memcpy(&pcb->pcb_fpu.fpr[i].vsr[2],
			    &mcp->mc_vsxfpreg[i], sizeof(double));

	if (mcp->mc_flags & _MC_AV_VALID) {
		if ((pcb->pcb_flags & PCB_VEC) != PCB_VEC) {
		pcb->pcb_vec.vscr = mcp->mc_vscr;
		pcb->pcb_vec.vrsave = mcp->mc_vrsave;
		memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec));

		/* No valid Altivec state in the context: turn the facility off. */
		tf->srr1 &= ~PSL_VEC;
		pcb->pcb_flags &= ~PCB_VEC;
/*
 * Clean up extra POWER state. Some per-process registers and states are not
 * managed by the MSR, so must be cleaned up explicitly on thread exit.
 *
 * Currently this includes:
 * DSCR -- Data stream control register (PowerISA 2.06+)
 * FSCR -- Facility Status and Control Register (PowerISA 2.07+)
 */
cleanup_power_extras(struct thread *td)
	pcb_flags = td->td_pcb->pcb_flags;
	/* Clean up registers not managed by MSR. */
	if (pcb_flags & PCB_CFSCR)
	if (pcb_flags & PCB_CDSCR)

	if (pcb_flags & PCB_FPU)
/*
 * Ensure the PCB has been updated in preparation for copying a thread.
 *
 * This is needed because normally this only happens during switching tasks,
 * but when we are cloning a thread, we need the updated state before doing
 * the actual copy, so the new thread inherits the current state instead of
 * the state at the last task switch.
 *
 * Keep this in sync with the assembly code in cpu_switch()!
 */
cpu_save_thread_regs(struct thread *td)
	KASSERT(td == curthread,
	    ("cpu_save_thread_regs: td is not curthread"));

	pcb_flags = pcb->pcb_flags;

#if defined(__powerpc64__)
	/* Are *any* FSCR flags in use? */
	if (pcb_flags & PCB_CFSCR) {
		pcb->pcb_fscr = mfspr(SPR_FSCR);
		/* Event-based branching registers. */
		if (pcb->pcb_fscr & FSCR_EBB) {
			pcb->pcb_ebb.ebbhr = mfspr(SPR_EBBHR);
			pcb->pcb_ebb.ebbrr = mfspr(SPR_EBBRR);
			pcb->pcb_ebb.bescr = mfspr(SPR_BESCR);
		/* Load-monitored facility registers. */
		if (pcb->pcb_fscr & FSCR_LM) {
			pcb->pcb_lm.lmrr = mfspr(SPR_LMRR);
			pcb->pcb_lm.lmser = mfspr(SPR_LMSER);
		/* Target address register. */
		if (pcb->pcb_fscr & FSCR_TAR)
			pcb->pcb_tar = mfspr(SPR_TAR);

	/*
	 * This is outside of the PCB_CFSCR check because it can be set
	 * independently when running on POWER7/POWER8.
	 */
	if (pcb_flags & PCB_CDSCR)
		pcb->pcb_dscr = mfspr(SPR_DSCRP);

	/*
	 * On E500v2, single-precision scalar instructions and access to
	 * SPEFSCR may be used without PSL_VEC turned on, as long as they
	 * limit themselves to the low word of the registers.
	 *
	 * As such, we need to unconditionally save SPEFSCR, even though
	 * it is also updated in save_vec_nodrop().
	 */
	pcb->pcb_vec.vscr = mfspr(SPR_SPEFSCR);

	if (pcb_flags & PCB_FPU)

	if (pcb_flags & PCB_VEC)
/*
 * Set up registers on exec.
 */
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
	struct trapframe *tf;

	bzero(tf, sizeof *tf);
	/*
	 * Reserve the ABI-mandated call frame below the argument block
	 * (these two initializations come from 64- vs 32-bit branches).
	 */
	tf->fixreg[1] = -roundup(-stack + 48, 16);
	tf->fixreg[1] = -roundup(-stack + 8, 16);

	/*
	 * Set up arguments for _start():
	 *	_start(argc, argv, envp, obj, cleanup, ps_strings);
	 *
	 * - obj and cleanup are the auxiliary and termination
	 *   vectors. They are fixed up by ld.elf_so.
	 * - ps_strings is a NetBSD extension, and will be
	 *   ignored by executables which are strictly
	 *   compliant with the SVR4 ABI.
	 */

	/* Collect argc from the user stack */
	argc = fuword((void *)stack);

	tf->fixreg[3] = argc;
	tf->fixreg[4] = stack + sizeof(register_t);
	tf->fixreg[5] = stack + (2 + argc)*sizeof(register_t);
	tf->fixreg[6] = 0;				/* auxiliary vector */
	tf->fixreg[7] = 0;				/* termination vector */
	tf->fixreg[8] = (register_t)imgp->ps_strings;	/* NetBSD extension */

	tf->srr0 = imgp->entry_addr;
	/* Entry address also goes in r12 (ELFv2 global entry convention). */
	tf->fixreg[12] = imgp->entry_addr;

	tf->srr1 = psl_userset | PSL_FE_DFLT;
	cleanup_power_extras(td);
	td->td_pcb->pcb_flags = 0;
#ifdef COMPAT_FREEBSD32
/* exec_setregs() counterpart for 32-bit (ILP32) processes. */
ppc32_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
	struct trapframe *tf;

	bzero(tf, sizeof *tf);
	/* 32-bit ABI: 8-byte back chain, stack kept 16-byte aligned. */
	tf->fixreg[1] = -roundup(-stack + 8, 16);

	/* Collect argc from the user stack. */
	argc = fuword32((void *)stack);

	tf->fixreg[3] = argc;
	tf->fixreg[4] = stack + sizeof(uint32_t);
	tf->fixreg[5] = stack + (2 + argc)*sizeof(uint32_t);
	tf->fixreg[6] = 0;				/* auxiliary vector */
	tf->fixreg[7] = 0;				/* termination vector */
	tf->fixreg[8] = (register_t)imgp->ps_strings;	/* NetBSD extension */

	tf->srr0 = imgp->entry_addr;
	tf->srr1 = psl_userset32 | PSL_FE_DFLT;
	cleanup_power_extras(td);
	td->td_pcb->pcb_flags = 0;
/* Export the thread's trapframe as a struct reg (ptrace/core dumps). */
fill_regs(struct thread *td, struct reg *regs)
	struct trapframe *tf;

	/* Relies on struct reg matching the leading layout of the trapframe. */
	memcpy(regs, tf, sizeof(struct reg));
/* ptrace debug-register export: nothing to do on this architecture. */
fill_dbregs(struct thread *td, struct dbreg *dbregs)
	/* No debug registers on PowerPC */
/* Export FP state; returns zeros if the thread never touched FP (no PCB_FPREGS). */
fill_fpregs(struct thread *td, struct fpreg *fpregs)
	if ((pcb->pcb_flags & PCB_FPREGS) == 0)
		memset(fpregs, 0, sizeof(struct fpreg));
	memcpy(&fpregs->fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
	for (i = 0; i < 32; i++)
		memcpy(&fpregs->fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
/* Install a struct reg into the thread's trapframe (ptrace). */
set_regs(struct thread *td, struct reg *regs)
	struct trapframe *tf;

	memcpy(tf, regs, sizeof(struct reg));
/* ptrace debug-register import: nothing to do on this architecture. */
set_dbregs(struct thread *td, struct dbreg *dbregs)
	/* No debug registers on PowerPC */
/* Install FP state from ptrace; marks the thread as having FP registers. */
set_fpregs(struct thread *td, struct fpreg *fpregs)
	pcb->pcb_flags |= PCB_FPREGS;
	memcpy(&pcb->pcb_fpu.fpscr, &fpregs->fpscr, sizeof(double));
	for (i = 0; i < 32; i++) {
		memcpy(&pcb->pcb_fpu.fpr[i].fpr, &fpregs->fpreg[i],
#ifdef COMPAT_FREEBSD32
/* Import 32-bit ptrace registers, widening each GPR into the trapframe. */
set_regs32(struct thread *td, struct reg32 *regs)
	struct trapframe *tf;

	for (i = 0; i < 32; i++)
		tf->fixreg[i] = regs->fixreg[i];
/* Export trapframe GPRs as 32-bit ptrace registers (values truncated). */
fill_regs32(struct thread *td, struct reg32 *regs)
	struct trapframe *tf;

	for (i = 0; i < 32; i++)
		regs->fixreg[i] = tf->fixreg[i];
/*
 * Build a 32-bit mcontext by grabbing the native context and narrowing
 * each field to its 32-bit counterpart.
 */
grab_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
	error = grab_mcontext(td, &mcp64, flags);

	mcp->mc_vers = mcp64.mc_vers;
	mcp->mc_flags = mcp64.mc_flags;
	mcp->mc_onstack = mcp64.mc_onstack;
	mcp->mc_len = mcp64.mc_len;
	memcpy(mcp->mc_avec,mcp64.mc_avec,sizeof(mcp64.mc_avec));
	memcpy(mcp->mc_av,mcp64.mc_av,sizeof(mcp64.mc_av));
	/* GPRs and special registers narrow from 64 to 32 bits element-wise. */
	for (i = 0; i < 42; i++)
		mcp->mc_frame[i] = mcp64.mc_frame[i];
	memcpy(mcp->mc_fpreg,mcp64.mc_fpreg,sizeof(mcp64.mc_fpreg));
	memcpy(mcp->mc_vsxfpreg,mcp64.mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg));
/* 32-bit get_mcontext(): grab context, then record on-stack status under the proc lock. */
get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
	error = grab_mcontext32(td, mcp, flags);
	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
	PROC_UNLOCK(curthread->td_proc);
/*
 * Install a 32-bit mcontext by widening it into a native mcontext and
 * delegating to set_mcontext().
 */
set_mcontext32(struct thread *td, mcontext32_t *mcp)
	mcp64.mc_vers = mcp->mc_vers;
	mcp64.mc_flags = mcp->mc_flags;
	mcp64.mc_onstack = mcp->mc_onstack;
	mcp64.mc_len = mcp->mc_len;
	memcpy(mcp64.mc_avec,mcp->mc_avec,sizeof(mcp64.mc_avec));
	memcpy(mcp64.mc_av,mcp->mc_av,sizeof(mcp64.mc_av));
	for (i = 0; i < 42; i++)
		mcp64.mc_frame[i] = mcp->mc_frame[i];
	/* Preserve the upper 32 bits of srr1, which the 32-bit context lacks. */
	mcp64.mc_srr1 |= (td->td_frame->srr1 & 0xFFFFFFFF00000000ULL);
	memcpy(mcp64.mc_fpreg,mcp->mc_fpreg,sizeof(mcp64.mc_fpreg));
	memcpy(mcp64.mc_vsxfpreg,mcp->mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg));

	error = set_mcontext(td, &mcp64);
#ifdef COMPAT_FREEBSD32
/* sigreturn(2) for 32-bit processes; mirrors sys_sigreturn(). */
freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);

	error = set_mcontext32(td, &uc.uc_mcontext);

	/*
	 * Save FPU state if needed. User may have changed it on
	 * signal handler exit.
	 */
	if (uc.uc_mcontext.mc_srr1 & PSL_FP)

	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
	    td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);

	/* Trapframe carries the full return state. */
	return (EJUSTRETURN);
/*
 * The first two fields of a ucontext_t are the signal mask and the machine
 * context. The next field is uc_link; we want to avoid destroying the link
 * when copying out contexts.
 */
#define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link)
/* getcontext(2) for 32-bit processes; copies out only up to uc_link. */
freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap)
	if (uap->ucp == NULL)

	bzero(&uc, sizeof(uc));
	get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
	PROC_LOCK(td->td_proc);
	uc.uc_sigmask = td->td_sigmask;
	PROC_UNLOCK(td->td_proc);
	ret = copyout(&uc, uap->ucp, UC32_COPY_SIZE);
/* setcontext(2) for 32-bit processes: install context and signal mask. */
freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap)
	if (uap->ucp == NULL)

	ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
	ret = set_mcontext32(td, &uc.uc_mcontext);
	kern_sigprocmask(td, SIG_SETMASK,
	    &uc.uc_sigmask, NULL, 0);

	/* On success the trapframe holds the new state; skip normal return. */
	return (ret == 0 ? EJUSTRETURN : ret);
/* swapcontext(2) for 32-bit processes: save to oucp, then install ucp. */
freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap)
	if (uap->oucp == NULL || uap->ucp == NULL)

	/* Save the current context out to *oucp. */
	bzero(&uc, sizeof(uc));
	get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
	PROC_LOCK(td->td_proc);
	uc.uc_sigmask = td->td_sigmask;
	PROC_UNLOCK(td->td_proc);
	ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE);
	/* Then install the context read from *ucp. */
	ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
	ret = set_mcontext32(td, &uc.uc_mcontext);
	kern_sigprocmask(td, SIG_SETMASK,
	    &uc.uc_sigmask, NULL, 0);

	return (ret == 0 ? EJUSTRETURN : ret);
/*
 * Store the syscall result (or error) into the trapframe per the PowerPC
 * convention: r3 (and r4 for 64-bit results) carry the value, CR0[SO]
 * signals error to the userland stub.
 */
cpu_set_syscall_retval(struct thread *td, int error)
	struct trapframe *tf;

	if (error == EJUSTRETURN)

	/*
	 * __syscall(2) from a 32-bit process: detect calls that really
	 * return a full 64-bit quantity (e.g. lseek).
	 */
	if (tf->fixreg[0] == SYS___syscall &&
	    (SV_PROC_FLAG(p, SV_ILP32))) {
		int code = tf->fixreg[FIRSTARG + 1];
#if defined(COMPAT_FREEBSD6) && defined(SYS_freebsd6_lseek)
		    code != SYS_freebsd6_lseek &&
		    code != SYS_lseek) ? 1 : 0;

		/*
		 * 64-bit return, 32-bit syscall. Fixup byte order
		 */
		tf->fixreg[FIRSTARG] = 0;
		tf->fixreg[FIRSTARG + 1] = td->td_retval[0];

		tf->fixreg[FIRSTARG] = td->td_retval[0];
		tf->fixreg[FIRSTARG + 1] = td->td_retval[1];
	tf->cr &= ~0x10000000;		/* Unset summary overflow */

	/*
	 * Set user's pc back to redo the system call.
	 */
	tf->fixreg[FIRSTARG] = error;
	tf->cr |= 0x10000000;		/* Set summary overflow */
/*
 * Threading functions
 */
/* Thread exit hook: drop per-thread POWER facility state. */
cpu_thread_exit(struct thread *td)
	cleanup_power_extras(td);
/* MD hook invoked when a thread is recycled. */
cpu_thread_clean(struct thread *td)
/*
 * Carve the PCB off the top of the kernel stack (with alignment masking)
 * and place the trapframe immediately below it.
 */
cpu_thread_alloc(struct thread *td)
	pcb = (struct pcb *)((td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~0x2fUL);
	td->td_frame = (struct trapframe *)pcb - 1;
/* MD hook invoked when a thread is freed. */
cpu_thread_free(struct thread *td)
/*
 * Point the thread's TLS register at tls_base plus the ABI TCB bias:
 * r13 + 0x7010 for 64-bit processes, r2 + 0x7008 for 32-bit ones.
 */
cpu_set_user_tls(struct thread *td, void *tls_base)
	if (SV_PROC_FLAG(td->td_proc, SV_LP64))
		td->td_frame->fixreg[13] = (register_t)tls_base + 0x7010;
		td->td_frame->fixreg[2] = (register_t)tls_base + 0x7008;
/*
 * Initialize the new thread td with a copy of td0's user state, set up so
 * it returns to user mode via fork_trampoline -> fork_return.
 */
cpu_copy_thread(struct thread *td, struct thread *td0)
	struct trapframe *tf;
	struct callframe *cf;

	/* Ensure td0 pcb is up to date. */
	if (td0 == curthread)
		cpu_save_thread_regs(td0);

	/* Copy the upcall pcb */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));

	/* Create a stack for the new thread */
	bcopy(td0->td_frame, tf, sizeof(struct trapframe));
	/* Child sees a 0 return value and no error (CR0[SO] cleared). */
	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 0;
	tf->cr &= ~0x10000000;

	/* Set registers for trampoline to user mode. */
	cf = (struct callframe *)tf - 1;
	memset(cf, 0, sizeof(struct callframe));
	cf->cf_func = (register_t)fork_return;
	cf->cf_arg0 = (register_t)td;
	cf->cf_arg1 = (register_t)tf;

	pcb2->pcb_sp = (register_t)cf;
#if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
	/* ELFv1: fork_trampoline is a function descriptor {entry, TOC}. */
	pcb2->pcb_lr = ((register_t *)fork_trampoline)[0];
	pcb2->pcb_toc = ((register_t *)fork_trampoline)[1];

	pcb2->pcb_lr = (register_t)fork_trampoline;
	pcb2->pcb_context[0] = pcb2->pcb_lr;

	pcb2->pcb_cpu.aim.usr_vsid = 0;
	pcb2->pcb_vec.vscr = SPEFSCR_DFLT;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_msr = psl_kernset;
/*
 * Arrange for td to enter userland at entry(arg) on the supplied stack,
 * honoring the process ABI (ILP32, ELFv1 function descriptors, ELFv2 r12
 * global entry point).
 */
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
	struct trapframe *tf;

	/* align stack and alloc space for frame ptr and saved LR */
#ifdef __powerpc64__
	sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 48) &
	sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 8) &

	bzero(tf, sizeof(struct trapframe));

	tf->fixreg[1] = (register_t)sp;
	tf->fixreg[3] = (register_t)arg;
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		tf->srr0 = (register_t)entry;
#ifdef __powerpc64__
		tf->srr1 = psl_userset32 | PSL_FE_DFLT;
		tf->srr1 = psl_userset | PSL_FE_DFLT;
#ifdef __powerpc64__
	if (td->td_proc->p_sysent == &elf64_freebsd_sysvec_v2) {
		tf->srr0 = (register_t)entry;
		/* ELFv2 ABI requires that the global entry point be in r12. */
		tf->fixreg[12] = (register_t)entry;

		/* ELFv1: `entry` is a function descriptor; load {entry, TOC, env}. */
		register_t entry_desc[3];
		(void)copyin((void *)entry, entry_desc, sizeof(entry_desc));
		tf->srr0 = entry_desc[0];
		tf->fixreg[2] = entry_desc[1];
		tf->fixreg[11] = entry_desc[2];

	tf->srr1 = psl_userset | PSL_FE_DFLT;

	td->td_pcb->pcb_flags = 0;
	td->td_pcb->pcb_vec.vscr = SPEFSCR_DFLT;

	td->td_retval[0] = (register_t)entry;
	td->td_retval[1] = 0;
/*
 * Emulate a userland mfspr of the DSCR: return the thread's saved (or
 * default) DSCR value through the trapframe register `reg`.
 */
emulate_mfspr(int spr, int reg, struct trapframe *frame){
	if (spr == SPR_DSCR || spr == SPR_DSCRP) {
		/* Only emulate when the hardware actually implements DSCR. */
		if (!(cpu_features2 & PPC_FEATURE2_DSCR))
		// If DSCR was never set, get the default DSCR
		if ((td->td_pcb->pcb_flags & PCB_CDSCR) == 0)
			td->td_pcb->pcb_dscr = mfspr(SPR_DSCRP);

		frame->fixreg[reg] = td->td_pcb->pcb_dscr;
/*
 * Emulate a userland mtspr to the DSCR: record the value in the PCB (so
 * context switches preserve it) and write it to the hardware register.
 */
emulate_mtspr(int spr, int reg, struct trapframe *frame){
	if (spr == SPR_DSCR || spr == SPR_DSCRP) {
		/* Only emulate when the hardware actually implements DSCR. */
		if (!(cpu_features2 & PPC_FEATURE2_DSCR))
		/* Mark DSCR as customized so cpu_save_thread_regs() saves it. */
		td->td_pcb->pcb_flags |= PCB_CDSCR;
		td->td_pcb->pcb_dscr = frame->fixreg[reg];
		mtspr(SPR_DSCRP, frame->fixreg[reg]);
/* Mask selecting the primary/extended opcode bits of an XFX-form instruction. */
#define XFX 0xFC0007FF
/*
 * Emulate instructions that trapped from userland: mfpvr, mfspr/mtspr of
 * the DSCR, heavyweight sync variants, and (with FPU_EMU) floating-point
 * instructions via the soft-FP emulator.
 */
ppc_instr_emulate(struct trapframe *frame, struct thread *td)
	instr = fuword32((void *)frame->srr0);

	if ((instr & 0xfc1fffff) == 0x7c1f42a6) {	/* mfpvr */
		/* Destination register number lives in bits 21-25. */
		reg = (instr & ~0xfc1fffff) >> 21;
		frame->fixreg[reg] = mfpvr();
	} else if ((instr & XFX) == 0x7c0002a6) {	/* mfspr */
		rs = (instr & 0x3e00000) >> 21;
		spr = (instr & 0x1ff800) >> 16;
		return emulate_mfspr(spr, rs, frame);
	} else if ((instr & XFX) == 0x7c0003a6) {	/* mtspr */
		rs = (instr & 0x3e00000) >> 21;
		spr = (instr & 0x1ff800) >> 16;
		return emulate_mtspr(spr, rs, frame);
	} else if ((instr & 0xfc000ffe) == 0x7c0004ac) {	/* various sync */
		powerpc_sync(); /* Do a heavy-weight sync */

	/* Software FP emulation: initialize the FP area on first use. */
	if (!(pcb->pcb_flags & PCB_FPREGS)) {
		bzero(&pcb->pcb_fpu, sizeof(pcb->pcb_fpu));
		pcb->pcb_flags |= PCB_FPREGS;
	} else if (pcb->pcb_flags & PCB_FPU)
	sig = fpu_emulate(frame, &pcb->pcb_fpu);
	if ((sig == 0 || sig == SIGFPE) && pcb->pcb_flags & PCB_FPU)

	if (sig == SIGILL) {
		if (pcb->pcb_lastill != frame->srr0) {
			/* Allow a second chance, in case of cache sync issues. */
			pmap_sync_icache(PCPU_GET(curpmap), frame->srr0, 4);
			pcb->pcb_lastill = frame->srr0;