2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
44 #include "opt_compat.h"
45 #include "opt_ktrace.h"
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/signalvar.h>
50 #include <sys/vnode.h>
52 #include <sys/condvar.h>
53 #include <sys/event.h>
54 #include <sys/fcntl.h>
55 #include <sys/kernel.h>
58 #include <sys/ktrace.h>
60 #include <sys/malloc.h>
61 #include <sys/mutex.h>
62 #include <sys/namei.h>
64 #include <sys/pioctl.h>
65 #include <sys/resourcevar.h>
69 #include <sys/syscallsubr.h>
70 #include <sys/sysctl.h>
71 #include <sys/sysent.h>
72 #include <sys/syslog.h>
73 #include <sys/sysproto.h>
74 #include <sys/unistd.h>
77 #include <machine/cpu.h>
79 #if defined (__alpha__) && !defined(COMPAT_43)
80 #error "You *really* need COMPAT_43 on the alpha for longjmp(3)"
/*
 * Module-scope constants, forward declarations for the static helpers
 * below, the signal knote filter ops, and the kern.logsigexit sysctl.
 * NOTE(review): this listing carries embedded line-number prefixes and
 * has lost interior lines (blank lines, braces) during extraction; it
 * will not compile as-is.
 */
83 #define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
85 static int coredump(struct thread *);
86 static char *expand_name(const char *, uid_t, pid_t);
87 static int killpg1(struct thread *td, int sig, int pgid, int all);
88 static int issignal(struct thread *p);
89 static int sigprop(int sig);
90 static void stop(struct proc *);
91 static void tdsigwakeup(struct thread *td, int sig, sig_t action);
92 static int filt_sigattach(struct knote *kn);
93 static void filt_sigdetach(struct knote *kn);
94 static int filt_signal(struct knote *kn, long hint);
95 static struct thread *sigtd(struct proc *p, int sig, int prop);
96 static int kern_sigtimedwait(struct thread *td, sigset_t set,
97 siginfo_t *info, struct timespec *timeout);
98 static void do_tdsignal(struct thread *td, int sig, sigtarget_t target);
/* Filter ops used for EVFILT_SIGNAL knotes (attach/detach/event). */
100 struct filterops sig_filtops =
101 { 0, filt_sigattach, filt_sigdetach, filt_signal };
/* kern.logsigexit: log abnormal signal exits via log(9)/syslog(3). */
103 static int kern_logsigexit = 1;
104 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
/* NOTE(review): the "&kern_logsigexit, 0," argument line appears to
 * have been dropped from this listing -- verify against upstream. */
106 "Log processes quitting on abnormal signals to syslog(3)");
/*
 * CANSIGIO(cr1, cr2): may credentials cr1 deliver SIGIO to a process
 * with credentials cr2?  True when cr1 is root, or when any of the
 * real/effective uid pairs match.
 */
109 * Policy -- Can ucred cr1 send SIGIO to process cr2?
110 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
111 * in the right situations.
113 #define CANSIGIO(cr1, cr2) \
114 ((cr1)->cr_uid == 0 || \
115 (cr1)->cr_ruid == (cr2)->cr_ruid || \
116 (cr1)->cr_uid == (cr2)->cr_ruid || \
117 (cr1)->cr_ruid == (cr2)->cr_uid || \
118 (cr1)->cr_uid == (cr2)->cr_uid)
/* NOTE(review): the "static int sugid_coredump = 0;" definition this
 * SYSCTL_INT references is not visible here -- dropped in extraction. */
121 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
122 &sugid_coredump, 0, "Enable coredumping set user/group ID processes");
/* kern.coredump: global on/off switch for writing core files. */
124 static int do_coredump = 1;
125 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
126 &do_coredump, 0, "Enable/Disable coredumps");
/*
 * Per-signal property flags and the table mapping each signal (indexed
 * by _SIG_IDX) to its default action/properties.  sigprop() consults
 * this table.
 */
129 * Signal properties and actions.
130 * The array below categorizes the signals and their default actions
131 * according to the following properties:
133 #define SA_KILL 0x01 /* terminates process by default */
134 #define SA_CORE 0x02 /* ditto and coredumps */
135 #define SA_STOP 0x04 /* suspend process */
136 #define SA_TTYSTOP 0x08 /* ditto, from tty */
137 #define SA_IGNORE 0x10 /* ignore by default */
138 #define SA_CONT 0x20 /* continue if suspended */
139 #define SA_CANTMASK 0x40 /* non-maskable, catchable */
140 #define SA_PROC 0x80 /* deliverable to any thread */
142 static int sigproptbl[NSIG] = {
143 SA_KILL|SA_PROC, /* SIGHUP */
144 SA_KILL|SA_PROC, /* SIGINT */
145 SA_KILL|SA_CORE|SA_PROC, /* SIGQUIT */
146 SA_KILL|SA_CORE, /* SIGILL */
147 SA_KILL|SA_CORE, /* SIGTRAP */
148 SA_KILL|SA_CORE, /* SIGABRT */
149 SA_KILL|SA_CORE|SA_PROC, /* SIGEMT */
150 SA_KILL|SA_CORE, /* SIGFPE */
151 SA_KILL|SA_PROC, /* SIGKILL */
152 SA_KILL|SA_CORE, /* SIGBUS */
153 SA_KILL|SA_CORE, /* SIGSEGV */
154 SA_KILL|SA_CORE, /* SIGSYS */
155 SA_KILL|SA_PROC, /* SIGPIPE */
156 SA_KILL|SA_PROC, /* SIGALRM */
157 SA_KILL|SA_PROC, /* SIGTERM */
158 SA_IGNORE|SA_PROC, /* SIGURG */
159 SA_STOP|SA_PROC, /* SIGSTOP */
160 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTSTP */
161 SA_IGNORE|SA_CONT|SA_PROC, /* SIGCONT */
162 SA_IGNORE|SA_PROC, /* SIGCHLD */
163 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTIN */
164 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTOU */
165 SA_IGNORE|SA_PROC, /* SIGIO */
166 SA_KILL, /* SIGXCPU */
167 SA_KILL, /* SIGXFSZ */
168 SA_KILL|SA_PROC, /* SIGVTALRM */
169 SA_KILL|SA_PROC, /* SIGPROF */
170 SA_IGNORE|SA_PROC, /* SIGWINCH */
171 SA_IGNORE|SA_PROC, /* SIGINFO */
172 SA_KILL|SA_PROC, /* SIGUSR1 */
173 SA_KILL|SA_PROC, /* SIGUSR2 */
/* NOTE(review): the closing "};" of the initializer was lost in
 * extraction -- restore it when repairing this file. */
177 * Determine signal that should be delivered to process p, the current
178 * process, 0 if none. If there is a pending stop signal with default
179 * action, the process stops in issignal().
180 * XXXKSE the check for a pending stop is not done under KSE
185 cursig(struct thread *td)
187 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
188 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
189 mtx_assert(&sched_lock, MA_NOTOWNED);
190 return (SIGPENDING(td) ? issignal(td) : 0);
194 * Arrange for ast() to handle unmasked pending signals on return to user
195 * mode. This must be called whenever a signal is added to td_siglist or
196 * unmasked in td_sigmask.
199 signotify(struct thread *td)
206 PROC_LOCK_ASSERT(p, MA_OWNED);
209 * If our mask changed we may have to move signal that were
210 * previously masked by all threads to our siglist.
213 if (p->p_flag & P_SA)
214 saved = p->p_siglist;
215 SIGSETNAND(set, td->td_sigmask);
216 SIGSETNAND(p->p_siglist, set);
217 SIGSETOR(td->td_siglist, set);
219 if (SIGPENDING(td)) {
220 mtx_lock_spin(&sched_lock);
221 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
222 mtx_unlock_spin(&sched_lock);
224 if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
225 if (SIGSETEQ(saved, p->p_siglist))
228 /* pending set changed */
229 p->p_flag |= P_SIGEVENT;
230 wakeup(&p->p_siglist);
236 sigonstack(size_t sp)
238 struct thread *td = curthread;
240 return ((td->td_pflags & TDP_ALTSTACK) ?
241 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
242 ((td->td_sigstk.ss_size == 0) ?
243 (td->td_sigstk.ss_flags & SS_ONSTACK) :
244 ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size))
246 ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)
255 if (sig > 0 && sig < NSIG)
256 return (sigproptbl[_SIG_IDX(sig)]);
261 sig_ffs(sigset_t *set)
265 for (i = 0; i < _SIG_WORDS; i++)
267 return (ffs(set->__bits[i]) + (i * 32));
/*
 * kern_sigaction(td, sig, act, oact, flags):
 * Common backend for the sigaction-family syscalls.  Reports the old
 * action for `sig' through *oact (if non-NULL) and installs the new
 * action from *act (if non-NULL), updating the shared sigacts state
 * under ps_mtx.  `flags' (KSA_OSIGSET/KSA_FREEBSD4) records which
 * compat entry point installed the handler so the right trampoline/
 * sigframe layout is used on delivery.
 * NOTE(review): this listing is missing interior lines (locals, braces,
 * `else' arms, returns) lost in extraction; do not compile as-is.
 */
280 kern_sigaction(td, sig, act, oact, flags)
283 struct sigaction *act, *oact;
288 struct proc *p = td->td_proc;
290 if (!_SIG_VALID(sig))
295 mtx_lock(&ps->ps_mtx);
/* Report the currently-installed action and its flags. */
297 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
298 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
300 if (SIGISMEMBER(ps->ps_sigonstack, sig))
301 oact->sa_flags |= SA_ONSTACK;
302 if (!SIGISMEMBER(ps->ps_sigintr, sig))
303 oact->sa_flags |= SA_RESTART;
304 if (SIGISMEMBER(ps->ps_sigreset, sig))
305 oact->sa_flags |= SA_RESETHAND;
306 if (SIGISMEMBER(ps->ps_signodefer, sig))
307 oact->sa_flags |= SA_NODEFER;
308 if (SIGISMEMBER(ps->ps_siginfo, sig))
309 oact->sa_flags |= SA_SIGINFO;
310 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
311 oact->sa_flags |= SA_NOCLDSTOP;
312 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
313 oact->sa_flags |= SA_NOCLDWAIT;
/* SIGKILL and SIGSTOP may never be caught or ignored. */
316 if ((sig == SIGKILL || sig == SIGSTOP) &&
317 act->sa_handler != SIG_DFL) {
318 mtx_unlock(&ps->ps_mtx);
324 * Change setting atomically.
327 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
328 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
329 if (act->sa_flags & SA_SIGINFO) {
330 ps->ps_sigact[_SIG_IDX(sig)] =
331 (__sighandler_t *)act->sa_sigaction;
332 SIGADDSET(ps->ps_siginfo, sig);
334 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
335 SIGDELSET(ps->ps_siginfo, sig);
337 if (!(act->sa_flags & SA_RESTART))
338 SIGADDSET(ps->ps_sigintr, sig);
340 SIGDELSET(ps->ps_sigintr, sig);
341 if (act->sa_flags & SA_ONSTACK)
342 SIGADDSET(ps->ps_sigonstack, sig);
344 SIGDELSET(ps->ps_sigonstack, sig);
345 if (act->sa_flags & SA_RESETHAND)
346 SIGADDSET(ps->ps_sigreset, sig);
348 SIGDELSET(ps->ps_sigreset, sig);
349 if (act->sa_flags & SA_NODEFER)
350 SIGADDSET(ps->ps_signodefer, sig);
352 SIGDELSET(ps->ps_signodefer, sig);
354 if (act->sa_flags & SA_USERTRAMP)
355 SIGADDSET(ps->ps_usertramp, sig);
357 SIGDELSET(ps->ps_usertramp, sig);
/* SIGCHLD-specific flags controlling stop/zombie notification. */
359 if (sig == SIGCHLD) {
360 if (act->sa_flags & SA_NOCLDSTOP)
361 ps->ps_flag |= PS_NOCLDSTOP;
363 ps->ps_flag &= ~PS_NOCLDSTOP;
364 if (act->sa_flags & SA_NOCLDWAIT) {
366 * Paranoia: since SA_NOCLDWAIT is implemented
367 * by reparenting the dying child to PID 1 (and
368 * trust it to reap the zombie), PID 1 itself
369 * is forbidden to set SA_NOCLDWAIT.
372 ps->ps_flag &= ~PS_NOCLDWAIT;
374 ps->ps_flag |= PS_NOCLDWAIT;
376 ps->ps_flag &= ~PS_NOCLDWAIT;
377 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
378 ps->ps_flag |= PS_CLDSIGIGN;
380 ps->ps_flag &= ~PS_CLDSIGIGN;
383 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
384 * and for signals set to SIG_DFL where the default is to
385 * ignore. However, don't put SIGCONT in ps_sigignore, as we
386 * have to restart the process.
388 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
389 (sigprop(sig) & SA_IGNORE &&
390 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
391 if ((p->p_flag & P_SA) &&
392 SIGISMEMBER(p->p_siglist, sig)) {
393 p->p_flag |= P_SIGEVENT;
394 wakeup(&p->p_siglist);
396 /* never to be seen again */
397 SIGDELSET(p->p_siglist, sig);
398 mtx_lock_spin(&sched_lock);
399 FOREACH_THREAD_IN_PROC(p, td0)
400 SIGDELSET(td0->td_siglist, sig);
401 mtx_unlock_spin(&sched_lock);
403 /* easier in psignal */
404 SIGADDSET(ps->ps_sigignore, sig);
405 SIGDELSET(ps->ps_sigcatch, sig);
407 SIGDELSET(ps->ps_sigignore, sig);
408 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
409 SIGDELSET(ps->ps_sigcatch, sig);
411 SIGADDSET(ps->ps_sigcatch, sig);
/* Track which compat ABI installed the handler (sigframe layout). */
413 #ifdef COMPAT_FREEBSD4
414 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
415 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
416 (flags & KSA_FREEBSD4) == 0)
417 SIGDELSET(ps->ps_freebsd4, sig);
419 SIGADDSET(ps->ps_freebsd4, sig);
422 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
423 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
424 (flags & KSA_OSIGSET) == 0)
425 SIGDELSET(ps->ps_osigset, sig);
427 SIGADDSET(ps->ps_osigset, sig);
430 mtx_unlock(&ps->ps_mtx);
/*
 * sigaction(2) system call (MPSAFE): copy in the new action if given,
 * apply it via kern_sigaction() with no compat flags, and copy out the
 * previous action if requested.
 */
435 #ifndef _SYS_SYSPROTO_H_
436 struct sigaction_args {
438 struct sigaction *act;
439 struct sigaction *oact;
448 register struct sigaction_args *uap;
450 struct sigaction act, oact;
451 register struct sigaction *actp, *oactp;
454 actp = (uap->act != NULL) ? &act : NULL;
455 oactp = (uap->oact != NULL) ? &oact : NULL;
457 error = copyin(uap->act, actp, sizeof(act));
461 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
463 error = copyout(oactp, uap->oact, sizeof(oact));
/*
 * FreeBSD 4.x compatible sigaction(2) (MPSAFE): identical to the native
 * entry point except it passes KSA_FREEBSD4 so delivery uses the old
 * sigframe layout.
 */
467 #ifdef COMPAT_FREEBSD4
468 #ifndef _SYS_SYSPROTO_H_
469 struct freebsd4_sigaction_args {
471 struct sigaction *act;
472 struct sigaction *oact;
479 freebsd4_sigaction(td, uap)
481 register struct freebsd4_sigaction_args *uap;
483 struct sigaction act, oact;
484 register struct sigaction *actp, *oactp;
488 actp = (uap->act != NULL) ? &act : NULL;
489 oactp = (uap->oact != NULL) ? &oact : NULL;
491 error = copyin(uap->act, actp, sizeof(act));
495 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
497 error = copyout(oactp, uap->oact, sizeof(oact));
500 #endif /* COMPAT_FREEBSD4 */
/*
 * 4.3BSD-compatible osigaction(2): translates between the old
 * osigaction/osigset_t representation and the native struct sigaction
 * (OSIG2SIG/SIG2OSIG convert the 32-bit masks), then defers to
 * kern_sigaction() with KSA_OSIGSET.  Also provides a generic
 * osigreturn() ENOSYS stub for platforms without a real one.
 */
502 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
503 #ifndef _SYS_SYSPROTO_H_
504 struct osigaction_args {
506 struct osigaction *nsa;
507 struct osigaction *osa;
516 register struct osigaction_args *uap;
518 struct osigaction sa;
519 struct sigaction nsa, osa;
520 register struct sigaction *nsap, *osap;
/* Old interface only understands signal numbers below ONSIG (32). */
523 if (uap->signum <= 0 || uap->signum >= ONSIG)
526 nsap = (uap->nsa != NULL) ? &nsa : NULL;
527 osap = (uap->osa != NULL) ? &osa : NULL;
530 error = copyin(uap->nsa, &sa, sizeof(sa));
533 nsap->sa_handler = sa.sa_handler;
534 nsap->sa_flags = sa.sa_flags;
535 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
537 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
538 if (osap && !error) {
539 sa.sa_handler = osap->sa_handler;
540 sa.sa_flags = osap->sa_flags;
541 SIG2OSIG(osap->sa_mask, sa.sa_mask);
542 error = copyout(&sa, uap->osa, sizeof(sa));
547 #if !defined(__i386__) && !defined(__alpha__)
548 /* Avoid replicating the same stub everywhere */
552 struct osigreturn_args *uap;
555 return (nosys(td, (struct nosys_args *)uap));
558 #endif /* COMPAT_43 */
561 * Initialize signal state for process 0;
562 * set to ignore signals that are ignored by default.
573 mtx_lock(&ps->ps_mtx);
574 for (i = 1; i <= NSIG; i++)
575 if (sigprop(i) & SA_IGNORE && i != SIGCONT)
576 SIGADDSET(ps->ps_sigignore, i);
577 mtx_unlock(&ps->ps_mtx);
582 * Reset signals for an exec of the specified process.
585 execsigs(struct proc *p)
592 * Reset caught signals. Held signals remain held
593 * through td_sigmask (unless they were caught,
594 * and are now ignored by default).
596 PROC_LOCK_ASSERT(p, MA_OWNED);
597 td = FIRST_THREAD_IN_PROC(p);
599 mtx_lock(&ps->ps_mtx);
600 while (SIGNOTEMPTY(ps->ps_sigcatch)) {
601 sig = sig_ffs(&ps->ps_sigcatch);
602 SIGDELSET(ps->ps_sigcatch, sig);
603 if (sigprop(sig) & SA_IGNORE) {
605 SIGADDSET(ps->ps_sigignore, sig);
606 SIGDELSET(p->p_siglist, sig);
608 * There is only one thread at this point.
610 SIGDELSET(td->td_siglist, sig);
612 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
615 * Reset stack state to the user stack.
616 * Clear set of signals caught on the signal stack.
618 td->td_sigstk.ss_flags = SS_DISABLE;
619 td->td_sigstk.ss_size = 0;
620 td->td_sigstk.ss_sp = 0;
621 td->td_pflags &= ~TDP_ALTSTACK;
623 * Reset no zombies if child dies flag as Solaris does.
625 ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
626 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
627 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
628 mtx_unlock(&ps->ps_mtx);
634 * Manipulate signal mask.
637 kern_sigprocmask(td, how, set, oset, old)
640 sigset_t *set, *oset;
645 PROC_LOCK(td->td_proc);
647 *oset = td->td_sigmask;
654 SIGSETOR(td->td_sigmask, *set);
657 SIGSETNAND(td->td_sigmask, *set);
663 SIGSETLO(td->td_sigmask, *set);
665 td->td_sigmask = *set;
673 PROC_UNLOCK(td->td_proc);
/*
 * sigprocmask(2) syscall wrapper (MPSAFE): copy in the new mask if
 * given, apply it via kern_sigprocmask() (old == 0), and copy out the
 * previous mask if requested.
 */
678 * sigprocmask() - MP SAFE
681 #ifndef _SYS_SYSPROTO_H_
682 struct sigprocmask_args {
690 register struct thread *td;
691 struct sigprocmask_args *uap;
694 sigset_t *setp, *osetp;
697 setp = (uap->set != NULL) ? &set : NULL;
698 osetp = (uap->oset != NULL) ? &oset : NULL;
700 error = copyin(uap->set, setp, sizeof(set));
704 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
705 if (osetp && !error) {
706 error = copyout(osetp, uap->oset, sizeof(oset));
/*
 * 4.3BSD-compatible osigprocmask() (MPSAFE): converts the 32-bit
 * osigset_t to/from the native sigset_t and returns the old mask in
 * td_retval[0] rather than via copyout.  Passes old == 1 so
 * SIG_SETMASK only replaces the low 32 bits.
 */
711 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
713 * osigprocmask() - MP SAFE
715 #ifndef _SYS_SYSPROTO_H_
716 struct osigprocmask_args {
722 osigprocmask(td, uap)
723 register struct thread *td;
724 struct osigprocmask_args *uap;
729 OSIG2SIG(uap->mask, set);
730 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
731 SIG2OSIG(oset, td->td_retval[0]);
734 #endif /* COMPAT_43 */
/*
 * sigwait(2)/sigtimedwait(2)/sigwaitinfo(2) front ends and their common
 * backend kern_sigtimedwait().  Each front end copies in the wait set
 * (and timeout, for sigtimedwait), calls the backend, copies out the
 * result, and re-posts the signal to the thread if the copyout failed
 * so it is not lost.
 */
736 #ifndef _SYS_SYSPROTO_H_
737 struct sigpending_args {
745 sigwait(struct thread *td, struct sigwait_args *uap)
751 error = copyin(uap->set, &set, sizeof(set));
755 error = kern_sigtimedwait(td, set, &info, NULL);
759 error = copyout(&info.si_signo, uap->sig, sizeof(info.si_signo));
760 /* Repost if we got an error. */
761 if (error && info.si_signo) {
762 PROC_LOCK(td->td_proc);
763 tdsignal(td, info.si_signo, SIGTARGET_TD);
764 PROC_UNLOCK(td->td_proc);
772 sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
775 struct timespec *timeout;
781 error = copyin(uap->timeout, &ts, sizeof(ts));
789 error = copyin(uap->set, &set, sizeof(set));
793 error = kern_sigtimedwait(td, set, &info, timeout);
798 error = copyout(&info, uap->info, sizeof(info));
799 /* Repost if we got an error. */
800 if (error && info.si_signo) {
801 PROC_LOCK(td->td_proc);
802 tdsignal(td, info.si_signo, SIGTARGET_TD);
803 PROC_UNLOCK(td->td_proc);
805 td->td_retval[0] = info.si_signo;
814 sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
820 error = copyin(uap->set, &set, sizeof(set));
824 error = kern_sigtimedwait(td, set, &info, NULL);
829 error = copyout(&info, uap->info, sizeof(info));
830 /* Repost if we got an error. */
831 if (error && info.si_signo) {
832 PROC_LOCK(td->td_proc);
833 tdsignal(td, info.si_signo, SIGTARGET_TD);
834 PROC_UNLOCK(td->td_proc);
836 td->td_retval[0] = info.si_signo;
/*
 * kern_sigtimedwait: wait for any signal in `waitset' to become
 * pending, or until `timeout' expires (NULL means wait forever).  First
 * scans the thread and process pending lists; if nothing is pending it
 * temporarily unmasks only the waited-for signals, publishes the wait
 * set in td_waitset, and sleeps.  On success fills *info with si_signo.
 */
842 kern_sigtimedwait(struct thread *td, sigset_t waitset, siginfo_t *info,
843 struct timespec *timeout)
846 sigset_t savedmask, sigset;
856 SIG_CANTMASK(waitset);
860 savedmask = td->td_sigmask;
863 for (i = 1; i <= _SIG_MAXSIG; ++i) {
864 if (!SIGISMEMBER(waitset, i))
866 if (SIGISMEMBER(td->td_siglist, i)) {
867 SIGFILLSET(td->td_sigmask);
868 SIG_CANTMASK(td->td_sigmask);
869 SIGDELSET(td->td_sigmask, i);
870 mtx_lock(&ps->ps_mtx);
873 mtx_unlock(&ps->ps_mtx);
874 } else if (SIGISMEMBER(p->p_siglist, i)) {
875 if (p->p_flag & P_SA) {
876 p->p_flag |= P_SIGEVENT;
877 wakeup(&p->p_siglist);
/* Move the process-pending signal to this thread. */
879 SIGDELSET(p->p_siglist, i);
880 SIGADDSET(td->td_siglist, i);
881 SIGFILLSET(td->td_sigmask);
882 SIG_CANTMASK(td->td_sigmask);
883 SIGDELSET(td->td_sigmask, i);
884 mtx_lock(&ps->ps_mtx);
887 mtx_unlock(&ps->ps_mtx);
890 td->td_sigmask = savedmask;
898 td->td_sigmask = savedmask;
900 sigset = td->td_siglist;
901 SIGSETOR(sigset, p->p_siglist);
902 SIGSETAND(sigset, waitset);
903 if (!SIGISEMPTY(sigset))
907 * POSIX says this must be checked after looking for pending
/* NOTE(review): `> 1000000000' accepts tv_nsec == 1e9; POSIX requires
 * rejecting tv_nsec >= 1e9 -- verify against upstream. */
913 if (timeout->tv_nsec < 0 || timeout->tv_nsec > 1000000000) {
917 if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
921 TIMESPEC_TO_TIMEVAL(&tv, timeout);
926 td->td_waitset = &waitset;
927 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", hz);
928 td->td_waitset = NULL;
929 if (error == 0) /* surplus wakeup ? */
938 mtx_lock(&ps->ps_mtx);
939 action = ps->ps_sigact[_SIG_IDX(sig)];
940 mtx_unlock(&ps->ps_mtx);
942 if (KTRPOINT(td, KTR_PSIG))
943 ktrpsig(sig, action, &td->td_sigmask, 0);
945 _STOPEVENT(p, S_SIG, sig);
947 SIGDELSET(td->td_siglist, sig);
948 info->si_signo = sig;
/*
 * sigpending(2) and 4.3BSD osigpending(): report the union of the
 * process-wide and per-thread pending signal sets.  The native version
 * copies the set out; the compat version returns the low 32 bits in
 * td_retval[0] via SIG2OSIG.
 */
961 struct sigpending_args *uap;
963 struct proc *p = td->td_proc;
967 siglist = p->p_siglist;
968 SIGSETOR(siglist, td->td_siglist);
970 return (copyout(&siglist, uap->set, sizeof(sigset_t)));
973 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
974 #ifndef _SYS_SYSPROTO_H_
975 struct osigpending_args {
985 struct osigpending_args *uap;
987 struct proc *p = td->td_proc;
991 siglist = p->p_siglist;
992 SIGSETOR(siglist, td->td_siglist);
994 SIG2OSIG(siglist, td->td_retval[0]);
997 #endif /* COMPAT_43 */
/*
 * 4.3BSD sigvec(2) compatibility: translates struct sigvec to/from
 * struct sigaction (note SV_INTERRUPT is the inverse of SA_RESTART,
 * hence the XOR) and defers to kern_sigaction() with KSA_OSIGSET.
 */
999 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1001 * Generalized interface signal handler, 4.3-compatible.
1003 #ifndef _SYS_SYSPROTO_H_
1004 struct osigvec_args {
1017 register struct osigvec_args *uap;
1020 struct sigaction nsa, osa;
1021 register struct sigaction *nsap, *osap;
1024 if (uap->signum <= 0 || uap->signum >= ONSIG)
1026 nsap = (uap->nsv != NULL) ? &nsa : NULL;
1027 osap = (uap->osv != NULL) ? &osa : NULL;
1029 error = copyin(uap->nsv, &vec, sizeof(vec));
1032 nsap->sa_handler = vec.sv_handler;
1033 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1034 nsap->sa_flags = vec.sv_flags;
1035 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
1037 nsap->sa_flags |= SA_USERTRAMP;
1040 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1041 if (osap && !error) {
1042 vec.sv_handler = osap->sa_handler;
1043 SIG2OSIG(osap->sa_mask, vec.sv_mask);
1044 vec.sv_flags = osap->sa_flags;
1045 vec.sv_flags &= ~SA_NOCLDWAIT;
1046 vec.sv_flags ^= SA_RESTART;
1048 vec.sv_flags &= ~SA_NOCLDSTOP;
1050 error = copyout(&vec, uap->osv, sizeof(vec));
/*
 * 4.3BSD osigblock()/osigsetmask(): return the old 32-bit mask in
 * td_retval[0], then OR the new bits into (osigblock) or replace the
 * low 32 bits of (osigsetmask, via SIGSETLO) the thread's signal mask.
 */
1055 #ifndef _SYS_SYSPROTO_H_
1056 struct osigblock_args {
1065 register struct thread *td;
1066 struct osigblock_args *uap;
1068 struct proc *p = td->td_proc;
1071 OSIG2SIG(uap->mask, set);
1074 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1075 SIGSETOR(td->td_sigmask, set);
1080 #ifndef _SYS_SYSPROTO_H_
1081 struct osigsetmask_args {
1089 osigsetmask(td, uap)
1091 struct osigsetmask_args *uap;
1093 struct proc *p = td->td_proc;
1096 OSIG2SIG(uap->mask, set);
1099 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1100 SIGSETLO(td->td_sigmask, set);
1105 #endif /* COMPAT_43 || COMPAT_SUNOS */
/*
 * sigsuspend(2) syscall wrapper: copy in the temporary mask and defer
 * to kern_sigsuspend().
 */
1108 * Suspend process until signal, providing mask to be set
1110 ***** XXXKSE this doesn't make sense under KSE.
1111 ***** Do we suspend the thread or all threads in the process?
1112 ***** How do we suspend threads running NOW on another processor?
1114 #ifndef _SYS_SYSPROTO_H_
1115 struct sigsuspend_args {
1116 const sigset_t *sigmask;
1126 struct sigsuspend_args *uap;
1131 error = copyin(uap->sigmask, &mask, sizeof(mask));
1134 return (kern_sigsuspend(td, mask));
1138 kern_sigsuspend(struct thread *td, sigset_t mask)
1140 struct proc *p = td->td_proc;
1143 * When returning from sigsuspend, we want
1144 * the old mask to be restored after the
1145 * signal handler has finished. Thus, we
1146 * save it here and mark the sigacts structure
1150 td->td_oldsigmask = td->td_sigmask;
1151 td->td_pflags |= TDP_OLDMASK;
1153 td->td_sigmask = mask;
1155 while (msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
1158 /* always return EINTR rather than ERESTART... */
/*
 * 4.3BSD-compatible sigsuspend: mask is passed by value (libc stub
 * avoids a copyin) and only the low 32 bits of the thread mask are
 * replaced (SIGSETLO).  Sleeps until interrupted; returns EINTR.
 */
1162 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1164 * Compatibility sigsuspend call for old binaries. Note nonstandard calling
1165 * convention: libc stub passes mask, not pointer, to save a copyin.
1167 #ifndef _SYS_SYSPROTO_H_
1168 struct osigsuspend_args {
1177 osigsuspend(td, uap)
1179 struct osigsuspend_args *uap;
1181 struct proc *p = td->td_proc;
1185 td->td_oldsigmask = td->td_sigmask;
1186 td->td_pflags |= TDP_OLDMASK;
1187 OSIG2SIG(uap->mask, mask);
1189 SIGSETLO(td->td_sigmask, mask);
1191 while (msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
1194 /* always return EINTR rather than ERESTART... */
1197 #endif /* COMPAT_43 */
/*
 * 4.3BSD sigstack(2) compatibility: old-style alternate stacks have no
 * size, so ss_size is forced to 0 and on-stack state is tracked via the
 * SS_ONSTACK flag.  Reports the previous stack through *oss if asked.
 */
1199 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1200 #ifndef _SYS_SYSPROTO_H_
1201 struct osigstack_args {
1202 struct sigstack *nss;
1203 struct sigstack *oss;
1213 register struct osigstack_args *uap;
1215 struct sigstack nss, oss;
1218 if (uap->nss != NULL) {
1219 error = copyin(uap->nss, &nss, sizeof(nss));
1223 oss.ss_sp = td->td_sigstk.ss_sp;
1224 oss.ss_onstack = sigonstack(cpu_getstack(td));
1225 if (uap->nss != NULL) {
1226 td->td_sigstk.ss_sp = nss.ss_sp;
1227 td->td_sigstk.ss_size = 0;
1228 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1229 td->td_pflags |= TDP_ALTSTACK;
1231 if (uap->oss != NULL)
1232 error = copyout(&oss, uap->oss, sizeof(oss));
1236 #endif /* COMPAT_43 || COMPAT_SUNOS */
/*
 * sigaltstack(2) syscall wrapper: copy in the new stack_t if given,
 * defer to kern_sigaltstack(), and copy out the old stack if requested.
 */
1238 #ifndef _SYS_SYSPROTO_H_
1239 struct sigaltstack_args {
1249 sigaltstack(td, uap)
1251 register struct sigaltstack_args *uap;
1256 if (uap->ss != NULL) {
1257 error = copyin(uap->ss, &ss, sizeof(ss));
1261 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1262 (uap->oss != NULL) ? &oss : NULL);
1265 if (uap->oss != NULL)
1266 error = copyout(&oss, uap->oss, sizeof(stack_t));
1271 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1273 struct proc *p = td->td_proc;
1276 oonstack = sigonstack(cpu_getstack(td));
1279 *oss = td->td_sigstk;
1280 oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
1281 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1287 if ((ss->ss_flags & ~SS_DISABLE) != 0)
1289 if (!(ss->ss_flags & SS_DISABLE)) {
1290 if (ss->ss_size < p->p_sysent->sv_minsigstksz) {
1293 td->td_sigstk = *ss;
1294 td->td_pflags |= TDP_ALTSTACK;
1296 td->td_pflags &= ~TDP_ALTSTACK;
/*
 * killpg1: common code for kill process group / broadcast kill.
 * all != 0 broadcasts to every process the caller may signal (skipping
 * pid <= 1, system processes, and the caller); otherwise signals the
 * members of process group `pgid' (0 meaning the caller's own group).
 * Returns 0 if at least one process was found, else ESRCH.
 * NOTE(review): interior lines (locals, psignal() calls, braces) were
 * lost in extraction.
 */
1303 * Common code for kill process group/broadcast kill.
1304 * cp is calling process.
1307 killpg1(td, sig, pgid, all)
1308 register struct thread *td;
1311 register struct proc *p;
1319 sx_slock(&allproc_lock);
1320 LIST_FOREACH(p, &allproc, p_list) {
1322 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1327 if (p_cansignal(td, p, sig) == 0) {
1334 sx_sunlock(&allproc_lock);
1336 sx_slock(&proctree_lock);
1339 * zero pgid means send to my process group.
1341 pgrp = td->td_proc->p_pgrp;
1344 pgrp = pgfind(pgid);
1346 sx_sunlock(&proctree_lock);
1350 sx_sunlock(&proctree_lock);
1351 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1353 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
1357 if (p->p_state == PRS_ZOMBIE) {
1361 if (p_cansignal(td, p, sig) == 0) {
1370 return (nfound ? 0 : ESRCH);
/*
 * kill(2): pid > 0 signals a single process (after a p_cansignal()
 * permission check; signum 0 just checks permission); pid == -1
 * broadcasts; pid == 0 signals the caller's own process group; other
 * negative pids signal process group -pid.  okillpg() is the 4.3BSD
 * killpg interface, and gsignal() delivers to a group found by
 * pgfind() under the proctree lock.
 */
1373 #ifndef _SYS_SYSPROTO_H_
1385 register struct thread *td;
1386 register struct kill_args *uap;
1388 register struct proc *p;
1391 if ((u_int)uap->signum > _SIG_MAXSIG)
1395 /* kill single process */
1396 if ((p = pfind(uap->pid)) == NULL)
1398 error = p_cansignal(td, p, uap->signum);
1399 if (error == 0 && uap->signum)
1400 psignal(p, uap->signum);
1405 case -1: /* broadcast signal */
1406 return (killpg1(td, uap->signum, 0, 1));
1407 case 0: /* signal own process group */
1408 return (killpg1(td, uap->signum, 0, 0));
1409 default: /* negative explicit process group */
1410 return (killpg1(td, uap->signum, -uap->pid, 0));
1415 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1416 #ifndef _SYS_SYSPROTO_H_
1417 struct okillpg_args {
1429 register struct okillpg_args *uap;
1432 if ((u_int)uap->signum > _SIG_MAXSIG)
1434 return (killpg1(td, uap->signum, uap->pgid, 0));
1436 #endif /* COMPAT_43 || COMPAT_SUNOS */
1439 * Send a signal to a process group.
1448 sx_slock(&proctree_lock);
1449 pgrp = pgfind(pgid);
1450 sx_sunlock(&proctree_lock);
1452 pgsignal(pgrp, sig, 0);
1459 * Send a signal to a process group. If checktty is 1,
1460 * limit to members which have a controlling terminal.
1463 pgsignal(pgrp, sig, checkctty)
1467 register struct proc *p;
1470 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1471 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1473 if (checkctty == 0 || p->p_flag & P_CONTROLT)
/*
 * trapsignal: deliver a signal generated by a trap to the current
 * thread.  If the process is untraced and catches the signal with it
 * unmasked, deliver immediately with the correct code (for KSE/P_SA
 * threads the siginfo is instead copied out to the thread mailbox and
 * an upcall is forced so the UTS sees it); otherwise post it via
 * tdsignal().  NOTE(review): many interior lines (locals, braces,
 * `else' arms) were lost in extraction.
 */
1481 * Send a signal caused by a trap to the current thread.
1482 * If it will be caught immediately, deliver it with correct code.
1483 * Otherwise, post it normally.
1488 trapsignal(struct thread *td, int sig, u_long code)
1496 if (td->td_flags & TDF_SA) {
1497 if (td->td_mailbox == NULL)
1498 thread_user_enter(p, td);
1500 if (td->td_mailbox) {
1501 SIGDELSET(td->td_sigmask, sig);
1502 mtx_lock_spin(&sched_lock);
1504 * Force scheduling an upcall, so UTS has chance to
1505 * process the signal before thread runs again in
1509 td->td_upcall->ku_flags |= KUF_DOUPCALL;
1510 mtx_unlock_spin(&sched_lock);
1512 /* UTS caused a sync signal */
1513 p->p_code = code; /* XXX for core dump/debugger */
1514 p->p_sig = sig; /* XXX to verify code */
1521 mtx_lock(&ps->ps_mtx);
1522 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
1523 !SIGISMEMBER(td->td_sigmask, sig)) {
1524 p->p_stats->p_ru.ru_nsignals++;
1526 if (KTRPOINT(curthread, KTR_PSIG))
1527 ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
1528 &td->td_sigmask, code);
1530 if (!(td->td_flags & TDF_SA))
1531 (*p->p_sysent->sv_sendsig)(
1532 ps->ps_sigact[_SIG_IDX(sig)], sig,
1533 &td->td_sigmask, code);
1535 cpu_thread_siginfo(sig, code, &siginfo);
1536 mtx_unlock(&ps->ps_mtx);
1538 error = copyout(&siginfo, &td->td_mailbox->tm_syncsig,
1541 /* UTS memory corrupted */
1543 sigexit(td, SIGILL);
1544 SIGADDSET(td->td_sigmask, sig);
1545 mtx_lock(&ps->ps_mtx);
1547 SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
1548 if (!SIGISMEMBER(ps->ps_signodefer, sig))
1549 SIGADDSET(td->td_sigmask, sig);
1550 if (SIGISMEMBER(ps->ps_sigreset, sig)) {
1552 * See kern_sigaction() for origin of this code.
1554 SIGDELSET(ps->ps_sigcatch, sig);
1555 if (sig != SIGCONT &&
1556 sigprop(sig) & SA_IGNORE)
1557 SIGADDSET(ps->ps_sigignore, sig);
1558 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1560 mtx_unlock(&ps->ps_mtx);
1562 mtx_unlock(&ps->ps_mtx);
1563 p->p_code = code; /* XXX for core dump/debugger */
1564 p->p_sig = sig; /* XXX to verify code */
1565 tdsignal(td, sig, SIGTARGET_TD);
1570 static struct thread *
1571 sigtd(struct proc *p, int sig, int prop)
1573 struct thread *td, *signal_td;
1575 PROC_LOCK_ASSERT(p, MA_OWNED);
1578 * First find a thread in sigwait state and signal belongs to
1579 * its wait set. POSIX's arguments is that speed of delivering signal
1580 * to sigwait thread is faster than delivering signal to user stack.
1581 * If we can not find sigwait thread, then find the first thread in
1582 * the proc that doesn't have this signal masked, an exception is
1583 * if current thread is sending signal to its process, and it does not
1584 * mask the signal, it should get the signal, this is another fast
1585 * way to deliver signal.
1588 mtx_lock_spin(&sched_lock);
1589 FOREACH_THREAD_IN_PROC(p, td) {
1590 if (td->td_waitset != NULL &&
1591 SIGISMEMBER(*(td->td_waitset), sig)) {
1592 mtx_unlock_spin(&sched_lock);
1595 if (!SIGISMEMBER(td->td_sigmask, sig)) {
1596 if (td == curthread)
1597 signal_td = curthread;
1598 else if (signal_td == NULL)
1602 if (signal_td == NULL)
1603 signal_td = FIRST_THREAD_IN_PROC(p);
1604 mtx_unlock_spin(&sched_lock);
1609 * Send the signal to the process. If the signal has an action, the action
1610 * is usually performed by the target process rather than the caller; we add
1611 * the signal to the set of pending signals for the process.
1614 * o When a stop signal is sent to a sleeping process that takes the
1615 * default action, the process is stopped without awakening it.
1616 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1617 * regardless of the signal action (eg, blocked or ignored).
1619 * Other ignored signals are discarded immediately.
1624 psignal(struct proc *p, int sig)
1629 if (!_SIG_VALID(sig))
1630 panic("psignal(): invalid signal");
1632 PROC_LOCK_ASSERT(p, MA_OWNED);
1633 prop = sigprop(sig);
1636 * Find a thread to deliver the signal to.
1638 td = sigtd(p, sig, prop);
1640 tdsignal(td, sig, SIGTARGET_P);
/*
 * Deliver a signal to thread td via do_tdsignal().  For SA (KSE) processes
 * we additionally snapshot the process-pending set before delivery and, if
 * it changed, flag P_SIGEVENT and wake anyone sleeping on p_siglist so the
 * userland thread scheduler notices the new signal.
 */
1647 tdsignal(struct thread *td, int sig, sigtarget_t target)
1650 struct proc *p = td->td_proc;
/* Snapshot only matters for SA processes; 'saved' is unused otherwise. */
1652 if (p->p_flag & P_SA)
1653 saved = p->p_siglist;
1654 do_tdsignal(td, sig, target);
/* Only notify once: skip if a P_SIGEVENT wakeup is already outstanding. */
1655 if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
1656 if (SIGSETEQ(saved, p->p_siglist))
1659 /* pending set changed */
1660 p->p_flag |= P_SIGEVENT;
1661 wakeup(&p->p_siglist);
/*
 * Core of signal delivery: decide whether the signal goes on the thread's
 * or the process's pending list, apply ignore/trace filtering, handle the
 * special process-wide effects of STOP/CONT/KILL, and kick the target
 * thread awake when immediate processing is required.
 * NOTE(review): this listing elides lines throughout; comments below cover
 * only what is visible here — confirm details against the full source.
 */
1667 do_tdsignal(struct thread *td, int sig, sigtarget_t target)
1670 register sig_t action;
1676 if (!_SIG_VALID(sig))
1677 panic("do_tdsignal(): invalid signal");
1682 PROC_LOCK_ASSERT(p, MA_OWNED);
/* Let kqueue EVFILT_SIGNAL listeners see the signal being posted. */
1683 KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
1685 prop = sigprop(sig);
1688 * If the signal is blocked and not destined for this thread, then
1689 * assign it to the process so that we can find it later in the first
1690 * thread that unblocks it. Otherwise, assign it to this thread now.
1692 if (target == SIGTARGET_TD) {
1693 siglist = &td->td_siglist;
/* Process-targeted: queue per-thread only if td can take it now
 * (unmasked, or explicitly sigwait()ed for via td_waitset). */
1695 if (!SIGISMEMBER(td->td_sigmask, sig))
1696 siglist = &td->td_siglist;
1697 else if (td->td_waitset != NULL &&
1698 SIGISMEMBER(*(td->td_waitset), sig))
1699 siglist = &td->td_siglist;
1701 siglist = &p->p_siglist;
1705 * If proc is traced, always give parent a chance;
1706 * if signal event is tracked by procfs, give *that*
1707 * a chance, as well.
1709 if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
1713 * If the signal is being ignored,
1714 * then we forget about it immediately.
1715 * (Note: we don't set SIGCONT in ps_sigignore,
1716 * and if it is set to SIG_IGN,
1717 * action will be SIG_DFL here.)
1719 mtx_lock(&ps->ps_mtx);
/* Ignored signal, or process already exiting: drop it on the floor. */
1720 if (SIGISMEMBER(ps->ps_sigignore, sig) ||
1721 (p->p_flag & P_WEXIT)) {
1722 mtx_unlock(&ps->ps_mtx);
/* Blocked and not being sigwait()ed for -> hold it (action SIG_HOLD,
 * presumably set in the elided branch — confirm against full source). */
1725 if (((td->td_waitset == NULL) &&
1726 SIGISMEMBER(td->td_sigmask, sig)) ||
1727 ((td->td_waitset != NULL) &&
1728 SIGISMEMBER(td->td_sigmask, sig) &&
1729 !SIGISMEMBER(*(td->td_waitset), sig)))
1731 else if (SIGISMEMBER(ps->ps_sigcatch, sig))
1735 mtx_unlock(&ps->ps_mtx);
/* A continue signal cancels any pending stop signals, process-wide. */
1738 if (prop & SA_CONT) {
1739 SIG_STOPSIGMASK(p->p_siglist);
1741 * XXX Should investigate leaving STOP and CONT sigs only in
1742 * the proc's siglist.
1744 mtx_lock_spin(&sched_lock);
1745 FOREACH_THREAD_IN_PROC(p, td0)
1746 SIG_STOPSIGMASK(td0->td_siglist);
1747 mtx_unlock_spin(&sched_lock);
/* Conversely, a stop signal cancels any pending SIGCONT. */
1750 if (prop & SA_STOP) {
1752 * If sending a tty stop signal to a member of an orphaned
1753 * process group, discard the signal here if the action
1754 * is default; don't stop the process below if sleeping,
1755 * and don't clear any pending SIGCONT.
1757 if ((prop & SA_TTYSTOP) &&
1758 (p->p_pgrp->pg_jobc == 0) &&
1759 (action == SIG_DFL))
1761 SIG_CONTSIGMASK(p->p_siglist);
1762 mtx_lock_spin(&sched_lock);
1763 FOREACH_THREAD_IN_PROC(p, td0)
1764 SIG_CONTSIGMASK(td0->td_siglist);
1765 mtx_unlock_spin(&sched_lock);
1766 p->p_flag &= ~P_CONTINUED;
/* Finally queue the signal on the list chosen above and poke td. */
1769 SIGADDSET(*siglist, sig);
1770 signotify(td); /* uses schedlock */
/* Delivering a waited-for signal terminates the thread's sigwait state. */
1771 if (siglist == &td->td_siglist && (td->td_waitset != NULL) &&
1772 action != SIG_HOLD) {
1773 td->td_waitset = NULL;
1777 * Defer further processing for signals which are held,
1778 * except that stopped processes must be continued by SIGCONT.
1780 if (action == SIG_HOLD &&
1781 !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
1784 * Some signals have a process-wide effect and a per-thread
1785 * component. Most processing occurs when the process next
1786 * tries to cross the user boundary, however there are some
1787 * times when processing needs to be done immediatly, such as
1788 * waking up threads so that they can cross the user boundary.
1789 * We try do the per-process part here.
1791 if (P_SHOULDSTOP(p)) {
1793 * The process is in stopped mode. All the threads should be
1794 * either winding down or already on the suspended queue.
1796 if (p->p_flag & P_TRACED) {
1798 * The traced process is already stopped,
1799 * so no further action is necessary.
1800 * No signal can restart us.
/* SIGKILL overrides the stopped state unconditionally. */
1805 if (sig == SIGKILL) {
1807 * SIGKILL sets process running.
1808 * It will die elsewhere.
1809 * All threads must be restarted.
1811 p->p_flag &= ~P_STOPPED;
1815 if (prop & SA_CONT) {
1817 * If SIGCONT is default (or ignored), we continue the
1818 * process but don't leave the signal in siglist as
1819 * it has no further action. If SIGCONT is held, we
1820 * continue the process and leave the signal in
1821 * siglist. If the process catches SIGCONT, let it
1822 * handle the signal itself. If it isn't waiting on
1823 * an event, it goes back to run state.
1824 * Otherwise, process goes back to sleep state.
1826 p->p_flag &= ~P_STOPPED_SIG;
1827 p->p_flag |= P_CONTINUED;
1828 if (action == SIG_DFL) {
1829 SIGDELSET(*siglist, sig);
1830 } else if (action == SIG_CATCH) {
1832 * The process wants to catch it so it needs
1833 * to run at least one thread, but which one?
1834 * It would seem that the answer would be to
1835 * run an upcall in the next KSE to run, and
1836 * deliver the signal that way. In a NON KSE
1837 * process, we need to make sure that the
1838 * single thread is runnable asap.
1839 * XXXKSE for now however, make them all run.
1844 * The signal is not ignored or caught.
1846 mtx_lock_spin(&sched_lock);
1847 thread_unsuspend(p);
1848 mtx_unlock_spin(&sched_lock);
1852 if (prop & SA_STOP) {
1854 * Already stopped, don't need to stop again
1855 * (If we did the shell could get confused).
1856 * Just make sure the signal STOP bit set.
1858 p->p_flag |= P_STOPPED_SIG;
1859 SIGDELSET(*siglist, sig);
1864 * All other kinds of signals:
1865 * If a thread is sleeping interruptibly, simulate a
1866 * wakeup so that when it is continued it will be made
1867 * runnable and can look at the signal. However, don't make
1868 * the PROCESS runnable, leave it stopped.
1869 * It may run a bit until it hits a thread_suspend_check().
1871 mtx_lock_spin(&sched_lock);
1872 if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) {
1873 if (td->td_flags & TDF_CVWAITQ)
1878 mtx_unlock_spin(&sched_lock);
1881 * XXXKSE What about threads that are waiting on mutexes?
1882 * Shouldn't they abort too?
1883 * No, hopefully mutexes are short lived.. They'll
1884 * eventually hit thread_suspend_check().
1886 } else if (p->p_state == PRS_NORMAL) {
/* Running normally: anything except an untraced default-action stop
 * signal just wakes the target thread so it notices the signal. */
1887 if ((p->p_flag & P_TRACED) || (action != SIG_DFL) ||
1888 !(prop & SA_STOP)) {
1889 mtx_lock_spin(&sched_lock);
1890 tdsigwakeup(td, sig, action);
1891 mtx_unlock_spin(&sched_lock);
/* Default-action stop: suspend every interruptibly-sleeping thread,
 * flag the rest via AST so they stop at the user boundary. */
1894 if (prop & SA_STOP) {
/* P_PPWAIT: parent is in vfork() wait; don't stop mid-vfork. */
1895 if (p->p_flag & P_PPWAIT)
1897 p->p_flag |= P_STOPPED_SIG;
1899 mtx_lock_spin(&sched_lock);
1900 FOREACH_THREAD_IN_PROC(p, td0) {
1901 if (TD_IS_SLEEPING(td0) &&
1902 (td0->td_flags & TDF_SINTR) &&
1903 !TD_IS_SUSPENDED(td0)) {
1904 thread_suspend_one(td0);
1905 } else if (td != td0) {
1906 td0->td_flags |= TDF_ASTPENDING;
/* All threads already suspended: the stop has fully taken effect,
 * so clear the stop signal (p_xstat) from every pending list. */
1910 if (p->p_numthreads == p->p_suspcount) {
1911 SIGDELSET(p->p_siglist, p->p_xstat);
1912 FOREACH_THREAD_IN_PROC(p, td0)
1913 SIGDELSET(td0->td_siglist, p->p_xstat);
1915 mtx_unlock_spin(&sched_lock);
1922 /* Not in "NORMAL" state. discard the signal. */
1923 SIGDELSET(*siglist, sig);
1928 * The process is not stopped so we need to apply the signal to all the
/* Shared "run now" tail: wake td and unsuspend the process. */
1933 mtx_lock_spin(&sched_lock);
1934 tdsigwakeup(td, sig, action);
1935 thread_unsuspend(p);
1936 mtx_unlock_spin(&sched_lock);
1938 /* If we jump here, sched_lock should not be owned. */
1939 mtx_assert(&sched_lock, MA_NOTOWNED);
1943 * The force of a signal has been directed against a single
1944 * thread. We need to see what we can do about knocking it
1945 * out of any sleep it may be in etc.
/* Called with both the proc lock and sched_lock held (asserted below). */
1948 tdsigwakeup(struct thread *td, int sig, sig_t action)
1950 struct proc *p = td->td_proc;
1953 PROC_LOCK_ASSERT(p, MA_OWNED);
1954 mtx_assert(&sched_lock, MA_OWNED);
1955 prop = sigprop(sig);
1957 * Bring the priority of a thread up if we want it to get
1958 * killed in this lifetime.
1960 if ((action == SIG_DFL) && (prop & SA_KILL)) {
1961 if (td->td_priority > PUSER) {
1962 td->td_priority = PUSER;
1965 if (TD_IS_SLEEPING(td)) {
1967 * If thread is sleeping uninterruptibly
1968 * we can't interrupt the sleep... the signal will
1969 * be noticed when the process returns through
1970 * trap() or syscall().
1972 if ((td->td_flags & TDF_SINTR) == 0) {
1976 * Process is sleeping and traced. Make it runnable
1977 * so it can discover the signal in issignal() and stop
1980 if (p->p_flag & P_TRACED) {
1981 p->p_flag &= ~P_STOPPED_TRACE;
1985 * If SIGCONT is default (or ignored) and process is
1986 * asleep, we are finished; the process should not
1989 if ((prop & SA_CONT) && action == SIG_DFL) {
/* No further action needed: scrub the signal from both the
 * process-wide and the per-thread pending sets. */
1990 SIGDELSET(p->p_siglist, sig);
1992 * It may be on either list in this state.
1993 * Remove from both for now.
1995 SIGDELSET(td->td_siglist, sig);
2000 * Raise priority to at least PUSER.
2002 if (td->td_priority > PUSER) {
2003 td->td_priority = PUSER;
2006 if (td->td_flags & TDF_CVWAITQ)
2014 * Other states do nothing with the signal immediatly,
2015 * other than kicking ourselves if we are running.
2016 * It will either never be noticed, or noticed very soon.
/* A running thread on another CPU needs a kick (elided here) so it
 * re-checks signals; the current thread will notice on its own. */
2018 if (TD_IS_RUNNING(td) && td != curthread) {
/*
 * Stop the current thread for the benefit of a tracing parent: notify the
 * parent with SIGCHLD, mark the process stopped, suspend this thread and
 * switch away.  Returns when the debugger lets us continue.
 */
2026 ptracestop(struct thread *td, int sig)
2028 struct proc *p = td->td_proc;
2030 PROC_LOCK_ASSERT(p, MA_OWNED);
/* We are about to sleep; warn if we'd do so holding Giant etc. */
2031 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2032 &p->p_mtx.mtx_object, "Stopping for traced signal")
2035 PROC_LOCK(p->p_pptr);
2036 psignal(p->p_pptr, SIGCHLD);
2037 PROC_UNLOCK(p->p_pptr);
2038 mtx_lock_spin(&sched_lock);
2039 stop(p); /* uses schedlock too eventually */
2040 thread_suspend_one(td);
/* Involuntary context switch; we resume here when un-suspended. */
2043 mi_switch(SW_INVOL);
2044 mtx_unlock_spin(&sched_lock);
2049 * If the current process has received a signal (should be caught or cause
2050 * termination, should interrupt current syscall), return the signal number.
2051 * Stop signals with default action are processed immediately, then cleared;
2052 * they aren't returned. This is checked after each entry to the system for
2053 * a syscall or trap (though this can usually be done without calling issignal
2054 * by checking the pending signal masks in cursig.) The normal call
2057 * while (sig = cursig(curthread))
/* NOTE(review): the function signature is elided from this listing; body
 * below loops until a deliverable signal is found or none remain. */
2066 sigset_t sigpending;
/* Called with both the sigacts mutex and the proc lock held. */
2072 mtx_assert(&ps->ps_mtx, MA_OWNED);
2073 PROC_LOCK_ASSERT(p, MA_OWNED);
2075 int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
/* Candidate set = pending minus blocked for this thread. */
2077 sigpending = td->td_siglist;
2078 SIGSETNAND(sigpending, td->td_sigmask);
/* During vfork() (P_PPWAIT) ignore stop signals entirely. */
2080 if (p->p_flag & P_PPWAIT)
2081 SIG_STOPSIGMASK(sigpending);
2082 if (SIGISEMPTY(sigpending)) /* no signal to send */
/* Take the lowest-numbered pending signal first. */
2084 sig = sig_ffs(&sigpending);
2086 if (p->p_stops & S_SIG) {
2087 mtx_unlock(&ps->ps_mtx);
2088 stopevent(p, S_SIG, sig);
2089 mtx_lock(&ps->ps_mtx);
2093 * We should see pending but ignored signals
2094 * only if P_TRACED was on when they were posted.
2096 if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
2097 SIGDELSET(td->td_siglist, sig);
2100 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
2102 * If traced, always stop.
2104 mtx_unlock(&ps->ps_mtx);
2105 ptracestop(td, sig);
2107 mtx_lock(&ps->ps_mtx);
2110 * If parent wants us to take the signal,
2111 * then it will leave it in p->p_xstat;
2112 * otherwise we just look for signals again.
2114 SIGDELSET(td->td_siglist, sig); /* clear old signal */
2120 * If the traced bit got turned off, go back up
2121 * to the top to rescan signals. This ensures
2122 * that p_sig* and p_sigact are consistent.
2124 if ((p->p_flag & P_TRACED) == 0)
2128 * Put the new signal into td_siglist. If the
2129 * signal is being masked, look for other signals.
2131 SIGADDSET(td->td_siglist, sig);
2132 if (SIGISMEMBER(td->td_sigmask, sig))
2137 prop = sigprop(sig);
2140 * Decide whether the signal should be returned.
2141 * Return the signal's number, or fall through
2142 * to clear it from the pending mask.
2144 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
2146 case (intptr_t)SIG_DFL:
2148 * Don't take default actions on system processes.
2150 if (p->p_pid <= 1) {
2153 * Are you sure you want to ignore SIGSEGV
2156 printf("Process (pid %lu) got signal %d\n",
2157 (u_long)p->p_pid, sig);
2159 break; /* == ignore */
2162 * If there is a pending stop signal to process
2163 * with default action, stop here,
2164 * then clear the signal. However,
2165 * if process is member of an orphaned
2166 * process group, ignore tty stop signals.
2168 if (prop & SA_STOP) {
2169 if (p->p_flag & P_TRACED ||
2170 (p->p_pgrp->pg_jobc == 0 &&
2172 break; /* == ignore */
2173 mtx_unlock(&ps->ps_mtx);
2174 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2175 &p->p_mtx.mtx_object, "Catching SIGSTOP");
2176 p->p_flag |= P_STOPPED_SIG;
/* Suspend interruptible sleepers; AST-flag the rest so they
 * stop at the next user-boundary crossing. */
2178 mtx_lock_spin(&sched_lock);
2179 FOREACH_THREAD_IN_PROC(p, td0) {
2180 if (TD_IS_SLEEPING(td0) &&
2181 (td0->td_flags & TDF_SINTR) &&
2182 !TD_IS_SUSPENDED(td0)) {
2183 thread_suspend_one(td0);
2184 } else if (td != td0) {
2185 td0->td_flags |= TDF_ASTPENDING;
/* Stop ourselves too and switch away; resume on SIGCONT. */
2189 thread_suspend_one(td);
2192 mi_switch(SW_INVOL);
2193 mtx_unlock_spin(&sched_lock);
2196 mtx_lock(&ps->ps_mtx);
2198 } else if (prop & SA_IGNORE) {
2200 * Except for SIGCONT, shouldn't get here.
2201 * Default action is to ignore; drop it.
2203 break; /* == ignore */
2208 case (intptr_t)SIG_IGN:
2210 * Masking above should prevent us ever trying
2211 * to take action on an ignored signal other
2212 * than SIGCONT, unless process is traced.
2214 if ((prop & SA_CONT) == 0 &&
2215 (p->p_flag & P_TRACED) == 0)
2216 printf("issignal\n");
2217 break; /* == ignore */
2221 * This signal has an action, let
2222 * postsig() process it.
/* Loop back: signal was ignored/consumed; clear and rescan. */
2226 SIGDELSET(td->td_siglist, sig); /* take the signal! */
2232 * Put the argument process into the stopped state and notify the parent
2233 * via wakeup. Signals are handled elsewhere. The process must not be
2234 * on the run queue. Must be called with the proc p locked and the scheduler
2238 stop(struct proc *p)
2241 PROC_LOCK_ASSERT(p, MA_OWNED);
2242 p->p_flag |= P_STOPPED_SIG;
/* Clear P_WAITED so wait(2) will report this (new) stop to the parent. */
2243 p->p_flag &= ~P_WAITED;
/*
 * Called when a thread suspends: if this was the last thread of a
 * signal-stopped process (all threads now suspended), notify the parent
 * with SIGCHLD unless the parent set SA_NOCLDSTOP (PS_NOCLDSTOP).
 * Entered with the proc lock and sched_lock held; sched_lock is dropped
 * and re-taken around the (sleepable) parent notification.
 */
2251 thread_stopped(struct proc *p)
2253 struct proc *p1 = curthread->td_proc;
2257 PROC_LOCK_ASSERT(p, MA_OWNED);
2258 mtx_assert(&sched_lock, MA_OWNED);
/* 'n' is computed in elided lines — presumably the suspended-thread
 * count; the stop is complete when it equals p_numthreads. */
2262 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
2263 mtx_unlock_spin(&sched_lock);
2265 PROC_LOCK(p->p_pptr);
2266 ps = p->p_pptr->p_sigacts;
2267 mtx_lock(&ps->ps_mtx);
2268 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
2269 mtx_unlock(&ps->ps_mtx);
2270 psignal(p->p_pptr, SIGCHLD);
2272 mtx_unlock(&ps->ps_mtx);
2273 PROC_UNLOCK(p->p_pptr);
2274 mtx_lock_spin(&sched_lock);
2279 * Take the action for the specified signal
2280 * from the current set of pending signals.
/* NOTE(review): the function signature is elided from this listing. */
2286 struct thread *td = curthread;
2287 register struct proc *p = td->td_proc;
2290 sigset_t returnmask;
2293 KASSERT(sig != 0, ("postsig"));
2295 PROC_LOCK_ASSERT(p, MA_OWNED);
2297 mtx_assert(&ps->ps_mtx, MA_OWNED);
/* Consume the signal and look up its handler. */
2298 SIGDELSET(td->td_siglist, sig);
2299 action = ps->ps_sigact[_SIG_IDX(sig)];
2301 if (KTRPOINT(td, KTR_PSIG))
2302 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
2303 &td->td_oldsigmask : &td->td_sigmask, 0);
2305 if (p->p_stops & S_SIG) {
2306 mtx_unlock(&ps->ps_mtx);
2307 stopevent(p, S_SIG, sig);
2308 mtx_lock(&ps->ps_mtx);
/* Default action for a non-SA thread means the process dies
 * (sigexit in elided lines); ignorable cases never reach here. */
2311 if (!(td->td_flags & TDF_SA && td->td_mailbox) &&
2312 action == SIG_DFL) {
2314 * Default action, where the default is to kill
2315 * the process. (Other cases were ignored above.)
2317 mtx_unlock(&ps->ps_mtx);
2321 if (td->td_flags & TDF_SA && td->td_mailbox) {
2322 if (sig == SIGKILL) {
2323 mtx_unlock(&ps->ps_mtx);
2329 * If we get here, the signal must be caught.
2331 KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
2332 ("postsig action"));
2334 * Set the new mask value and also defer further
2335 * occurrences of this signal.
2337 * Special case: user has done a sigsuspend. Here the
2338 * current mask is not of interest, but rather the
2339 * mask from before the sigsuspend is what we want
2340 * restored after the signal processing is completed.
2342 if (td->td_pflags & TDP_OLDMASK) {
2343 returnmask = td->td_oldsigmask;
2344 td->td_pflags &= ~TDP_OLDMASK;
2346 returnmask = td->td_sigmask;
/* Block the handler's sa_mask, plus the signal itself unless
 * SA_NODEFER was requested. */
2348 SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
2349 if (!SIGISMEMBER(ps->ps_signodefer, sig))
2350 SIGADDSET(td->td_sigmask, sig);
2352 if (SIGISMEMBER(ps->ps_sigreset, sig)) {
2354 * See kern_sigaction() for origin of this code.
2356 SIGDELSET(ps->ps_sigcatch, sig);
2357 if (sig != SIGCONT &&
2358 sigprop(sig) & SA_IGNORE)
2359 SIGADDSET(ps->ps_sigignore, sig);
2360 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
2362 p->p_stats->p_ru.ru_nsignals++;
2363 if (p->p_sig != sig) {
/* SA threads hand the signal to the userland scheduler; others get
 * a normal handler frame pushed via the sysentvec sendsig hook. */
2370 if (td->td_flags & TDF_SA && td->td_mailbox)
2371 thread_signal_add(curthread, sig);
2373 (*p->p_sysent->sv_sendsig)(action, sig,
2379 * Kill the current process for stated reason.
/* NOTE(review): signature elided; logs the reason and sends SIGKILL. */
2387 PROC_LOCK_ASSERT(p, MA_OWNED);
2388 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
2389 p, p->p_pid, p->p_comm);
/* p_ucred may be NULL this early/late in the proc lifecycle. */
2390 log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
2391 p->p_ucred ? p->p_ucred->cr_uid : -1, why);
2392 psignal(p, SIGKILL);
2396 * Force the current process to exit with the specified signal, dumping core
2397 * if appropriate. We bypass the normal tests for masked and caught signals,
2398 * allowing unrecoverable failures to terminate the process without changing
2399 * signal state. Mark the accounting record with the signal termination.
2400 * If dumping core, save the signal number for the debugger. Calls exit and
2410 struct proc *p = td->td_proc;
2412 PROC_LOCK_ASSERT(p, MA_OWNED);
/* AXSIG: flag the accounting record as a signal-caused termination. */
2413 p->p_acflag |= AXSIG;
2414 if (sigprop(sig) & SA_CORE) {
2417 * Log signals which would cause core dumps
2418 * (Log as LOG_INFO to appease those who don't want
2420 * XXX : Todo, as well as euid, write out ruid too
/* coredump() path historically needs Giant; take it if not held. */
2423 if (!mtx_owned(&Giant))
/* On a successful dump, WCOREFLAG is presumably OR'd into sig in an
 * elided line — the (sig & WCOREFLAG) test below relies on that. */
2425 if (coredump(td) == 0)
2427 if (kern_logsigexit)
2429 "pid %d (%s), uid %d: exited on signal %d%s\n",
2430 p->p_pid, p->p_comm,
2431 td->td_ucred ? td->td_ucred->cr_uid : -1,
2433 sig & WCOREFLAG ? " (core dumped)" : "");
2436 if (!mtx_owned(&Giant))
/* Exit status encodes the terminating signal; does not return. */
2439 exit1(td, W_EXITCODE(0, sig));
/* Core file name pattern (see expand_name() for the %N/%P/%U specifiers);
 * runtime-tunable via the sysctl kern.corefile. */
2443 static char corefilename[MAXPATHLEN+1] = {"%N.core"};
2444 SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
2445 sizeof(corefilename), "process corefile name format string");
2448 * expand_name(name, uid, pid)
2449 * Expand the name described in corefilename, using name, uid, and pid.
2450 * corefilename is a printf-like string, with three format specifiers:
2451 * %N name of process ("name")
2452 * %P process id (pid)
2454 * For example, "%N.core" is the default; they can be disabled completely
2455 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
2456 * This is controlled by the sysctl variable kern.corefile (see above).
/* Returns a malloc'd (M_TEMP) path string, or NULL on allocation failure
 * or if the expanded name would exceed MAXPATHLEN; caller frees. */
2460 expand_name(name, uid, pid)
2465 const char *format, *appendstr;
2467 char buf[11]; /* Buffer for pid/uid -- max 4B */
2470 format = corefilename;
/* M_NOWAIT: may be called in contexts where sleeping is unsafe. */
2471 temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
2474 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
2475 switch (format[i]) {
2476 case '%': /* Format character */
2478 switch (format[i]) {
2482 case 'N': /* process name */
2485 case 'P': /* process id */
2486 sprintf(buf, "%u", pid);
2489 case 'U': /* user id */
2490 sprintf(buf, "%u", uid);
2496 "Unknown format character %c in `%s'\n",
/* Append the expansion, bailing out if it would overflow. */
2499 l = strlen(appendstr);
2500 if ((n + l) >= MAXPATHLEN)
2502 memcpy(temp + n, appendstr, l);
2506 temp[n++] = format[i];
/* Loop stopped before consuming the whole format: name too long. */
2509 if (format[i] != '\0')
2513 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n",
2514 (long)pid, name, (u_long)uid);
2520 * Dump a process' core. The main routine does some
2521 * policy checking, and creates the name of the coredump;
2522 * then it passes on a vnode and a size limit to the process-specific
2523 * coredump routine if there is one; if there _is not_ one, it returns
2524 * ENOSYS; otherwise it returns the error from the process-specific routine.
2528 coredump(struct thread *td)
2530 struct proc *p = td->td_proc;
2531 register struct vnode *vp;
2532 register struct ucred *cred = td->td_ucred;
2534 struct nameidata nd;
2536 int error, error1, flags, locked;
2538 char *name; /* name of corefile */
2542 _STOPEVENT(p, S_CORE, 0);
/* Policy: no dump for set-id processes (unless sugid_coredump sysctl
 * allows it) or when core dumps are globally disabled. */
2544 if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
2550 * Note that the bulk of limit checking is done after
2551 * the corefile is created. The exception is if the limit
2552 * for corefiles is 0, in which case we don't bother
2553 * creating the corefile at all. This layout means that
2554 * a corefile is truncated instead of not being created,
2555 * if it is larger than the limit.
2557 limit = (off_t)lim_cur(p, RLIMIT_CORE);
2563 name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
/* NOFOLLOW + O_NOFOLLOW: refuse to dump through a symlink. */
2566 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
2567 flags = O_CREAT | FWRITE | O_NOFOLLOW;
2568 error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR, -1)
2572 NDFREE(&nd, NDF_ONLY_PNBUF);
2575 /* Don't dump to non-regular files or files with links. */
2576 if (vp->v_type != VREG ||
2577 VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
2578 VOP_UNLOCK(vp, 0, td);
2583 VOP_UNLOCK(vp, 0, td);
/* Advisory-lock the whole file so concurrent dumps don't interleave. */
2584 lf.l_whence = SEEK_SET;
2587 lf.l_type = F_WRLCK;
2588 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
/* Filesystem suspended: back out, wait for writes, and (in elided
 * code) presumably retry — confirm against full source. */
2590 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2591 lf.l_type = F_UNLCK;
2593 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2594 if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
2596 if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
/* Truncate any pre-existing file contents before dumping. */
2603 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2604 VOP_LEASE(vp, td, cred, LEASE_WRITE);
2605 VOP_SETATTR(vp, &vattr, cred, td);
2606 VOP_UNLOCK(vp, 0, td);
2608 p->p_acflag |= ACORE;
/* Delegate the actual dump to the ABI-specific handler, if any. */
2611 error = p->p_sysent->sv_coredump ?
2612 p->p_sysent->sv_coredump(td, vp, limit) :
2616 lf.l_type = F_UNLCK;
2617 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2619 vn_finished_write(mp);
2621 error1 = vn_close(vp, FWRITE, cred, td);
2628 * Nonexistent system call-- signal process (may want to handle it).
2629 * Flag error in case process won't see signal immediately (blocked or ignored).
2631 #ifndef _SYS_SYSPROTO_H_
/* Old-style K&R argument declaration for the syscall stub. */
2643 struct nosys_args *args;
2645 struct proc *p = td->td_proc;
2654 * Send a SIGIO or SIGURG signal to a process or process group using
2655 * stored credentials rather than those of the current process.
2658 pgsigio(sigiop, sig, checkctty)
2659 struct sigio **sigiop;
2662 struct sigio *sigio;
/* Nothing registered for async I/O notification: nothing to do. */
2666 if (sigio == NULL) {
/* Positive sio_pgid: a single process; signal it if the stored
 * credentials permit (CANSIGIO). */
2670 if (sigio->sio_pgid > 0) {
2671 PROC_LOCK(sigio->sio_proc);
2672 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
2673 psignal(sigio->sio_proc, sig);
2674 PROC_UNLOCK(sigio->sio_proc);
/* Negative sio_pgid: a process group; signal each eligible member,
 * optionally only those with a controlling terminal (checkctty). */
2675 } else if (sigio->sio_pgid < 0) {
2678 PGRP_LOCK(sigio->sio_pgrp);
2679 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
2681 if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
2682 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
2686 PGRP_UNLOCK(sigio->sio_pgrp);
/*
 * kqueue EVFILT_SIGNAL attach: register the knote on the current
 * process's klist so KNOTE() in do_tdsignal() can find it.
 */
2692 filt_sigattach(struct knote *kn)
2694 struct proc *p = curproc;
2696 kn->kn_ptr.p_proc = p;
/* EV_CLEAR: signal events auto-reset after each delivery count read. */
2697 kn->kn_flags |= EV_CLEAR; /* automatically set */
2700 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
/* kqueue EVFILT_SIGNAL detach: unhook the knote from the proc's klist. */
2707 filt_sigdetach(struct knote *kn)
2709 struct proc *p = kn->kn_ptr.p_proc;
2712 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2717 * signal knotes are shared with proc knotes, so we apply a mask to
2718 * the hint in order to differentiate them from process hints. This
2719 * could be avoided by using a signal-specific knote list, but probably
2720 * isn't worth the trouble.
2723 filt_signal(struct knote *kn, long hint)
2726 if (hint & NOTE_SIGNAL) {
2727 hint &= ~NOTE_SIGNAL;
/* Count only deliveries of the signal this knote watches (kn_id);
 * the increment itself is in an elided line. */
2729 if (kn->kn_id == hint)
/* Event is "ready" once at least one matching signal was posted. */
2732 return (kn->kn_data != 0);
/* Interior of sigacts_alloc() (header elided): allocate a zeroed
 * sigacts and initialize its mutex; M_WAITOK so this cannot fail. */
2740 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
2742 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
/*
 * Drop a reference on a sigacts; the decrement is in an elided line.
 * The last reference destroys the mutex and frees the structure —
 * note the mutex must not be unlocked after destruction, hence the
 * unlock only on the still-referenced path.
 */
2747 sigacts_free(struct sigacts *ps)
2750 mtx_lock(&ps->ps_mtx);
2752 if (ps->ps_refcnt == 0) {
2753 mtx_destroy(&ps->ps_mtx);
2754 free(ps, M_SUBPROC);
2756 mtx_unlock(&ps->ps_mtx);
/* Take a reference on a shared sigacts (increment is in an elided line,
 * under ps_mtx). */
2760 sigacts_hold(struct sigacts *ps)
2762 mtx_lock(&ps->ps_mtx)
2764 mtx_unlock(&ps->ps_mtx);
/*
 * Copy signal-action state from src to an unshared dest.  Only the fields
 * before ps_refcnt are copied, deliberately preserving dest's own refcount
 * and mutex; src's mutex is held to get a consistent snapshot.
 */
2769 sigacts_copy(struct sigacts *dest, struct sigacts *src)
2772 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
2773 mtx_lock(&src->ps_mtx);
2774 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
2775 mtx_unlock(&src->ps_mtx);
2779 sigacts_shared(struct sigacts *ps)
2783 mtx_lock(&ps->ps_mtx);
2784 shared = ps->ps_refcnt > 1;
2785 mtx_unlock(&ps->ps_mtx);