/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/condvar.h>
#include <sys/devctl.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktrace.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/procdesc.h>
#include <sys/ptrace.h>
#include <sys/posix4.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/cpu.h>

#include <security/audit/audit.h>
#define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE3(proc, , , signal__send,
    "struct thread *", "struct proc *", "int");
SDT_PROBE_DEFINE2(proc, , , signal__clear,
    "int", "ksiginfo_t *");
SDT_PROBE_DEFINE3(proc, , , signal__discard,
    "struct thread *", "struct proc *", "int");
static int	coredump(struct thread *);
static int	killpg1(struct thread *td, int sig, int pgid, int all,
		    ksiginfo_t *ksi);
static int	issignal(struct thread *td);
static void	reschedule_signals(struct proc *p, sigset_t block, int flags);
static int	sigprop(int sig);
static void	tdsigwakeup(struct thread *, int, sig_t, int);
static int	sig_suspend_threads(struct thread *, struct proc *, int);
static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
static void	sigqueue_start(void);

static uma_zone_t	ksiginfo_zone = NULL;
struct filterops sig_filtops = {
	.f_attach = filt_sigattach,
	.f_detach = filt_sigdetach,
	.f_event = filt_signal,
};
static int	kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

static int	kern_forcesigexit = 1;
SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
    &kern_forcesigexit, 0, "Force trap signal to be handled");

static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "POSIX real-time signals");

static int	max_pending_per_proc = 128;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
    &max_pending_per_proc, 0, "Max pending signals per proc");

static int	preallocate_siginfo = 1024;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
    &preallocate_siginfo, 0, "Preallocated signal memory size");

static int	signal_overflow = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
    &signal_overflow, 0, "Number of signals that overflowed");

static int	signal_alloc_fail = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
    &signal_alloc_fail, 0, "Number of signal allocation failures");

static int	kern_lognosys = 0;
SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
    "Log invalid syscalls");

__read_frequently bool sigfastblock_fetch_always = false;
SYSCTL_BOOL(_kern, OID_AUTO, sigfastblock_fetch_always, CTLFLAG_RWTUN,
    &sigfastblock_fetch_always, 0,
    "Fetch sigfastblock word on each syscall entry for proper "
    "blocking semantics");
SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
/*
 * Policy -- Can ucred cr1 send SIGIO to process cr2?
 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
 * in the right situations.
 */
#define CANSIGIO(cr1, cr2) \
	((cr1)->cr_uid == 0 || \
	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
	    (cr1)->cr_uid == (cr2)->cr_ruid || \
	    (cr1)->cr_ruid == (cr2)->cr_uid || \
	    (cr1)->cr_uid == (cr2)->cr_uid)
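
/*
 * Illustrative example (added commentary, not part of the original source):
 * CANSIGIO() passes when the sender is root or when any of the sender's
 * real/effective uids matches any of the receiver's.  For instance, with
 * hypothetical credentials cr1 = { cr_ruid = 1001, cr_uid = 1001 } and
 * cr2 = { cr_ruid = 1001, cr_uid = 0 }, the macro is true via the
 * (cr1)->cr_ruid == (cr2)->cr_ruid comparison, so SIGIO may be delivered.
 */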
static int	sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
    &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");

static int	capmode_coredump;
SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
    &capmode_coredump, 0, "Allow processes in capability mode to dump core");

static int	do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
    &do_coredump, 0, "Enable/Disable coredumps");

static int	set_core_nodump_flag = 0;
SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
    0, "Enable setting the NODUMP flag on coredump files");

static int	coredump_devctl = 0;
SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
    0, "Generate a devctl notification when processes coredump");
/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define	SIGPROP_KILL		0x01	/* terminates process by default */
#define	SIGPROP_CORE		0x02	/* ditto and coredumps */
#define	SIGPROP_STOP		0x04	/* suspend process */
#define	SIGPROP_TTYSTOP		0x08	/* ditto, from tty */
#define	SIGPROP_IGNORE		0x10	/* ignore by default */
#define	SIGPROP_CONT		0x20	/* continue if suspended */
#define	SIGPROP_CANTMASK	0x40	/* non-maskable, catchable */
static int sigproptbl[NSIG] = {
	[SIGHUP] =	SIGPROP_KILL,
	[SIGINT] =	SIGPROP_KILL,
	[SIGQUIT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGILL] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGTRAP] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGABRT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGEMT] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGFPE] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGKILL] =	SIGPROP_KILL,
	[SIGBUS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSEGV] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGSYS] =	SIGPROP_KILL | SIGPROP_CORE,
	[SIGPIPE] =	SIGPROP_KILL,
	[SIGALRM] =	SIGPROP_KILL,
	[SIGTERM] =	SIGPROP_KILL,
	[SIGURG] =	SIGPROP_IGNORE,
	[SIGSTOP] =	SIGPROP_STOP,
	[SIGTSTP] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGCONT] =	SIGPROP_IGNORE | SIGPROP_CONT,
	[SIGCHLD] =	SIGPROP_IGNORE,
	[SIGTTIN] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGTTOU] =	SIGPROP_STOP | SIGPROP_TTYSTOP,
	[SIGIO] =	SIGPROP_IGNORE,
	[SIGXCPU] =	SIGPROP_KILL,
	[SIGXFSZ] =	SIGPROP_KILL,
	[SIGVTALRM] =	SIGPROP_KILL,
	[SIGPROF] =	SIGPROP_KILL,
	[SIGWINCH] =	SIGPROP_IGNORE,
	[SIGINFO] =	SIGPROP_IGNORE,
	[SIGUSR1] =	SIGPROP_KILL,
	[SIGUSR2] =	SIGPROP_KILL,
};
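
/*
 * Illustrative example (added commentary, not original code): sigprop()
 * below simply indexes this table, so sigprop(SIGTSTP) evaluates to
 * SIGPROP_STOP | SIGPROP_TTYSTOP, while sigprop(SIGQUIT) evaluates to
 * SIGPROP_KILL | SIGPROP_CORE -- which is why an unhandled SIGQUIT both
 * terminates the process and produces a core dump by default.
 */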
sigset_t fastblock_mask;
static void
sigqueue_start(void)
{
	ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_prealloc(ksiginfo_zone, preallocate_siginfo);
	p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
	p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
	p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
	SIGFILLSET(fastblock_mask);
	SIG_CANTMASK(fastblock_mask);
}
263 ksiginfo_alloc(int wait)
270 if (ksiginfo_zone != NULL)
271 return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
276 ksiginfo_free(ksiginfo_t *ksi)
278 uma_zfree(ksiginfo_zone, ksi);
282 ksiginfo_tryfree(ksiginfo_t *ksi)
284 if (!(ksi->ksi_flags & KSI_EXT)) {
285 uma_zfree(ksiginfo_zone, ksi);
292 sigqueue_init(sigqueue_t *list, struct proc *p)
294 SIGEMPTYSET(list->sq_signals);
295 SIGEMPTYSET(list->sq_kill);
296 SIGEMPTYSET(list->sq_ptrace);
297 TAILQ_INIT(&list->sq_list);
299 list->sq_flags = SQ_INIT;
303 * Get a signal's ksiginfo.
305 * 0 - signal not found
306 * others - signal number
309 sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
311 struct proc *p = sq->sq_proc;
312 struct ksiginfo *ksi, *next;
315 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
317 if (!SIGISMEMBER(sq->sq_signals, signo))
320 if (SIGISMEMBER(sq->sq_ptrace, signo)) {
322 SIGDELSET(sq->sq_ptrace, signo);
323 si->ksi_flags |= KSI_PTRACE;
325 if (SIGISMEMBER(sq->sq_kill, signo)) {
328 SIGDELSET(sq->sq_kill, signo);
331 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
332 if (ksi->ksi_signo == signo) {
334 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
335 ksi->ksi_sigq = NULL;
336 ksiginfo_copy(ksi, si);
337 if (ksiginfo_tryfree(ksi) && p != NULL)
346 SIGDELSET(sq->sq_signals, signo);
347 si->ksi_signo = signo;
352 sigqueue_take(ksiginfo_t *ksi)
358 if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
362 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
363 ksi->ksi_sigq = NULL;
364 if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
367 for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
368 kp = TAILQ_NEXT(kp, ksi_link)) {
369 if (kp->ksi_signo == ksi->ksi_signo)
372 if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
373 !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
374 SIGDELSET(sq->sq_signals, ksi->ksi_signo);
378 sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
380 struct proc *p = sq->sq_proc;
381 struct ksiginfo *ksi;
384 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
387 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
390 if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
391 SIGADDSET(sq->sq_kill, signo);
395 /* directly insert the ksi, don't copy it */
396 if (si->ksi_flags & KSI_INS) {
397 if (si->ksi_flags & KSI_HEAD)
398 TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
400 TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
405 if (__predict_false(ksiginfo_zone == NULL)) {
406 SIGADDSET(sq->sq_kill, signo);
410 if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
413 } else if ((ksi = ksiginfo_alloc(0)) == NULL) {
419 ksiginfo_copy(si, ksi);
420 ksi->ksi_signo = signo;
421 if (si->ksi_flags & KSI_HEAD)
422 TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
424 TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
429 if ((si->ksi_flags & KSI_PTRACE) != 0) {
430 SIGADDSET(sq->sq_ptrace, signo);
433 } else if ((si->ksi_flags & KSI_TRAP) != 0 ||
434 (si->ksi_flags & KSI_SIGQ) == 0) {
435 SIGADDSET(sq->sq_kill, signo);
443 SIGADDSET(sq->sq_signals, signo);
448 sigqueue_flush(sigqueue_t *sq)
450 struct proc *p = sq->sq_proc;
453 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
456 PROC_LOCK_ASSERT(p, MA_OWNED);
458 while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
459 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
460 ksi->ksi_sigq = NULL;
461 if (ksiginfo_tryfree(ksi) && p != NULL)
465 SIGEMPTYSET(sq->sq_signals);
466 SIGEMPTYSET(sq->sq_kill);
467 SIGEMPTYSET(sq->sq_ptrace);
471 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
474 struct proc *p1, *p2;
475 ksiginfo_t *ksi, *next;
477 KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
478 KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
481 /* Move siginfo to target list */
482 TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
483 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
484 TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
487 TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
494 /* Move pending bits to target list */
496 SIGSETAND(tmp, *set);
497 SIGSETOR(dst->sq_kill, tmp);
498 SIGSETNAND(src->sq_kill, tmp);
500 tmp = src->sq_ptrace;
501 SIGSETAND(tmp, *set);
502 SIGSETOR(dst->sq_ptrace, tmp);
503 SIGSETNAND(src->sq_ptrace, tmp);
505 tmp = src->sq_signals;
506 SIGSETAND(tmp, *set);
507 SIGSETOR(dst->sq_signals, tmp);
508 SIGSETNAND(src->sq_signals, tmp);
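
/*
 * Illustrative sketch (added commentary, not original code) of the mask
 * algebra applied above to each of sq_kill, sq_ptrace and sq_signals:
 *
 *	tmp = src_mask & *set;		select the pending bits being moved
 *	dst_mask |= tmp;		add them to the destination queue
 *	src_mask &= ~tmp;		and clear them from the source
 *
 * so only the signals named in *set migrate between the two sigqueues.
 */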
513 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
518 SIGADDSET(set, signo);
519 sigqueue_move_set(src, dst, &set);
524 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
526 struct proc *p = sq->sq_proc;
527 ksiginfo_t *ksi, *next;
529 KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));
531 /* Remove siginfo queue */
532 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
533 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
534 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
535 ksi->ksi_sigq = NULL;
536 if (ksiginfo_tryfree(ksi) && p != NULL)
540 SIGSETNAND(sq->sq_kill, *set);
541 SIGSETNAND(sq->sq_ptrace, *set);
542 SIGSETNAND(sq->sq_signals, *set);
546 sigqueue_delete(sigqueue_t *sq, int signo)
551 SIGADDSET(set, signo);
552 sigqueue_delete_set(sq, &set);
555 /* Remove a set of signals for a process */
557 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
562 PROC_LOCK_ASSERT(p, MA_OWNED);
564 sigqueue_init(&worklist, NULL);
565 sigqueue_move_set(&p->p_sigqueue, &worklist, set);
567 FOREACH_THREAD_IN_PROC(p, td0)
568 sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
570 sigqueue_flush(&worklist);
574 sigqueue_delete_proc(struct proc *p, int signo)
579 SIGADDSET(set, signo);
580 sigqueue_delete_set_proc(p, &set);
584 sigqueue_delete_stopmask_proc(struct proc *p)
589 SIGADDSET(set, SIGSTOP);
590 SIGADDSET(set, SIGTSTP);
591 SIGADDSET(set, SIGTTIN);
592 SIGADDSET(set, SIGTTOU);
593 sigqueue_delete_set_proc(p, &set);
597 * Determine signal that should be delivered to thread td, the current
598 * thread, 0 if none. If there is a pending stop signal with default
599 * action, the process stops in issignal().
602 cursig(struct thread *td)
604 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
605 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
606 THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
607 return (SIGPENDING(td) ? issignal(td) : 0);
611 * Arrange for ast() to handle unmasked pending signals on return to user
612 * mode. This must be called whenever a signal is added to td_sigqueue or
613 * unmasked in td_sigmask.
616 signotify(struct thread *td)
619 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
621 if (SIGPENDING(td)) {
623 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
/*
 * Returns 1 (true) if altstack is configured for the thread, and the
 * passed stack bottom address falls into the altstack range.  Handles
 * the 4.3BSD compat special case where the alt stack size is zero.
 */
634 sigonstack(size_t sp)
639 if ((td->td_pflags & TDP_ALTSTACK) == 0)
641 #if defined(COMPAT_43)
642 if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0)
643 return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
645 return (sp >= (size_t)td->td_sigstk.ss_sp &&
646 sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
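
/*
 * Illustrative example (added commentary, not original code): with a
 * hypothetical alternate stack of ss_sp = 0x7f0000000 and
 * ss_size = 0x10000, the range check above reports "on stack" exactly for
 * stack pointers in the half-open interval
 * [0x7f0000000, 0x7f0000000 + 0x10000).
 */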
653 if (sig > 0 && sig < nitems(sigproptbl))
654 return (sigproptbl[sig]);
659 sig_ffs(sigset_t *set)
663 for (i = 0; i < _SIG_WORDS; i++)
665 return (ffs(set->__bits[i]) + (i * 32));
670 sigact_flag_test(const struct sigaction *act, int flag)
674 * SA_SIGINFO is reset when signal disposition is set to
675 * ignore or default. Other flags are kept according to user
678 return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
679 ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
680 (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
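
/*
 * Illustrative example (added commentary, not original code): for an act
 * with sa_flags = SA_SIGINFO | SA_RESTART whose handler is SIG_IGN or
 * SIG_DFL, sigact_flag_test(act, SA_SIGINFO) is false while
 * sigact_flag_test(act, SA_RESTART) remains true; SA_SIGINFO is only
 * honored when a real catching handler is installed.
 */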
690 kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
691 struct sigaction *oact, int flags)
694 struct proc *p = td->td_proc;
696 if (!_SIG_VALID(sig))
698 if (act != NULL && act->sa_handler != SIG_DFL &&
699 act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
700 SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
701 SA_NOCLDWAIT | SA_SIGINFO)) != 0)
706 mtx_lock(&ps->ps_mtx);
708 memset(oact, 0, sizeof(*oact));
709 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
710 if (SIGISMEMBER(ps->ps_sigonstack, sig))
711 oact->sa_flags |= SA_ONSTACK;
712 if (!SIGISMEMBER(ps->ps_sigintr, sig))
713 oact->sa_flags |= SA_RESTART;
714 if (SIGISMEMBER(ps->ps_sigreset, sig))
715 oact->sa_flags |= SA_RESETHAND;
716 if (SIGISMEMBER(ps->ps_signodefer, sig))
717 oact->sa_flags |= SA_NODEFER;
718 if (SIGISMEMBER(ps->ps_siginfo, sig)) {
719 oact->sa_flags |= SA_SIGINFO;
721 (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
723 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
724 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
725 oact->sa_flags |= SA_NOCLDSTOP;
726 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
727 oact->sa_flags |= SA_NOCLDWAIT;
730 if ((sig == SIGKILL || sig == SIGSTOP) &&
731 act->sa_handler != SIG_DFL) {
732 mtx_unlock(&ps->ps_mtx);
738 * Change setting atomically.
741 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
742 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
743 if (sigact_flag_test(act, SA_SIGINFO)) {
744 ps->ps_sigact[_SIG_IDX(sig)] =
745 (__sighandler_t *)act->sa_sigaction;
746 SIGADDSET(ps->ps_siginfo, sig);
748 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
749 SIGDELSET(ps->ps_siginfo, sig);
751 if (!sigact_flag_test(act, SA_RESTART))
752 SIGADDSET(ps->ps_sigintr, sig);
754 SIGDELSET(ps->ps_sigintr, sig);
755 if (sigact_flag_test(act, SA_ONSTACK))
756 SIGADDSET(ps->ps_sigonstack, sig);
758 SIGDELSET(ps->ps_sigonstack, sig);
759 if (sigact_flag_test(act, SA_RESETHAND))
760 SIGADDSET(ps->ps_sigreset, sig);
762 SIGDELSET(ps->ps_sigreset, sig);
763 if (sigact_flag_test(act, SA_NODEFER))
764 SIGADDSET(ps->ps_signodefer, sig);
766 SIGDELSET(ps->ps_signodefer, sig);
767 if (sig == SIGCHLD) {
768 if (act->sa_flags & SA_NOCLDSTOP)
769 ps->ps_flag |= PS_NOCLDSTOP;
771 ps->ps_flag &= ~PS_NOCLDSTOP;
772 if (act->sa_flags & SA_NOCLDWAIT) {
 * Paranoia: since SA_NOCLDWAIT is implemented
 * by reparenting the dying child to PID 1 (and
 * trusting it to reap the zombie), PID 1 itself
 * is forbidden to set SA_NOCLDWAIT.
780 ps->ps_flag &= ~PS_NOCLDWAIT;
782 ps->ps_flag |= PS_NOCLDWAIT;
784 ps->ps_flag &= ~PS_NOCLDWAIT;
785 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
786 ps->ps_flag |= PS_CLDSIGIGN;
788 ps->ps_flag &= ~PS_CLDSIGIGN;
791 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
792 * and for signals set to SIG_DFL where the default is to
793 * ignore. However, don't put SIGCONT in ps_sigignore, as we
794 * have to restart the process.
796 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
797 (sigprop(sig) & SIGPROP_IGNORE &&
798 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
799 /* never to be seen again */
800 sigqueue_delete_proc(p, sig);
802 /* easier in psignal */
803 SIGADDSET(ps->ps_sigignore, sig);
804 SIGDELSET(ps->ps_sigcatch, sig);
806 SIGDELSET(ps->ps_sigignore, sig);
807 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
808 SIGDELSET(ps->ps_sigcatch, sig);
810 SIGADDSET(ps->ps_sigcatch, sig);
812 #ifdef COMPAT_FREEBSD4
813 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
814 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
815 (flags & KSA_FREEBSD4) == 0)
816 SIGDELSET(ps->ps_freebsd4, sig);
818 SIGADDSET(ps->ps_freebsd4, sig);
821 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
822 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
823 (flags & KSA_OSIGSET) == 0)
824 SIGDELSET(ps->ps_osigset, sig);
826 SIGADDSET(ps->ps_osigset, sig);
829 mtx_unlock(&ps->ps_mtx);
834 #ifndef _SYS_SYSPROTO_H_
835 struct sigaction_args {
837 struct sigaction *act;
838 struct sigaction *oact;
842 sys_sigaction(struct thread *td, struct sigaction_args *uap)
844 struct sigaction act, oact;
845 struct sigaction *actp, *oactp;
848 actp = (uap->act != NULL) ? &act : NULL;
849 oactp = (uap->oact != NULL) ? &oact : NULL;
851 error = copyin(uap->act, actp, sizeof(act));
855 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
857 error = copyout(oactp, uap->oact, sizeof(oact));
861 #ifdef COMPAT_FREEBSD4
862 #ifndef _SYS_SYSPROTO_H_
863 struct freebsd4_sigaction_args {
865 struct sigaction *act;
866 struct sigaction *oact;
870 freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
872 struct sigaction act, oact;
873 struct sigaction *actp, *oactp;
876 actp = (uap->act != NULL) ? &act : NULL;
877 oactp = (uap->oact != NULL) ? &oact : NULL;
879 error = copyin(uap->act, actp, sizeof(act));
883 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
885 error = copyout(oactp, uap->oact, sizeof(oact));
#endif /* COMPAT_FREEBSD4 */
890 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
891 #ifndef _SYS_SYSPROTO_H_
892 struct osigaction_args {
894 struct osigaction *nsa;
895 struct osigaction *osa;
899 osigaction(struct thread *td, struct osigaction_args *uap)
901 struct osigaction sa;
902 struct sigaction nsa, osa;
903 struct sigaction *nsap, *osap;
906 if (uap->signum <= 0 || uap->signum >= ONSIG)
909 nsap = (uap->nsa != NULL) ? &nsa : NULL;
910 osap = (uap->osa != NULL) ? &osa : NULL;
913 error = copyin(uap->nsa, &sa, sizeof(sa));
916 nsap->sa_handler = sa.sa_handler;
917 nsap->sa_flags = sa.sa_flags;
918 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
920 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
921 if (osap && !error) {
922 sa.sa_handler = osap->sa_handler;
923 sa.sa_flags = osap->sa_flags;
924 SIG2OSIG(osap->sa_mask, sa.sa_mask);
925 error = copyout(&sa, uap->osa, sizeof(sa));
930 #if !defined(__i386__)
931 /* Avoid replicating the same stub everywhere */
933 osigreturn(struct thread *td, struct osigreturn_args *uap)
936 return (nosys(td, (struct nosys_args *)uap));
939 #endif /* COMPAT_43 */
942 * Initialize signal state for process 0;
943 * set to ignore signals that are ignored by default.
946 siginit(struct proc *p)
953 mtx_lock(&ps->ps_mtx);
954 for (i = 1; i <= NSIG; i++) {
955 if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
956 SIGADDSET(ps->ps_sigignore, i);
959 mtx_unlock(&ps->ps_mtx);
964 * Reset specified signal to the default disposition.
967 sigdflt(struct sigacts *ps, int sig)
970 mtx_assert(&ps->ps_mtx, MA_OWNED);
971 SIGDELSET(ps->ps_sigcatch, sig);
972 if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
973 SIGADDSET(ps->ps_sigignore, sig);
974 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
975 SIGDELSET(ps->ps_siginfo, sig);
979 * Reset signals for an exec of the specified process.
982 execsigs(struct proc *p)
990 * Reset caught signals. Held signals remain held
991 * through td_sigmask (unless they were caught,
992 * and are now ignored by default).
994 PROC_LOCK_ASSERT(p, MA_OWNED);
996 mtx_lock(&ps->ps_mtx);
1000 * As CloudABI processes cannot modify signal handlers, fully
1001 * reset all signals to their default behavior. Do ignore
1002 * SIGPIPE, as it would otherwise be impossible to recover from
1003 * writes to broken pipes and sockets.
1005 if (SV_PROC_ABI(p) == SV_ABI_CLOUDABI) {
1006 osigignore = ps->ps_sigignore;
1007 while (SIGNOTEMPTY(osigignore)) {
1008 sig = sig_ffs(&osigignore);
1009 SIGDELSET(osigignore, sig);
1013 SIGADDSET(ps->ps_sigignore, SIGPIPE);
1017 * Reset stack state to the user stack.
1018 * Clear set of signals caught on the signal stack.
1021 MPASS(td->td_proc == p);
1022 td->td_sigstk.ss_flags = SS_DISABLE;
1023 td->td_sigstk.ss_size = 0;
1024 td->td_sigstk.ss_sp = 0;
1025 td->td_pflags &= ~TDP_ALTSTACK;
 * Reset the "no zombies if child dies" flag, as Solaris does.
1029 ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
1030 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
1031 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
1032 mtx_unlock(&ps->ps_mtx);
1036 * kern_sigprocmask()
1038 * Manipulate signal mask.
1041 kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
1044 sigset_t new_block, oset1;
1049 if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
1050 PROC_LOCK_ASSERT(p, MA_OWNED);
1053 mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
1054 ? MA_OWNED : MA_NOTOWNED);
1056 *oset = td->td_sigmask;
1063 oset1 = td->td_sigmask;
1064 SIGSETOR(td->td_sigmask, *set);
1065 new_block = td->td_sigmask;
1066 SIGSETNAND(new_block, oset1);
1069 SIGSETNAND(td->td_sigmask, *set);
1074 oset1 = td->td_sigmask;
1075 if (flags & SIGPROCMASK_OLD)
1076 SIGSETLO(td->td_sigmask, *set);
1078 td->td_sigmask = *set;
1079 new_block = td->td_sigmask;
1080 SIGSETNAND(new_block, oset1);
1089 * The new_block set contains signals that were not previously
1090 * blocked, but are blocked now.
1092 * In case we block any signal that was not previously blocked
1093 * for td, and process has the signal pending, try to schedule
1094 * signal delivery to some thread that does not block the
1095 * signal, possibly waking it up.
1097 if (p->p_numthreads != 1)
1098 reschedule_signals(p, new_block, flags);
1102 if (!(flags & SIGPROCMASK_PROC_LOCKED))
1107 #ifndef _SYS_SYSPROTO_H_
1108 struct sigprocmask_args {
1110 const sigset_t *set;
1115 sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
1118 sigset_t *setp, *osetp;
1121 setp = (uap->set != NULL) ? &set : NULL;
1122 osetp = (uap->oset != NULL) ? &oset : NULL;
1124 error = copyin(uap->set, setp, sizeof(set));
1128 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
1129 if (osetp && !error) {
1130 error = copyout(osetp, uap->oset, sizeof(oset));
1135 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1136 #ifndef _SYS_SYSPROTO_H_
1137 struct osigprocmask_args {
1143 osigprocmask(struct thread *td, struct osigprocmask_args *uap)
1148 OSIG2SIG(uap->mask, set);
1149 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
1150 SIG2OSIG(oset, td->td_retval[0]);
1153 #endif /* COMPAT_43 */
1156 sys_sigwait(struct thread *td, struct sigwait_args *uap)
1162 error = copyin(uap->set, &set, sizeof(set));
1164 td->td_retval[0] = error;
1168 error = kern_sigtimedwait(td, set, &ksi, NULL);
1170 if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
1172 if (error == ERESTART)
1174 td->td_retval[0] = error;
1178 error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
1179 td->td_retval[0] = error;
1184 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
1187 struct timespec *timeout;
1193 error = copyin(uap->timeout, &ts, sizeof(ts));
1201 error = copyin(uap->set, &set, sizeof(set));
1205 error = kern_sigtimedwait(td, set, &ksi, timeout);
1210 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1213 td->td_retval[0] = ksi.ksi_signo;
1218 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
1224 error = copyin(uap->set, &set, sizeof(set));
1228 error = kern_sigtimedwait(td, set, &ksi, NULL);
1233 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1236 td->td_retval[0] = ksi.ksi_signo;
1241 proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
1245 FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
1249 thr->td_si.si_signo = 0;
1254 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
1255 struct timespec *timeout)
1258 sigset_t saved_mask, new_block;
1260 int error, sig, timo, timevalid = 0;
1261 struct timespec rts, ets, ts;
1271 /* Ensure the sigfastblock value is up to date. */
1272 sigfastblock_fetch(td);
1274 if (timeout != NULL) {
1275 if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
1277 getnanouptime(&rts);
1278 timespecadd(&rts, timeout, &ets);
/* Some signals cannot be waited for. */
1283 SIG_CANTMASK(waitset);
1286 saved_mask = td->td_sigmask;
1287 SIGSETNAND(td->td_sigmask, waitset);
1289 mtx_lock(&ps->ps_mtx);
1291 mtx_unlock(&ps->ps_mtx);
1292 KASSERT(sig >= 0, ("sig %d", sig));
1293 if (sig != 0 && SIGISMEMBER(waitset, sig)) {
1294 if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
1295 sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
1305 * POSIX says this must be checked after looking for pending
1308 if (timeout != NULL) {
1313 getnanouptime(&rts);
1314 if (timespeccmp(&rts, &ets, >=)) {
1318 timespecsub(&ets, &rts, &ts);
1319 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1330 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", timo);
1332 if (timeout != NULL) {
1333 if (error == ERESTART) {
/* Timeout cannot be restarted. */
1336 } else if (error == EAGAIN) {
/* We will recalculate the timeout ourselves. */
1343 * If PTRACE_SCE or PTRACE_SCX were set after
1344 * userspace entered the syscall, return spurious
1345 * EINTR after wait was done. Only do this as last
1346 * resort after rechecking for possible queued signals
1347 * and expired timeouts.
1349 if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
1353 new_block = saved_mask;
1354 SIGSETNAND(new_block, td->td_sigmask);
1355 td->td_sigmask = saved_mask;
1357 * Fewer signals can be delivered to us, reschedule signal
1360 if (p->p_numthreads != 1)
1361 reschedule_signals(p, new_block, 0);
1364 SDT_PROBE2(proc, , , signal__clear, sig, ksi);
1366 if (ksi->ksi_code == SI_TIMER)
1367 itimer_accept(p, ksi->ksi_timerid, ksi);
1370 if (KTRPOINT(td, KTR_PSIG)) {
1373 mtx_lock(&ps->ps_mtx);
1374 action = ps->ps_sigact[_SIG_IDX(sig)];
1375 mtx_unlock(&ps->ps_mtx);
1376 ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
1379 if (sig == SIGKILL) {
1380 proc_td_siginfo_capture(td, &ksi->ksi_info);
1388 #ifndef _SYS_SYSPROTO_H_
1389 struct sigpending_args {
1394 sys_sigpending(struct thread *td, struct sigpending_args *uap)
1396 struct proc *p = td->td_proc;
1400 pending = p->p_sigqueue.sq_signals;
1401 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1403 return (copyout(&pending, uap->set, sizeof(sigset_t)));
1406 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1407 #ifndef _SYS_SYSPROTO_H_
1408 struct osigpending_args {
1413 osigpending(struct thread *td, struct osigpending_args *uap)
1415 struct proc *p = td->td_proc;
1419 pending = p->p_sigqueue.sq_signals;
1420 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1422 SIG2OSIG(pending, td->td_retval[0]);
1425 #endif /* COMPAT_43 */
1427 #if defined(COMPAT_43)
1429 * Generalized interface signal handler, 4.3-compatible.
1431 #ifndef _SYS_SYSPROTO_H_
1432 struct osigvec_args {
1440 osigvec(struct thread *td, struct osigvec_args *uap)
1443 struct sigaction nsa, osa;
1444 struct sigaction *nsap, *osap;
1447 if (uap->signum <= 0 || uap->signum >= ONSIG)
1449 nsap = (uap->nsv != NULL) ? &nsa : NULL;
1450 osap = (uap->osv != NULL) ? &osa : NULL;
1452 error = copyin(uap->nsv, &vec, sizeof(vec));
1455 nsap->sa_handler = vec.sv_handler;
1456 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1457 nsap->sa_flags = vec.sv_flags;
1458 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
1460 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1461 if (osap && !error) {
1462 vec.sv_handler = osap->sa_handler;
1463 SIG2OSIG(osap->sa_mask, vec.sv_mask);
1464 vec.sv_flags = osap->sa_flags;
1465 vec.sv_flags &= ~SA_NOCLDWAIT;
1466 vec.sv_flags ^= SA_RESTART;
1467 error = copyout(&vec, uap->osv, sizeof(vec));
1472 #ifndef _SYS_SYSPROTO_H_
1473 struct osigblock_args {
1478 osigblock(struct thread *td, struct osigblock_args *uap)
1482 OSIG2SIG(uap->mask, set);
1483 kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
1484 SIG2OSIG(oset, td->td_retval[0]);
1488 #ifndef _SYS_SYSPROTO_H_
1489 struct osigsetmask_args {
1494 osigsetmask(struct thread *td, struct osigsetmask_args *uap)
1498 OSIG2SIG(uap->mask, set);
1499 kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
1500 SIG2OSIG(oset, td->td_retval[0]);
1503 #endif /* COMPAT_43 */
1506 * Suspend calling thread until signal, providing mask to be set in the
1509 #ifndef _SYS_SYSPROTO_H_
1510 struct sigsuspend_args {
1511 const sigset_t *sigmask;
1516 sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
1521 error = copyin(uap->sigmask, &mask, sizeof(mask));
1524 return (kern_sigsuspend(td, mask));
1528 kern_sigsuspend(struct thread *td, sigset_t mask)
1530 struct proc *p = td->td_proc;
1533 /* Ensure the sigfastblock value is up to date. */
1534 sigfastblock_fetch(td);
1537 * When returning from sigsuspend, we want
1538 * the old mask to be restored after the
1539 * signal handler has finished. Thus, we
1540 * save it here and mark the sigacts structure
1544 kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
1545 SIGPROCMASK_PROC_LOCKED);
1546 td->td_pflags |= TDP_OLDMASK;
 * Process signals now.  Otherwise, we can get a spurious wakeup when a
 * signal enters the process queue but is delivered to another thread.
 * However, sigsuspend() should return only on signal delivery.
1554 (p->p_sysent->sv_set_syscall_retval)(td, EINTR);
1555 for (has_sig = 0; !has_sig;) {
1556 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
1559 thread_suspend_check(0);
1560 mtx_lock(&p->p_sigacts->ps_mtx);
1561 while ((sig = cursig(td)) != 0) {
1562 KASSERT(sig >= 0, ("sig %d", sig));
1563 has_sig += postsig(sig);
1565 mtx_unlock(&p->p_sigacts->ps_mtx);
1568 * If PTRACE_SCE or PTRACE_SCX were set after
1569 * userspace entered the syscall, return spurious
1572 if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
1576 td->td_errno = EINTR;
1577 td->td_pflags |= TDP_NERRNO;
1578 return (EJUSTRETURN);
1581 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1583 * Compatibility sigsuspend call for old binaries. Note nonstandard calling
1584 * convention: libc stub passes mask, not pointer, to save a copyin.
1586 #ifndef _SYS_SYSPROTO_H_
1587 struct osigsuspend_args {
1593 osigsuspend(struct thread *td, struct osigsuspend_args *uap)
1597 OSIG2SIG(uap->mask, mask);
1598 return (kern_sigsuspend(td, mask));
1600 #endif /* COMPAT_43 */
1602 #if defined(COMPAT_43)
1603 #ifndef _SYS_SYSPROTO_H_
1604 struct osigstack_args {
1605 struct sigstack *nss;
1606 struct sigstack *oss;
1611 osigstack(struct thread *td, struct osigstack_args *uap)
1613 struct sigstack nss, oss;
1616 if (uap->nss != NULL) {
1617 error = copyin(uap->nss, &nss, sizeof(nss));
1621 oss.ss_sp = td->td_sigstk.ss_sp;
1622 oss.ss_onstack = sigonstack(cpu_getstack(td));
1623 if (uap->nss != NULL) {
1624 td->td_sigstk.ss_sp = nss.ss_sp;
1625 td->td_sigstk.ss_size = 0;
1626 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1627 td->td_pflags |= TDP_ALTSTACK;
1629 if (uap->oss != NULL)
1630 error = copyout(&oss, uap->oss, sizeof(oss));
1634 #endif /* COMPAT_43 */
1636 #ifndef _SYS_SYSPROTO_H_
1637 struct sigaltstack_args {
1644 sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
1649 if (uap->ss != NULL) {
1650 error = copyin(uap->ss, &ss, sizeof(ss));
1654 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1655 (uap->oss != NULL) ? &oss : NULL);
1658 if (uap->oss != NULL)
1659 error = copyout(&oss, uap->oss, sizeof(stack_t));
1664 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1666 struct proc *p = td->td_proc;
1669 oonstack = sigonstack(cpu_getstack(td));
1672 *oss = td->td_sigstk;
1673 oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
1674 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1680 if ((ss->ss_flags & ~SS_DISABLE) != 0)
1682 if (!(ss->ss_flags & SS_DISABLE)) {
1683 if (ss->ss_size < p->p_sysent->sv_minsigstksz)
1686 td->td_sigstk = *ss;
1687 td->td_pflags |= TDP_ALTSTACK;
1689 td->td_pflags &= ~TDP_ALTSTACK;
1695 struct killpg1_ctx {
1705 killpg1_sendsig(struct proc *p, bool notself, struct killpg1_ctx *arg)
1709 if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
1710 (notself && p == arg->td->td_proc) || p->p_state == PRS_NEW)
1713 err = p_cansignal(arg->td, p, arg->sig);
1714 if (err == 0 && arg->sig != 0)
1715 pksignal(p, arg->sig, arg->ksi);
1721 else if (arg->ret == 0 && err != ESRCH && err != EPERM)
 * Common code for kill process group/broadcast kill.
 * td is the calling thread.
1730 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
1734 struct killpg1_ctx arg;
1746 sx_slock(&allproc_lock);
1747 FOREACH_PROC_IN_SYSTEM(p) {
1748 killpg1_sendsig(p, true, &arg);
1750 sx_sunlock(&allproc_lock);
1752 sx_slock(&proctree_lock);
1755 * zero pgid means send to my process group.
1757 pgrp = td->td_proc->p_pgrp;
1760 pgrp = pgfind(pgid);
1762 sx_sunlock(&proctree_lock);
1766 sx_sunlock(&proctree_lock);
1767 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1768 killpg1_sendsig(p, false, &arg);
1772 MPASS(arg.ret != 0 || arg.found || !arg.sent);
1773 if (arg.ret == 0 && !arg.sent)
1774 arg.ret = arg.found ? EPERM : ESRCH;
1778 #ifndef _SYS_SYSPROTO_H_
1786 sys_kill(struct thread *td, struct kill_args *uap)
1789 return (kern_kill(td, uap->pid, uap->signum));
1793 kern_kill(struct thread *td, pid_t pid, int signum)
 * A process in capability mode can send signals only to itself.
1801 * The main rationale behind this is that abort(3) is implemented as
1802 * kill(getpid(), SIGABRT).
1804 if (IN_CAPABILITY_MODE(td) && pid != td->td_proc->p_pid)
1807 AUDIT_ARG_SIGNUM(signum);
1809 if ((u_int)signum > _SIG_MAXSIG)
1812 ksiginfo_init(&ksi);
1813 ksi.ksi_signo = signum;
1814 ksi.ksi_code = SI_USER;
1815 ksi.ksi_pid = td->td_proc->p_pid;
1816 ksi.ksi_uid = td->td_ucred->cr_ruid;
1819 /* kill single process */
1820 if ((p = pfind_any(pid)) == NULL)
1822 AUDIT_ARG_PROCESS(p);
1823 error = p_cansignal(td, p, signum);
1824 if (error == 0 && signum)
1825 pksignal(p, signum, &ksi);
1830 case -1: /* broadcast signal */
1831 return (killpg1(td, signum, 0, 1, &ksi));
1832 case 0: /* signal own process group */
1833 return (killpg1(td, signum, 0, 0, &ksi));
1834 default: /* negative explicit process group */
1835 return (killpg1(td, signum, -pid, 0, &ksi));
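
/*
 * Illustrative summary (added commentary, not original code) of the pid
 * dispatch above, mirroring kill(2): pid > 0 signals a single process,
 * pid == -1 broadcasts, pid == 0 signals the caller's own process group,
 * and pid < -1 signals the process group whose id is -pid.
 */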
1841 sys_pdkill(struct thread *td, struct pdkill_args *uap)
1846 AUDIT_ARG_SIGNUM(uap->signum);
1847 AUDIT_ARG_FD(uap->fd);
1848 if ((u_int)uap->signum > _SIG_MAXSIG)
1851 error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
1854 AUDIT_ARG_PROCESS(p);
1855 error = p_cansignal(td, p, uap->signum);
1856 if (error == 0 && uap->signum)
1857 kern_psignal(p, uap->signum);
1862 #if defined(COMPAT_43)
1863 #ifndef _SYS_SYSPROTO_H_
1864 struct okillpg_args {
1871 okillpg(struct thread *td, struct okillpg_args *uap)
1875 AUDIT_ARG_SIGNUM(uap->signum);
1876 AUDIT_ARG_PID(uap->pgid);
1877 if ((u_int)uap->signum > _SIG_MAXSIG)
1880 ksiginfo_init(&ksi);
1881 ksi.ksi_signo = uap->signum;
1882 ksi.ksi_code = SI_USER;
1883 ksi.ksi_pid = td->td_proc->p_pid;
1884 ksi.ksi_uid = td->td_ucred->cr_ruid;
1885 return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
1887 #endif /* COMPAT_43 */
1889 #ifndef _SYS_SYSPROTO_H_
1890 struct sigqueue_args {
1893 /* union sigval */ void *value;
1897 sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
1901 sv.sival_ptr = uap->value;
1903 return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
1907 kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value)
1913 if ((u_int)signum > _SIG_MAXSIG)
1917 * Specification says sigqueue can only send signal to
1923 if ((p = pfind_any(pid)) == NULL)
1925 error = p_cansignal(td, p, signum);
1926 if (error == 0 && signum != 0) {
1927 ksiginfo_init(&ksi);
1928 ksi.ksi_flags = KSI_SIGQ;
1929 ksi.ksi_signo = signum;
1930 ksi.ksi_code = SI_QUEUE;
1931 ksi.ksi_pid = td->td_proc->p_pid;
1932 ksi.ksi_uid = td->td_ucred->cr_ruid;
1933 ksi.ksi_value = *value;
1934 error = pksignal(p, ksi.ksi_signo, &ksi);
1941 * Send a signal to a process group.
1944 gsignal(int pgid, int sig, ksiginfo_t *ksi)
1949 sx_slock(&proctree_lock);
1950 pgrp = pgfind(pgid);
1951 sx_sunlock(&proctree_lock);
1953 pgsignal(pgrp, sig, 0, ksi);
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
1964 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
1969 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1970 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1972 if (p->p_state == PRS_NORMAL &&
1973 (checkctty == 0 || p->p_flag & P_CONTROLT))
1974 pksignal(p, sig, ksi);
1981 * Recalculate the signal mask and reset the signal disposition after
1982 * usermode frame for delivery is formed. Should be called after
1983 * mach-specific routine, because sysent->sv_sendsig() needs correct
1984 * ps_siginfo and signal mask.
1987 postsig_done(int sig, struct thread *td, struct sigacts *ps)
1991 mtx_assert(&ps->ps_mtx, MA_OWNED);
1992 td->td_ru.ru_nsignals++;
1993 mask = ps->ps_catchmask[_SIG_IDX(sig)];
1994 if (!SIGISMEMBER(ps->ps_signodefer, sig))
1995 SIGADDSET(mask, sig);
1996 kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
1997 SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
1998 if (SIGISMEMBER(ps->ps_sigreset, sig))
2003 * Send a signal caused by a trap to the current thread. If it will be
2004 * caught immediately, deliver it with correct code. Otherwise, post it
2008 trapsignal(struct thread *td, ksiginfo_t *ksi)
2016 sig = ksi->ksi_signo;
2017 code = ksi->ksi_code;
2018 KASSERT(_SIG_VALID(sig), ("invalid signal"));
2020 sigfastblock_fetch(td);
2023 mtx_lock(&ps->ps_mtx);
2024 sigmask = td->td_sigmask;
2025 if (td->td_sigblock_val != 0)
2026 SIGSETOR(sigmask, fastblock_mask);
2027 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
2028 !SIGISMEMBER(sigmask, sig)) {
2030 if (KTRPOINT(curthread, KTR_PSIG))
2031 ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
2032 &td->td_sigmask, code);
2034 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
2035 ksi, &td->td_sigmask);
2036 postsig_done(sig, td, ps);
2037 mtx_unlock(&ps->ps_mtx);
2040 * Avoid a possible infinite loop if the thread
2041 * masking the signal or process is ignoring the
2044 if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) ||
2045 ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
2046 SIGDELSET(td->td_sigmask, sig);
2047 SIGDELSET(ps->ps_sigcatch, sig);
2048 SIGDELSET(ps->ps_sigignore, sig);
2049 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
2050 td->td_pflags &= ~TDP_SIGFASTBLOCK;
2051 td->td_sigblock_val = 0;
2053 mtx_unlock(&ps->ps_mtx);
2054 p->p_sig = sig; /* XXX to verify code */
2055 tdsendsignal(p, td, sig, ksi);
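
/*
 * Illustrative summary (added commentary, not original code) of
 * trapsignal() above: when the trap signal is caught and not blocked, it
 * is delivered immediately through sv_sendsig(); otherwise it falls back
 * to tdsendsignal(), and, if kern_forcesigexit is set, a blocked or
 * ignored trap signal is forcibly unblocked and reset to SIG_DFL so the
 * faulting thread cannot spin on the same trap forever.
 */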
2060 static struct thread *
2061 sigtd(struct proc *p, int sig, bool fast_sigblock)
2063 struct thread *td, *signal_td;
2065 PROC_LOCK_ASSERT(p, MA_OWNED);
2066 MPASS(!fast_sigblock || p == curproc);
2069 * Check if current thread can handle the signal without
2070 * switching context to another thread.
2072 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig) &&
2073 (!fast_sigblock || curthread->td_sigblock_val == 0))
2076 FOREACH_THREAD_IN_PROC(p, td) {
2077 if (!SIGISMEMBER(td->td_sigmask, sig) && (!fast_sigblock ||
2078 td != curthread || td->td_sigblock_val == 0)) {
2083 if (signal_td == NULL)
2084 signal_td = FIRST_THREAD_IN_PROC(p);
2089 * Send the signal to the process. If the signal has an action, the action
2090 * is usually performed by the target process rather than the caller; we add
2091 * the signal to the set of pending signals for the process.
2094 * o When a stop signal is sent to a sleeping process that takes the
2095 * default action, the process is stopped without awakening it.
2096 * o SIGCONT restarts stopped processes (or puts them back to sleep)
 * regardless of the signal action (e.g., blocked or ignored).
2099 * Other ignored signals are discarded immediately.
2101 * NB: This function may be entered from the debugger via the "kill" DDB
2102 * command. There is little that can be done to mitigate the possibly messy
2103 * side effects of this unwise possibility.
2106 kern_psignal(struct proc *p, int sig)
2110 ksiginfo_init(&ksi);
2111 ksi.ksi_signo = sig;
2112 ksi.ksi_code = SI_KERNEL;
2113 (void) tdsendsignal(p, NULL, sig, &ksi);
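
/*
 * Usage note (added commentary, not original code): kern_psignal() is the
 * in-kernel convenience wrapper; it fabricates a ksiginfo with
 * ksi_code = SI_KERNEL and lets tdsendsignal() choose a target thread,
 * whereas pksignal() below forwards a caller-supplied ksiginfo_t instead.
 */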
2117 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
2120 return (tdsendsignal(p, NULL, sig, ksi));
2123 /* Utility function for finding a thread to send signal event to. */
2125 sigev_findtd(struct proc *p ,struct sigevent *sigev, struct thread **ttd)
2129 if (sigev->sigev_notify == SIGEV_THREAD_ID) {
2130 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
2142 tdsignal(struct thread *td, int sig)
2146 ksiginfo_init(&ksi);
2147 ksi.ksi_signo = sig;
2148 ksi.ksi_code = SI_KERNEL;
2149 (void) tdsendsignal(td->td_proc, td, sig, &ksi);
2153 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
2156 (void) tdsendsignal(td->td_proc, td, sig, ksi);
2160 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
2163 sigqueue_t *sigqueue;
2170 MPASS(td == NULL || p == td->td_proc);
2171 PROC_LOCK_ASSERT(p, MA_OWNED);
2173 if (!_SIG_VALID(sig))
2174 panic("%s(): invalid signal %d", __func__, sig);
2176 KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
2179 * IEEE Std 1003.1-2001: return success when killing a zombie.
2181 if (p->p_state == PRS_ZOMBIE) {
2182 if (ksi && (ksi->ksi_flags & KSI_INS))
2183 ksiginfo_tryfree(ksi);
2188 KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
2189 prop = sigprop(sig);
2192 td = sigtd(p, sig, false);
2193 sigqueue = &p->p_sigqueue;
2195 sigqueue = &td->td_sigqueue;
2197 SDT_PROBE3(proc, , , signal__send, td, p, sig);
2200 * If the signal is being ignored,
2201 * then we forget about it immediately.
2202 * (Note: we don't set SIGCONT in ps_sigignore,
2203 * and if it is set to SIG_IGN,
2204 * action will be SIG_DFL here.)
2206 mtx_lock(&ps->ps_mtx);
2207 if (SIGISMEMBER(ps->ps_sigignore, sig)) {
2208 SDT_PROBE3(proc, , , signal__discard, td, p, sig);
2210 mtx_unlock(&ps->ps_mtx);
2211 if (ksi && (ksi->ksi_flags & KSI_INS))
2212 ksiginfo_tryfree(ksi);
2215 if (SIGISMEMBER(td->td_sigmask, sig))
2217 else if (SIGISMEMBER(ps->ps_sigcatch, sig))
2221 if (SIGISMEMBER(ps->ps_sigintr, sig))
2225 mtx_unlock(&ps->ps_mtx);
2227 if (prop & SIGPROP_CONT)
2228 sigqueue_delete_stopmask_proc(p);
2229 else if (prop & SIGPROP_STOP) {
2231 * If sending a tty stop signal to a member of an orphaned
2232 * process group, discard the signal here if the action
2233 * is default; don't stop the process below if sleeping,
2234 * and don't clear any pending SIGCONT.
2236 if ((prop & SIGPROP_TTYSTOP) != 0 &&
2237 (p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 &&
2238 action == SIG_DFL) {
2239 if (ksi && (ksi->ksi_flags & KSI_INS))
2240 ksiginfo_tryfree(ksi);
2243 sigqueue_delete_proc(p, SIGCONT);
2244 if (p->p_flag & P_CONTINUED) {
2245 p->p_flag &= ~P_CONTINUED;
2246 PROC_LOCK(p->p_pptr);
2247 sigqueue_take(p->p_ksi);
2248 PROC_UNLOCK(p->p_pptr);
2252 ret = sigqueue_add(sigqueue, sig, ksi);
2257 * Defer further processing for signals which are held,
2258 * except that stopped processes must be continued by SIGCONT.
2260 if (action == SIG_HOLD &&
2261 !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
2267 * Some signals have a process-wide effect and a per-thread
2268 * component. Most processing occurs when the process next
2269 * tries to cross the user boundary, however there are some
2270 * times when processing needs to be done immediately, such as
2271 * waking up threads so that they can cross the user boundary.
2272 * We try to do the per-process part here.
2274 if (P_SHOULDSTOP(p)) {
2275 KASSERT(!(p->p_flag & P_WEXIT),
2276 ("signal to stopped but exiting process"));
2277 if (sig == SIGKILL) {
2279 * If traced process is already stopped,
2280 * then no further action is necessary.
2282 if (p->p_flag & P_TRACED)
2285 * SIGKILL sets process running.
2286 * It will die elsewhere.
2287 * All threads must be restarted.
2289 p->p_flag &= ~P_STOPPED_SIG;
2293 if (prop & SIGPROP_CONT) {
2295 * If traced process is already stopped,
2296 * then no further action is necessary.
2298 if (p->p_flag & P_TRACED)
2301 * If SIGCONT is default (or ignored), we continue the
2302 * process but don't leave the signal in sigqueue as
2303 * it has no further action. If SIGCONT is held, we
2304 * continue the process and leave the signal in
2305 * sigqueue. If the process catches SIGCONT, let it
2306 * handle the signal itself. If it isn't waiting on
2307 * an event, it goes back to run state.
2308 * Otherwise, process goes back to sleep state.
2310 p->p_flag &= ~P_STOPPED_SIG;
2312 if (p->p_numthreads == p->p_suspcount) {
2314 p->p_flag |= P_CONTINUED;
2315 p->p_xsig = SIGCONT;
2316 PROC_LOCK(p->p_pptr);
2317 childproc_continued(p);
2318 PROC_UNLOCK(p->p_pptr);
2321 if (action == SIG_DFL) {
2322 thread_unsuspend(p);
2324 sigqueue_delete(sigqueue, sig);
2327 if (action == SIG_CATCH) {
2329 * The process wants to catch it so it needs
2330 * to run at least one thread, but which one?
2336 * The signal is not ignored or caught.
2338 thread_unsuspend(p);
2343 if (prop & SIGPROP_STOP) {
2345 * If traced process is already stopped,
2346 * then no further action is necessary.
2348 if (p->p_flag & P_TRACED)
2351 * Already stopped, don't need to stop again
2352 * (If we did the shell could get confused).
 * Just make sure the signal STOP bit is set.
2355 p->p_flag |= P_STOPPED_SIG;
2356 sigqueue_delete(sigqueue, sig);
2361 * All other kinds of signals:
2362 * If a thread is sleeping interruptibly, simulate a
2363 * wakeup so that when it is continued it will be made
2364 * runnable and can look at the signal. However, don't make
2365 * the PROCESS runnable, leave it stopped.
2366 * It may run a bit until it hits a thread_suspend_check().
2370 if (TD_CAN_ABORT(td))
2371 wakeup_swapper = sleepq_abort(td, intrval);
2377 * Mutexes are short lived. Threads waiting on them will
2378 * hit thread_suspend_check() soon.
2380 } else if (p->p_state == PRS_NORMAL) {
2381 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2382 tdsigwakeup(td, sig, action, intrval);
2386 MPASS(action == SIG_DFL);
2388 if (prop & SIGPROP_STOP) {
2389 if (p->p_flag & (P_PPWAIT|P_WEXIT))
2391 p->p_flag |= P_STOPPED_SIG;
2394 wakeup_swapper = sig_suspend_threads(td, p, 1);
2395 if (p->p_numthreads == p->p_suspcount) {
 * Only a thread sending a signal to another
 * process can reach here: if a thread were
 * sending a signal to its own process, it would
 * not suspend itself here, so p_numthreads
 * could never equal p_suspcount.
2405 sigqueue_delete_proc(p, p->p_xsig);
/* Not in "NORMAL" state.  Discard the signal. */
2412 sigqueue_delete(sigqueue, sig);
2417 * The process is not stopped so we need to apply the signal to all the
2421 tdsigwakeup(td, sig, action, intrval);
2423 thread_unsuspend(p);
2426 itimer_proc_continue(p);
2427 kqtimer_proc_continue(p);
2429 /* If we jump here, proc slock should not be owned. */
2430 PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
2438 * The force of a signal has been directed against a single
2439 * thread. We need to see what we can do about knocking it
2440 * out of any sleep it may be in etc.
2443 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2445 struct proc *p = td->td_proc;
2446 int prop, wakeup_swapper;
2448 PROC_LOCK_ASSERT(p, MA_OWNED);
2449 prop = sigprop(sig);
/*
 * Bring the priority of a thread up if we want it to get
 * killed in this lifetime.  Be careful to avoid bumping the
 * priority of the idle thread, since we still allow signals to be
 * sent to kernel processes.
 */
2459 if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2460 td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2461 sched_prio(td, PUSER);
2462 if (TD_ON_SLEEPQ(td)) {
2464 * If thread is sleeping uninterruptibly
2465 * we can't interrupt the sleep... the signal will
2466 * be noticed when the process returns through
2467 * trap() or syscall().
2469 if ((td->td_flags & TDF_SINTR) == 0)
2472 * If SIGCONT is default (or ignored) and process is
2473 * asleep, we are finished; the process should not
2476 if ((prop & SIGPROP_CONT) && action == SIG_DFL) {
2479 sigqueue_delete(&p->p_sigqueue, sig);
2481 * It may be on either list in this state.
2482 * Remove from both for now.
2484 sigqueue_delete(&td->td_sigqueue, sig);
2489 * Don't awaken a sleeping thread for SIGSTOP if the
2490 * STOP signal is deferred.
2492 if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
2493 TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2497 * Give low priority threads a better chance to run.
2499 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2500 sched_prio(td, PUSER);
2502 wakeup_swapper = sleepq_abort(td, intrval);
2510 * Other states do nothing with the signal immediately,
2511 * other than kicking ourselves if we are running.
2512 * It will either never be noticed, or noticed very soon.
2515 if (TD_IS_RUNNING(td) && td != curthread)
2525 ptrace_coredump(struct thread *td)
2528 struct thr_coredump_req *tcq;
2531 MPASS(td == curthread);
2533 PROC_LOCK_ASSERT(p, MA_OWNED);
2534 if ((td->td_dbgflags & TDB_COREDUMPRQ) == 0)
2536 KASSERT((p->p_flag & P_STOPPED_TRACE) != 0, ("not stopped"));
2538 tcq = td->td_coredump;
2539 KASSERT(tcq != NULL, ("td_coredump is NULL"));
2541 if (p->p_sysent->sv_coredump == NULL) {
2542 tcq->tc_error = ENOSYS;
2547 rl_cookie = vn_rangelock_wlock(tcq->tc_vp, 0, OFF_MAX);
2549 tcq->tc_error = p->p_sysent->sv_coredump(td, tcq->tc_vp,
2550 tcq->tc_limit, tcq->tc_flags);
2552 vn_rangelock_unlock(tcq->tc_vp, rl_cookie);
2555 td->td_dbgflags &= ~TDB_COREDUMPRQ;
2556 td->td_coredump = NULL;
2561 sig_suspend_threads(struct thread *td, struct proc *p, int sending)
2566 PROC_LOCK_ASSERT(p, MA_OWNED);
2567 PROC_SLOCK_ASSERT(p, MA_OWNED);
2568 MPASS(sending || td == curthread);
2571 FOREACH_THREAD_IN_PROC(p, td2) {
2573 td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
2574 if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
2575 (td2->td_flags & TDF_SINTR)) {
2576 if (td2->td_flags & TDF_SBDRY) {
2578 * Once a thread is asleep with
2579 * TDF_SBDRY and without TDF_SERESTART
2580 * or TDF_SEINTR set, it should never
2581 * become suspended due to this check.
2583 KASSERT(!TD_IS_SUSPENDED(td2),
2584 ("thread with deferred stops suspended"));
2585 if (TD_SBDRY_INTR(td2)) {
2586 wakeup_swapper |= sleepq_abort(td2,
2587 TD_SBDRY_ERRNO(td2));
2590 } else if (!TD_IS_SUSPENDED(td2))
2591 thread_suspend_one(td2);
2592 } else if (!TD_IS_SUSPENDED(td2)) {
2593 if (sending || td != td2)
2594 td2->td_flags |= TDF_ASTPENDING;
2596 if (TD_IS_RUNNING(td2) && td2 != td)
2597 forward_signal(td2);
2602 return (wakeup_swapper);
2606 * Stop the process for an event deemed interesting to the debugger. If si is
2607 * non-NULL, this is a signal exchange; the new signal requested by the
2608 * debugger will be returned for handling. If si is NULL, this is some other
2609 * type of interesting event. The debugger may request a signal be delivered in
2610 * that case as well, however it will be deferred until it can be handled.
2613 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
2615 struct proc *p = td->td_proc;
2619 PROC_LOCK_ASSERT(p, MA_OWNED);
2620 KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2621 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2622 &p->p_mtx.lock_object, "Stopping for traced signal");
2626 if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
2627 td->td_dbgflags |= TDB_XSIG;
2628 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
2629 td->td_tid, p->p_pid, td->td_dbgflags, sig);
2631 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
2634 * Ensure that, if we've been PT_KILLed, the
2635 * exit status reflects that. Another thread
2636 * may also be in ptracestop(), having just
2637 * received the SIGKILL, but this thread was
2638 * unsuspended first.
2640 td->td_dbgflags &= ~TDB_XSIG;
2641 td->td_xsig = SIGKILL;
2645 if (p->p_flag & P_SINGLE_EXIT &&
2646 !(td->td_dbgflags & TDB_EXIT)) {
2648 * Ignore ptrace stops except for thread exit
2649 * events when the process exits.
2651 td->td_dbgflags &= ~TDB_XSIG;
2657 * Make wait(2) work. Ensure that right after the
2658 * attach, the thread which was decided to become the
2659 * leader of attach gets reported to the waiter.
2660 * Otherwise, just avoid overwriting another thread's
2661 * assignment to p_xthread. If another thread has
2662 * already set p_xthread, the current thread will get
2663 * a chance to report itself upon the next iteration.
2665 if ((td->td_dbgflags & TDB_FSTP) != 0 ||
2666 ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
2667 p->p_xthread == NULL)) {
2672 * If we are on a sleepqueue already,
2673 * let the sleepqueue code decide whether
2674 * it needs to go back to sleep after the attach.
2676 if (td->td_wchan == NULL)
2677 td->td_dbgflags &= ~TDB_FSTP;
2679 p->p_flag2 &= ~P2_PTRACE_FSTP;
2680 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
2681 sig_suspend_threads(td, p, 0);
2683 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
2684 td->td_dbgflags &= ~TDB_STOPATFORK;
2687 td->td_dbgflags |= TDB_SSWITCH;
2688 thread_suspend_switch(td, p);
2689 td->td_dbgflags &= ~TDB_SSWITCH;
2690 if ((td->td_dbgflags & TDB_COREDUMPRQ) != 0) {
2692 ptrace_coredump(td);
2696 if (p->p_xthread == td)
2697 p->p_xthread = NULL;
2698 if (!(p->p_flag & P_TRACED))
2700 if (td->td_dbgflags & TDB_SUSPEND) {
2701 if (p->p_flag & P_SINGLE_EXIT)
2709 if (si != NULL && sig == td->td_xsig) {
2710 /* Parent wants us to take the original signal unchanged. */
2711 si->ksi_flags |= KSI_HEAD;
2712 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
2714 } else if (td->td_xsig != 0) {
2716 * If parent wants us to take a new signal, then it will leave
2717 * it in td->td_xsig; otherwise we just look for signals again.
2719 ksiginfo_init(&ksi);
2720 ksi.ksi_signo = td->td_xsig;
2721 ksi.ksi_flags |= KSI_PTRACE;
2722 td2 = sigtd(p, td->td_xsig, false);
2723 tdsendsignal(p, td2, td->td_xsig, &ksi);
2728 return (td->td_xsig);
2732 reschedule_signals(struct proc *p, sigset_t block, int flags)
2737 bool fastblk, pslocked;
2739 PROC_LOCK_ASSERT(p, MA_OWNED);
2741 pslocked = (flags & SIGPROCMASK_PS_LOCKED) != 0;
2742 mtx_assert(&ps->ps_mtx, pslocked ? MA_OWNED : MA_NOTOWNED);
2743 if (SIGISEMPTY(p->p_siglist))
2745 SIGSETAND(block, p->p_siglist);
2746 fastblk = (flags & SIGPROCMASK_FASTBLK) != 0;
2747 while ((sig = sig_ffs(&block)) != 0) {
2748 SIGDELSET(block, sig);
2749 td = sigtd(p, sig, fastblk);
2752 * If sigtd() selected us even though sigfastblock is
2753 * blocking, do not activate the AST or wake us, to avoid
2754 * a loop in the AST handler.
2756 if (fastblk && td == curthread)
2761 mtx_lock(&ps->ps_mtx);
2762 if (p->p_flag & P_TRACED ||
2763 (SIGISMEMBER(ps->ps_sigcatch, sig) &&
2764 !SIGISMEMBER(td->td_sigmask, sig))) {
2765 tdsigwakeup(td, sig, SIG_CATCH,
2766 (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
2770 mtx_unlock(&ps->ps_mtx);
2775 tdsigcleanup(struct thread *td)
2781 PROC_LOCK_ASSERT(p, MA_OWNED);
2783 sigqueue_flush(&td->td_sigqueue);
2784 if (p->p_numthreads == 1)
2788 * Since we cannot handle signals, notify the signal-posting
2789 * code about this by filling the sigmask.
2791 * Also, if needed, wake up thread(s) that do not block the
2792 * same signals as the exiting thread, since the thread might
2793 * have been selected for delivery and woken up.
2795 SIGFILLSET(unblocked);
2796 SIGSETNAND(unblocked, td->td_sigmask);
2797 SIGFILLSET(td->td_sigmask);
2798 reschedule_signals(p, unblocked, 0);
2803 sigdeferstop_curr_flags(int cflags)
2806 MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
2807 (cflags & TDF_SBDRY) != 0);
2808 return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
2812 * Defer the delivery of SIGSTOP for the current thread, according to
2813 * the requested mode. Returns previous flags, which must be restored
2814 * by sigallowstop().
2816 * The TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
2817 * cleared by the current thread, which allows lock-less read-only access to them.
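 *
 * Typical usage is via the sigdeferstop()/sigallowstop() wrappers from
 * <sys/signalvar.h> (a sketch; "do_sleepable_io()" is a stand-in for any
 * sleepable operation that must not become a stop point):
 *
 *	int stops;
 *
 *	stops = sigdeferstop(SIGDEFERSTOP_ERESTART);
 *	error = do_sleepable_io();
 *	sigallowstop(stops);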
2821 sigdeferstop_impl(int mode)
2827 cflags = sigdeferstop_curr_flags(td->td_flags);
2829 case SIGDEFERSTOP_NOP:
2832 case SIGDEFERSTOP_OFF:
2835 case SIGDEFERSTOP_SILENT:
2836 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
2838 case SIGDEFERSTOP_EINTR:
2839 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
2841 case SIGDEFERSTOP_ERESTART:
2842 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
2845 panic("sigdeferstop: invalid mode %x", mode);
2848 if (cflags == nflags)
2849 return (SIGDEFERSTOP_VAL_NCHG);
2851 td->td_flags = (td->td_flags & ~cflags) | nflags;
2857 * Restores the STOP handling mode, typically permitting the delivery
2858 * of SIGSTOP for the current thread. This does not immediately
2859 * suspend if a stop was posted. Instead, the thread will suspend
2860 * either via ast() or a subsequent interruptible sleep.
2863 sigallowstop_impl(int prev)
2868 KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
2869 KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
2870 ("sigallowstop: incorrect previous mode %x", prev));
2872 cflags = sigdeferstop_curr_flags(td->td_flags);
2873 if (cflags != prev) {
2875 td->td_flags = (td->td_flags & ~cflags) | prev;
2881 * If the current process has received a signal (should be caught or cause
2882 * termination, should interrupt current syscall), return the signal number.
2883 * Stop signals with default action are processed immediately, then cleared;
2884 * they aren't returned. This is checked after each entry to the system for
2885 * a syscall or trap (though this can usually be done without calling issignal
2886 * by checking the pending signal masks in cursig.) The normal call
 * sequence is:
2889 *	while (sig = cursig(curthread))
 *		postsig(sig);
2893 issignal(struct thread *td)
2897 struct sigqueue *queue;
2898 sigset_t sigpending;
2904 mtx_assert(&ps->ps_mtx, MA_OWNED);
2905 PROC_LOCK_ASSERT(p, MA_OWNED);
2907 sigpending = td->td_sigqueue.sq_signals;
2908 SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
2909 SIGSETNAND(sigpending, td->td_sigmask);
2911 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
2912 (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2913 SIG_STOPSIGMASK(sigpending);
2914 if (SIGISEMPTY(sigpending)) /* no signal to send */
2918 * Do fast sigblock if requested by usermode. Since
2919 * we do know that there was a signal pending at this
2920 * point, set SIGFASTBLOCK_PEND as an indicator for
2921 * usermode to perform a dummy call to
2922 * sigfastblock(SIGFASTBLOCK_UNBLOCK), which causes immediate
2923 * delivery of the postponed pending signal.
2925 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
2926 if (td->td_sigblock_val != 0)
2927 SIGSETNAND(sigpending, fastblock_mask);
2928 if (SIGISEMPTY(sigpending)) {
2929 td->td_pflags |= TDP_SIGFASTPENDING;
2934 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
2935 (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
2936 SIGISMEMBER(sigpending, SIGSTOP)) {
2938 * If debugger just attached, always consume
2939 * SIGSTOP from ptrace(PT_ATTACH) first, to
2940 * execute the debugger attach ritual in order.
2944 td->td_dbgflags |= TDB_FSTP;
2946 sig = sig_ffs(&sigpending);
2950 * We should see pending but ignored signals
2951 * only if P_TRACED was on when they were posted.
2953 if (SIGISMEMBER(ps->ps_sigignore, sig) &&
2954 (p->p_flag & P_TRACED) == 0) {
2955 sigqueue_delete(&td->td_sigqueue, sig);
2956 sigqueue_delete(&p->p_sigqueue, sig);
2959 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) {
2961 * If traced, always stop.
2962 * Remove old signal from queue before the stop.
2963 * XXX shrug off debugger, it causes siginfo to be thrown away.
2966 queue = &td->td_sigqueue;
2967 ksiginfo_init(&ksi);
2968 if (sigqueue_get(queue, sig, &ksi) == 0) {
2969 queue = &p->p_sigqueue;
2970 sigqueue_get(queue, sig, &ksi);
2972 td->td_si = ksi.ksi_info;
2974 mtx_unlock(&ps->ps_mtx);
2975 sig = ptracestop(td, sig, &ksi);
2976 mtx_lock(&ps->ps_mtx);
2978 td->td_si.si_signo = 0;
2981 * Keep looking if the debugger discarded or
2982 * replaced the signal.
2988 * If the signal became masked, re-queue it.
2990 if (SIGISMEMBER(td->td_sigmask, sig)) {
2991 ksi.ksi_flags |= KSI_HEAD;
2992 sigqueue_add(&p->p_sigqueue, sig, &ksi);
2997 * If the traced bit got turned off, requeue
2998 * the signal and go back up to the top to
2999 * rescan signals. This ensures that p_sig*
3000 * and p_sigact are consistent.
3002 if ((p->p_flag & P_TRACED) == 0) {
3003 ksi.ksi_flags |= KSI_HEAD;
3004 sigqueue_add(queue, sig, &ksi);
3009 prop = sigprop(sig);
3012 * Decide whether the signal should be returned.
3013 * Return the signal's number, or fall through
3014 * to clear it from the pending mask.
3016 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
3017 case (intptr_t)SIG_DFL:
3019 * Don't take default actions on system processes.
3021 if (p->p_pid <= 1) {
3024 * Are you sure you want to ignore SIGSEGV in init? XXX
3027 printf("Process (pid %lu) got signal %d\n",
3028 (u_long)p->p_pid, sig);
3030 break; /* == ignore */
3033 * If there is a pending stop signal to process with
3034 * default action, stop here, then clear the signal.
3035 * Traced or exiting processes should ignore stops.
3036 * Additionally, a member of an orphaned process group
3037 * should ignore tty stops.
3039 if (prop & SIGPROP_STOP) {
3040 mtx_unlock(&ps->ps_mtx);
3041 if ((p->p_flag & (P_TRACED | P_WEXIT |
3042 P_SINGLE_EXIT)) != 0 || ((p->p_pgrp->
3043 pg_flags & PGRP_ORPHANED) != 0 &&
3044 (prop & SIGPROP_TTYSTOP) != 0)) {
3045 mtx_lock(&ps->ps_mtx);
3046 break; /* == ignore */
3048 if (TD_SBDRY_INTR(td)) {
3049 KASSERT((td->td_flags & TDF_SBDRY) != 0,
3050 ("lost TDF_SBDRY"));
3051 mtx_lock(&ps->ps_mtx);
3054 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
3055 &p->p_mtx.lock_object, "Catching SIGSTOP");
3056 sigqueue_delete(&td->td_sigqueue, sig);
3057 sigqueue_delete(&p->p_sigqueue, sig);
3058 p->p_flag |= P_STOPPED_SIG;
3061 sig_suspend_threads(td, p, 0);
3062 thread_suspend_switch(td, p);
3064 mtx_lock(&ps->ps_mtx);
3066 } else if (prop & SIGPROP_IGNORE) {
3068 * Except for SIGCONT, shouldn't get here.
3069 * Default action is to ignore; drop it.
3071 break; /* == ignore */
3076 case (intptr_t)SIG_IGN:
3078 * Masking above should prevent us from ever trying
3079 * to take action on an ignored signal other
3080 * than SIGCONT, unless the process is traced.
3082 if ((prop & SIGPROP_CONT) == 0 &&
3083 (p->p_flag & P_TRACED) == 0)
3084 printf("issignal\n");
3085 break; /* == ignore */
3089 * This signal has an action, let
3090 * postsig() process it.
3094 sigqueue_delete(&td->td_sigqueue, sig); /* take the signal! */
3095 sigqueue_delete(&p->p_sigqueue, sig);
3102 thread_stopped(struct proc *p)
3106 PROC_LOCK_ASSERT(p, MA_OWNED);
3107 PROC_SLOCK_ASSERT(p, MA_OWNED);
3111 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
3113 p->p_flag &= ~P_WAITED;
3114 PROC_LOCK(p->p_pptr);
3115 childproc_stopped(p, (p->p_flag & P_TRACED) ?
3116 CLD_TRAPPED : CLD_STOPPED);
3117 PROC_UNLOCK(p->p_pptr);
3123 * Take the action for the specified signal
3124 * from the current set of pending signals.
3134 sigset_t returnmask;
3136 KASSERT(sig != 0, ("postsig"));
3140 PROC_LOCK_ASSERT(p, MA_OWNED);
3142 mtx_assert(&ps->ps_mtx, MA_OWNED);
3143 ksiginfo_init(&ksi);
3144 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3145 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3147 ksi.ksi_signo = sig;
3148 if (ksi.ksi_code == SI_TIMER)
3149 itimer_accept(p, ksi.ksi_timerid, &ksi);
3150 action = ps->ps_sigact[_SIG_IDX(sig)];
3152 if (KTRPOINT(td, KTR_PSIG))
3153 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
3154 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
3157 if (action == SIG_DFL) {
3159 * Default action, where the default is to kill
3160 * the process. (Other cases were ignored above.)
3162 mtx_unlock(&ps->ps_mtx);
3163 proc_td_siginfo_capture(td, &ksi.ksi_info);
3168 * If we get here, the signal must be caught.
3170 KASSERT(action != SIG_IGN, ("postsig action %p", action));
3171 KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
3172 ("postsig action: blocked sig %d", sig));
3175 * Set the new mask value and also defer further
3176 * occurrences of this signal.
3178 * Special case: user has done a sigsuspend. Here the
3179 * current mask is not of interest, but rather the
3180 * mask from before the sigsuspend is what we want
3181 * restored after the signal processing is completed.
3183 if (td->td_pflags & TDP_OLDMASK) {
3184 returnmask = td->td_oldsigmask;
3185 td->td_pflags &= ~TDP_OLDMASK;
3187 returnmask = td->td_sigmask;
3189 if (p->p_sig == sig) {
3192 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3193 postsig_done(sig, td, ps);
3199 sig_ast_checksusp(struct thread *td)
3205 PROC_LOCK_ASSERT(p, MA_OWNED);
3207 if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
3210 ret = thread_suspend_check(1);
3211 MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
3216 sig_ast_needsigchk(struct thread *td)
3223 PROC_LOCK_ASSERT(p, MA_OWNED);
3225 if ((td->td_flags & TDF_NEEDSIGCHK) == 0)
3229 mtx_lock(&ps->ps_mtx);
3232 mtx_unlock(&ps->ps_mtx);
3233 KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY"));
3234 KASSERT(TD_SBDRY_INTR(td),
3235 ("lost TDF_SERESTART of TDF_SEINTR"));
3236 KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
3237 (TDF_SEINTR | TDF_SERESTART),
3238 ("both TDF_SEINTR and TDF_SERESTART"));
3239 ret = TD_SBDRY_ERRNO(td);
3240 } else if (sig != 0) {
3241 ret = SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : ERESTART;
3242 mtx_unlock(&ps->ps_mtx);
3244 mtx_unlock(&ps->ps_mtx);
3249 * Do not go to sleep if this thread was the ptrace(2)
3250 * attach leader. cursig() consumed SIGSTOP from PT_ATTACH,
3251 * but we usually act on the signal by interrupting sleep, and
3252 * should do that here as well.
3254 if ((td->td_dbgflags & TDB_FSTP) != 0) {
3257 td->td_dbgflags &= ~TDB_FSTP;
3271 if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0)
3277 ret = sig_ast_checksusp(td);
3279 ret = sig_ast_needsigchk(td);
3285 proc_wkilled(struct proc *p)
3288 PROC_LOCK_ASSERT(p, MA_OWNED);
3289 if ((p->p_flag & P_WKILLED) == 0) {
3290 p->p_flag |= P_WKILLED;
3292 * Notify swapper that there is a process to swap in.
3293 * The notification is racy; at worst it would take 10
3294 * seconds for the swapper process to notice.
3296 if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0)
3302 * Kill the current process for stated reason.
3305 killproc(struct proc *p, const char *why)
3308 PROC_LOCK_ASSERT(p, MA_OWNED);
3309 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
3311 log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n",
3312 p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id,
3313 p->p_ucred->cr_uid, why);
3315 kern_psignal(p, SIGKILL);
3319 * Force the current process to exit with the specified signal, dumping core
3320 * if appropriate. We bypass the normal tests for masked and caught signals,
3321 * allowing unrecoverable failures to terminate the process without changing
3322 * signal state. Mark the accounting record with the signal termination.
3323 * If dumping core, save the signal number for the debugger. Calls exit and does not return.
3327 sigexit(struct thread *td, int sig)
3329 struct proc *p = td->td_proc;
3331 PROC_LOCK_ASSERT(p, MA_OWNED);
3332 p->p_acflag |= AXSIG;
3334 * We must be single-threading to generate a core dump. This
3335 * ensures that the registers in the core file are up-to-date.
3336 * Also, the ELF dump handler assumes that the thread list doesn't
3337 * change out from under it.
3339 * XXX If another thread attempts to single-thread before us
3340 * (e.g. via fork()), we won't get a dump at all.
3342 if ((sigprop(sig) & SIGPROP_CORE) &&
3343 thread_single(p, SINGLE_NO_EXIT) == 0) {
3346 * Log signals which would cause core dumps
3347 * (Log as LOG_INFO to appease those who don't want these messages.)
3349 * XXX: TODO: write out the ruid as well as the euid.
3350 * Note that coredump() drops proc lock.
3352 if (coredump(td) == 0)
3354 if (kern_logsigexit)
3356 "pid %d (%s), jid %d, uid %d: exited on "
3357 "signal %d%s\n", p->p_pid, p->p_comm,
3358 p->p_ucred->cr_prison->pr_id,
3359 td->td_ucred->cr_uid,
3361 sig & WCOREFLAG ? " (core dumped)" : "");
3369 * Send queued SIGCHLD to parent when child process's state
3373 sigparent(struct proc *p, int reason, int status)
3375 PROC_LOCK_ASSERT(p, MA_OWNED);
3376 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3378 if (p->p_ksi != NULL) {
3379 p->p_ksi->ksi_signo = SIGCHLD;
3380 p->p_ksi->ksi_code = reason;
3381 p->p_ksi->ksi_status = status;
3382 p->p_ksi->ksi_pid = p->p_pid;
3383 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
3384 if (KSI_ONQ(p->p_ksi))
3387 pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3391 childproc_jobstate(struct proc *p, int reason, int sig)
3395 PROC_LOCK_ASSERT(p, MA_OWNED);
3396 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3399 * Wake up the parent sleeping in kern_wait() and also send
3400 * SIGCHLD to the parent, but SIGCHLD does not guarantee
3401 * that the parent will wake up, because the parent may have masked the signal.
3404 p->p_pptr->p_flag |= P_STATCHILD;
3407 ps = p->p_pptr->p_sigacts;
3408 mtx_lock(&ps->ps_mtx);
3409 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
3410 mtx_unlock(&ps->ps_mtx);
3411 sigparent(p, reason, sig);
3413 mtx_unlock(&ps->ps_mtx);
3417 childproc_stopped(struct proc *p, int reason)
3420 childproc_jobstate(p, reason, p->p_xsig);
3424 childproc_continued(struct proc *p)
3426 childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
3430 childproc_exited(struct proc *p)
3434 if (WCOREDUMP(p->p_xsig)) {
3435 reason = CLD_DUMPED;
3436 status = WTERMSIG(p->p_xsig);
3437 } else if (WIFSIGNALED(p->p_xsig)) {
3438 reason = CLD_KILLED;
3439 status = WTERMSIG(p->p_xsig);
3441 reason = CLD_EXITED;
3442 status = p->p_xexit;
3445 * XXX avoid calling wakeup(p->p_pptr), the work is
3448 sigparent(p, reason, status);
3451 #define MAX_NUM_CORE_FILES 100000
3452 #ifndef NUM_CORE_FILES
3453 #define NUM_CORE_FILES 5
3455 CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES);
3456 static int num_cores = NUM_CORE_FILES;
3459 sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS)
3464 new_val = num_cores;
3465 error = sysctl_handle_int(oidp, &new_val, 0, req);
3466 if (error != 0 || req->newptr == NULL)
3468 if (new_val > MAX_NUM_CORE_FILES)
3469 new_val = MAX_NUM_CORE_FILES;
3472 num_cores = new_val;
3475 SYSCTL_PROC(_debug, OID_AUTO, ncores,
3476 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, sizeof(int),
3477 sysctl_debug_num_cores_check, "I",
3478 "Maximum number of generated process corefiles while using index format");
3480 #define GZIP_SUFFIX ".gz"
3481 #define ZSTD_SUFFIX ".zst"
3483 int compress_user_cores = 0;
3486 sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS)
3490 val = compress_user_cores;
3491 error = sysctl_handle_int(oidp, &val, 0, req);
3492 if (error != 0 || req->newptr == NULL)
3494 if (val != 0 && !compressor_avail(val))
3496 compress_user_cores = val;
3499 SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores,
3500 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int),
3501 sysctl_compress_user_cores, "I",
3502 "Enable compression of user corefiles ("
3503 __XSTRING(COMPRESS_GZIP) " = gzip, "
3504 __XSTRING(COMPRESS_ZSTD) " = zstd)");
3506 int compress_user_cores_level = 6;
3507 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN,
3508 &compress_user_cores_level, 0,
3509 "Corefile compression level");
3512 * Protect the access to corefilename[] by allproc_lock.
3514 #define corefilename_lock allproc_lock
3516 static char corefilename[MAXPATHLEN] = {"%N.core"};
3517 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
3520 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
3524 sx_xlock(&corefilename_lock);
3525 error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
3527 sx_xunlock(&corefilename_lock);
3531 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
3532 CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
3533 "Process corefile name format string");
3536 vnode_close_locked(struct thread *td, struct vnode *vp)
3540 vn_close(vp, FWRITE, td->td_ucred, td);
3544 * If the core format has a %I in it, then we need to check
3545 * for existing corefiles before defining a name.
3546 * To do this we iterate over 0..ncores to find a
3547 * nonexistent core file name to use. If all core files are
3548 * already in use, we choose the oldest one.
3551 corefile_open_last(struct thread *td, char *name, int indexpos,
3552 int indexlen, int ncores, struct vnode **vpp)
3554 struct vnode *oldvp, *nextvp, *vp;
3556 struct nameidata nd;
3557 int error, i, flags, oflags, cmode;
3559 struct timespec lasttime;
3561 nextvp = oldvp = NULL;
3562 cmode = S_IRUSR | S_IWUSR;
3563 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3564 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3566 for (i = 0; i < ncores; i++) {
3567 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3569 ch = name[indexpos + indexlen];
3570 (void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen,
3572 name[indexpos + indexlen] = ch;
3574 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3575 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3581 NDFREE(&nd, NDF_ONLY_PNBUF);
3582 if ((flags & O_CREAT) == O_CREAT) {
3587 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3589 vnode_close_locked(td, vp);
3593 if (oldvp == NULL ||
3594 lasttime.tv_sec > vattr.va_mtime.tv_sec ||
3595 (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
3596 lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
3598 vn_close(oldvp, FWRITE, td->td_ucred, td);
3601 lasttime = vattr.va_mtime;
3603 vnode_close_locked(td, vp);
3607 if (oldvp != NULL) {
3608 if (nextvp == NULL) {
3609 if ((td->td_proc->p_flag & P_SUGID) != 0) {
3611 vn_close(oldvp, FWRITE, td->td_ucred, td);
3614 error = vn_lock(nextvp, LK_EXCLUSIVE);
3616 vn_close(nextvp, FWRITE, td->td_ucred,
3622 vn_close(oldvp, FWRITE, td->td_ucred, td);
3627 vnode_close_locked(td, oldvp);
3636 * corefile_open(comm, uid, pid, td, compress, vpp, namep)
3637 * Expand the name described in corefilename, using name, uid, and pid
3638 * and open/create core file.
3639 * corefilename is a printf-like string, with these format specifiers
 * (matching the switch below):
3640 *	%N	name of process ("name")
3641 *	%P	process id (pid)
 *	%U	user id (uid)
 *	%H	hostname
 *	%I	autoincrementing index
 *	%S	signal number
3643 * For example, "%N.core" is the default; they can be disabled completely
3644 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
3645 * This is controlled by the sysctl variable kern.corefile (see above).
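 *
 * As an illustration (hypothetical values): with
 * kern.corefile="/var/coredumps/%U/%N.%P.core", a core dump of pid 1234 of
 * program "myprog" running as uid 1001 would be written to
 * "/var/coredumps/1001/myprog.1234.core", with a ".gz" or ".zst" suffix
 * appended when kern.compress_user_cores selects a compressor.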
3648 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
3649 int compress, int signum, struct vnode **vpp, char **namep)
3652 struct nameidata nd;
3654 char *hostname, *name;
3655 int cmode, error, flags, i, indexpos, indexlen, oflags, ncores;
3658 format = corefilename;
3659 name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
3663 (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
3664 sx_slock(&corefilename_lock);
3665 for (i = 0; format[i] != '\0'; i++) {
3666 switch (format[i]) {
3667 case '%': /* Format character */
3669 switch (format[i]) {
3671 sbuf_putc(&sb, '%');
3673 case 'H': /* hostname */
3674 if (hostname == NULL) {
3675 hostname = malloc(MAXHOSTNAMELEN,
3678 getcredhostname(td->td_ucred, hostname,
3680 sbuf_printf(&sb, "%s", hostname);
3682 case 'I': /* autoincrementing index */
3683 if (indexpos != -1) {
3684 sbuf_printf(&sb, "%%I");
3688 indexpos = sbuf_len(&sb);
3689 sbuf_printf(&sb, "%u", ncores - 1);
3690 indexlen = sbuf_len(&sb) - indexpos;
3692 case 'N': /* process name */
3693 sbuf_printf(&sb, "%s", comm);
3695 case 'P': /* process id */
3696 sbuf_printf(&sb, "%u", pid);
3698 case 'S': /* signal number */
3699 sbuf_printf(&sb, "%i", signum);
3701 case 'U': /* user id */
3702 sbuf_printf(&sb, "%u", uid);
3706 "Unknown format character %c in "
3707 "corename `%s'\n", format[i], format);
3712 sbuf_putc(&sb, format[i]);
3716 sx_sunlock(&corefilename_lock);
3717 free(hostname, M_TEMP);
3718 if (compress == COMPRESS_GZIP)
3719 sbuf_printf(&sb, GZIP_SUFFIX);
3720 else if (compress == COMPRESS_ZSTD)
3721 sbuf_printf(&sb, ZSTD_SUFFIX);
3722 if (sbuf_error(&sb) != 0) {
3723 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
3724 "long\n", (long)pid, comm, (u_long)uid);
3732 if (indexpos != -1) {
3733 error = corefile_open_last(td, name, indexpos, indexlen, ncores,
3737 "pid %d (%s), uid (%u): Path `%s' failed "
3738 "on initial open test, error = %d\n",
3739 pid, comm, uid, name, error);
3742 cmode = S_IRUSR | S_IWUSR;
3743 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3744 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3745 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3746 if ((td->td_proc->p_flag & P_SUGID) != 0)
3749 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3750 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3754 NDFREE(&nd, NDF_ONLY_PNBUF);
3760 audit_proc_coredump(td, name, error);
3770 * Dump a process' core. The main routine does some
3771 * policy checking, and creates the name of the coredump;
3772 * then it passes on a vnode and a size limit to the process-specific
3773 * coredump routine if there is one; if there _is not_ one, it returns
3774 * ENOSYS; otherwise it returns the error from the process-specific routine.
3778 coredump(struct thread *td)
3780 struct proc *p = td->td_proc;
3781 struct ucred *cred = td->td_ucred;
3785 size_t fullpathsize;
3786 int error, error1, locked;
3787 char *name; /* name of corefile */
3790 char *fullpath, *freepath = NULL;
3793 PROC_LOCK_ASSERT(p, MA_OWNED);
3794 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
3796 if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
3797 (p->p_flag2 & P2_NOTRACE) != 0) {
3803 * Note that the bulk of limit checking is done after
3804 * the corefile is created. The exception is if the limit
3805 * for corefiles is 0, in which case we don't bother
3806 * creating the corefile at all. This layout means that
3807 * a corefile larger than the limit is truncated rather
3808 * than not being created at all.
3810 limit = (off_t)lim_cur(td, RLIMIT_CORE);
3811 if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
3817 error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
3818 compress_user_cores, p->p_sig, &vp, &name);
3823 * Don't dump to non-regular files or files with links.
3824 * Do not dump into system files. Effective user must own the corefile.
3826 if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
3827 vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0 ||
3828 vattr.va_uid != cred->cr_uid) {
3836 /* Postpone other writers, including core dumps of other processes. */
3837 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
3839 lf.l_whence = SEEK_SET;
3842 lf.l_type = F_WRLCK;
3843 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
3847 if (set_core_nodump_flag)
3848 vattr.va_flags = UF_NODUMP;
3849 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3850 VOP_SETATTR(vp, &vattr, cred);
3853 p->p_acflag |= ACORE;
3856 if (p->p_sysent->sv_coredump != NULL) {
3857 error = p->p_sysent->sv_coredump(td, vp, limit, 0);
3863 lf.l_type = F_UNLCK;
3864 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
3866 vn_rangelock_unlock(vp, rl_cookie);
3869 * Notify the userland helper that a process triggered a core dump.
3870 * This allows the helper to run an automated debugging session; a devd(8)
 * hook sketch follows this function.
3872 if (error != 0 || coredump_devctl == 0)
3874 sb = sbuf_new_auto();
3875 if (vn_fullpath_global(p->p_textvp, &fullpath, &freepath) != 0)
3877 sbuf_printf(sb, "comm=\"");
3878 devctl_safe_quote_sb(sb, fullpath);
3879 free(freepath, M_TEMP);
3880 sbuf_printf(sb, "\" core=\"");
3883 * We can't look up the core file vp directly. When we're replacing a core, and
3884 * at other random times, we flush the name cache, so the lookup will fail. Instead,
3885 * if the path of the core is relative, add the current dir in front of it.
3887 if (name[0] != '/') {
3888 fullpathsize = MAXPATHLEN;
3889 freepath = malloc(fullpathsize, M_TEMP, M_WAITOK);
3890 if (vn_getcwd(freepath, &fullpath, &fullpathsize) != 0) {
3891 free(freepath, M_TEMP);
3894 devctl_safe_quote_sb(sb, fullpath);
3895 free(freepath, M_TEMP);
3898 devctl_safe_quote_sb(sb, name);
3899 sbuf_printf(sb, "\"");
3900 if (sbuf_finish(sb) == 0)
3901 devctl_notify("kernel", "signal", "coredump", sbuf_data(sb));
3905 error1 = vn_close(vp, FWRITE, cred, td);
3909 audit_proc_coredump(td, name, error);
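/*
 * A devd(8) hook can consume the "coredump" notification sent above. A
 * hypothetical /etc/devd/coredump.conf entry might look like this (sketch
 * only; the system/subsystem/type strings mirror the devctl_notify() call):
 *
 *	notify 10 {
 *		match "system"		"kernel";
 *		match "subsystem"	"signal";
 *		match "type"		"coredump";
 *		action "logger coredump of $comm written to $core";
 *	};
 */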
3916 * Nonexistent system call: signal the process (it may want to handle it). Flag
3917 * an error in case the process won't see the signal immediately (blocked or ignored).
3919 #ifndef _SYS_SYSPROTO_H_
3926 nosys(struct thread *td, struct nosys_args *args)
3933 tdsignal(td, SIGSYS);
3935 if (kern_lognosys == 1 || kern_lognosys == 3) {
3936 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3939 if (kern_lognosys == 2 || kern_lognosys == 3 ||
3940 (p->p_pid == 1 && (kern_lognosys & 3) == 0)) {
3941 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3948 * Send a SIGIO or SIGURG signal to a process or process group using stored
3949 * credentials rather than those of the current process.
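 *
 * Userland arranges for this delivery roughly as follows (a sketch; the
 * fcntl(2) calls record the owner whose credentials are stored in the
 * struct sigio consumed here):
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);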
3952 pgsigio(struct sigio **sigiop, int sig, int checkctty)
3955 struct sigio *sigio;
3957 ksiginfo_init(&ksi);
3958 ksi.ksi_signo = sig;
3959 ksi.ksi_code = SI_KERNEL;
3963 if (sigio == NULL) {
3967 if (sigio->sio_pgid > 0) {
3968 PROC_LOCK(sigio->sio_proc);
3969 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
3970 kern_psignal(sigio->sio_proc, sig);
3971 PROC_UNLOCK(sigio->sio_proc);
3972 } else if (sigio->sio_pgid < 0) {
3975 PGRP_LOCK(sigio->sio_pgrp);
3976 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
3978 if (p->p_state == PRS_NORMAL &&
3979 CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
3980 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
3981 kern_psignal(p, sig);
3984 PGRP_UNLOCK(sigio->sio_pgrp);
3990 filt_sigattach(struct knote *kn)
3992 struct proc *p = curproc;
3994 kn->kn_ptr.p_proc = p;
3995 kn->kn_flags |= EV_CLEAR; /* automatically set */
3997 knlist_add(p->p_klist, kn, 0);
4003 filt_sigdetach(struct knote *kn)
4005 struct proc *p = kn->kn_ptr.p_proc;
4007 knlist_remove(p->p_klist, kn, 0);
4011 * signal knotes are shared with proc knotes, so we apply a mask to
4012 * the hint in order to differentiate them from process hints. This
4013 * could be avoided by using a signal-specific knote list, but probably
4014 * isn't worth the trouble.
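 *
 * Userland sketch of the consumer side of these knotes (illustrative only;
 * after the event fires, kev.data holds the delivery count):
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &kev, 1, NULL);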
4017 filt_signal(struct knote *kn, long hint)
4020 if (hint & NOTE_SIGNAL) {
4021 hint &= ~NOTE_SIGNAL;
4023 if (kn->kn_id == hint)
4026 return (kn->kn_data != 0);
4034 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
4035 refcount_init(&ps->ps_refcnt, 1);
4036 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
4041 sigacts_free(struct sigacts *ps)
4044 if (refcount_release(&ps->ps_refcnt) == 0)
4046 mtx_destroy(&ps->ps_mtx);
4047 free(ps, M_SUBPROC);
4051 sigacts_hold(struct sigacts *ps)
4054 refcount_acquire(&ps->ps_refcnt);
4059 sigacts_copy(struct sigacts *dest, struct sigacts *src)
4062 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
4063 mtx_lock(&src->ps_mtx);
4064 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
4065 mtx_unlock(&src->ps_mtx);
4069 sigacts_shared(struct sigacts *ps)
4072 return (ps->ps_refcnt > 1);
4076 sig_drop_caught(struct proc *p)
4082 PROC_LOCK_ASSERT(p, MA_OWNED);
4083 mtx_assert(&ps->ps_mtx, MA_OWNED);
4084 while (SIGNOTEMPTY(ps->ps_sigcatch)) {
4085 sig = sig_ffs(&ps->ps_sigcatch);
4087 if ((sigprop(sig) & SIGPROP_IGNORE) != 0)
4088 sigqueue_delete_proc(p, sig);
4093 sigfastblock_failed(struct thread *td, bool sendsig, bool write)
4098 * Prevent further fetches and SIGSEGVs, allowing thread to
4099 * issue syscalls despite corruption.
4101 sigfastblock_clear(td);
4105 ksiginfo_init_trap(&ksi);
4106 ksi.ksi_signo = SIGSEGV;
4107 ksi.ksi_code = write ? SEGV_ACCERR : SEGV_MAPERR;
4108 ksi.ksi_addr = td->td_sigblock_ptr;
4109 trapsignal(td, &ksi);
4113 sigfastblock_fetch_sig(struct thread *td, bool sendsig, uint32_t *valp)
4117 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4119 if (fueword32((void *)td->td_sigblock_ptr, &res) == -1) {
4120 sigfastblock_failed(td, sendsig, false);
4124 td->td_sigblock_val = res & ~SIGFASTBLOCK_FLAGS;
4129 sigfastblock_resched(struct thread *td, bool resched)
4136 reschedule_signals(p, td->td_sigmask, 0);
4140 td->td_flags |= TDF_ASTPENDING | TDF_NEEDSIGCHK;
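/*
 * Userland protocol sketch for the sigfastblock(2) syscall implemented
 * below (illustrative only; the SIGFASTBLOCK_* constants, including the
 * SIGFASTBLOCK_INC increment step, are assumed to come from
 * <sys/signalvar.h>, and the intended consumers are libc and libthr rather
 * than applications):
 *
 *	uint32_t w = 0;
 *
 *	sigfastblock(SIGFASTBLOCK_SETPTR, &w);
 *	w += SIGFASTBLOCK_INC;			enter a blocked section
 *	...
 *	w -= SIGFASTBLOCK_INC;			leave the blocked section
 *	if (w == SIGFASTBLOCK_PEND)		kernel noted a pending signal
 *		sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);
 *	sigfastblock(SIGFASTBLOCK_UNSETPTR, NULL);
 */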
4145 sys_sigfastblock(struct thread *td, struct sigfastblock_args *uap)
4154 case SIGFASTBLOCK_SETPTR:
4155 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
4159 if (((uintptr_t)(uap->ptr) & (sizeof(uint32_t) - 1)) != 0) {
4163 td->td_pflags |= TDP_SIGFASTBLOCK;
4164 td->td_sigblock_ptr = uap->ptr;
4167 case SIGFASTBLOCK_UNBLOCK:
4168 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4174 res = casueword32(td->td_sigblock_ptr,
4175 SIGFASTBLOCK_PEND, &oldval, 0);
4178 sigfastblock_failed(td, false, true);
4184 if (oldval != SIGFASTBLOCK_PEND) {
4188 error = thread_check_susp(td, false);
4196 * td_sigblock_val is cleared there, but not on a
4197 * syscall exit. The end effect is that a single
4198 * interruptible sleep, while user sigblock word is
4199 * set, might return EINTR or ERESTART to usermode
4200 * without delivering a signal. All further sleeps,
4201 * until userspace clears the word and does
4202 * sigfastblock(UNBLOCK), observe the current word and no
4203 * longer get interrupted. This is a slight
4204 * non-conformance; the alternative would be to read the
4205 * sigblock word on each syscall entry.
4207 td->td_sigblock_val = 0;
4210 * Rely on the normal AST mechanism to deliver pending
4211 * signals to the current thread, but notify other threads about the fake unblock.
4214 sigfastblock_resched(td, error == 0 && p->p_numthreads != 1);
4218 case SIGFASTBLOCK_UNSETPTR:
4219 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4223 if (!sigfastblock_fetch_sig(td, false, &oldval)) {
4227 if (oldval != 0 && oldval != SIGFASTBLOCK_PEND) {
4231 sigfastblock_clear(td);
4242 sigfastblock_clear(struct thread *td)
4246 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4248 td->td_sigblock_val = 0;
4249 resched = (td->td_pflags & TDP_SIGFASTPENDING) != 0 ||
4251 td->td_pflags &= ~(TDP_SIGFASTBLOCK | TDP_SIGFASTPENDING);
4252 sigfastblock_resched(td, resched);
4256 sigfastblock_fetch(struct thread *td)
4260 (void)sigfastblock_fetch_sig(td, true, &val);
4264 sigfastblock_setpend1(struct thread *td)
4269 if ((td->td_pflags & TDP_SIGFASTPENDING) == 0)
4271 res = fueword32((void *)td->td_sigblock_ptr, &oldval);
4273 sigfastblock_failed(td, true, false);
4277 res = casueword32(td->td_sigblock_ptr, oldval, &oldval,
4278 oldval | SIGFASTBLOCK_PEND);
4280 sigfastblock_failed(td, true, true);
4284 td->td_sigblock_val = oldval & ~SIGFASTBLOCK_FLAGS;
4285 td->td_pflags &= ~TDP_SIGFASTPENDING;
4289 if (thread_check_susp(td, false) != 0)
4295 sigfastblock_setpend(struct thread *td, bool resched)
4299 sigfastblock_setpend1(td);
4303 reschedule_signals(p, fastblock_mask, SIGPROCMASK_FASTBLK);