2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
42 #include "opt_capsicum.h"
43 #include "opt_ktrace.h"
45 #include <sys/param.h>
47 #include <sys/ctype.h>
48 #include <sys/systm.h>
49 #include <sys/signalvar.h>
50 #include <sys/vnode.h>
52 #include <sys/capsicum.h>
53 #include <sys/compressor.h>
54 #include <sys/condvar.h>
55 #include <sys/devctl.h>
56 #include <sys/event.h>
57 #include <sys/fcntl.h>
58 #include <sys/imgact.h>
59 #include <sys/kernel.h>
61 #include <sys/ktrace.h>
62 #include <sys/limits.h>
64 #include <sys/malloc.h>
65 #include <sys/mutex.h>
66 #include <sys/refcount.h>
67 #include <sys/namei.h>
69 #include <sys/procdesc.h>
70 #include <sys/ptrace.h>
71 #include <sys/posix4.h>
72 #include <sys/racct.h>
73 #include <sys/resourcevar.h>
76 #include <sys/sleepqueue.h>
80 #include <sys/syscall.h>
81 #include <sys/syscallsubr.h>
82 #include <sys/sysctl.h>
83 #include <sys/sysent.h>
84 #include <sys/syslog.h>
85 #include <sys/sysproto.h>
86 #include <sys/timers.h>
87 #include <sys/unistd.h>
88 #include <sys/vmmeter.h>
91 #include <vm/vm_extern.h>
96 #include <machine/cpu.h>
98 #include <security/audit/audit.h>
100 #define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
102 SDT_PROVIDER_DECLARE(proc);
103 SDT_PROBE_DEFINE3(proc, , , signal__send,
104 "struct thread *", "struct proc *", "int");
105 SDT_PROBE_DEFINE2(proc, , , signal__clear,
106 "int", "ksiginfo_t *");
107 SDT_PROBE_DEFINE3(proc, , , signal__discard,
108 "struct thread *", "struct proc *", "int");
110 static int coredump(struct thread *);
111 static int killpg1(struct thread *td, int sig, int pgid, int all,
113 static int issignal(struct thread *td);
114 static void reschedule_signals(struct proc *p, sigset_t block, int flags);
115 static int sigprop(int sig);
116 static void tdsigwakeup(struct thread *, int, sig_t, int);
117 static int sig_suspend_threads(struct thread *, struct proc *);
118 static int filt_sigattach(struct knote *kn);
119 static void filt_sigdetach(struct knote *kn);
120 static int filt_signal(struct knote *kn, long hint);
121 static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
122 static void sigqueue_start(void);
123 static void sigfastblock_setpend(struct thread *td, bool resched);
124 static void sigexit1(struct thread *td, int sig, ksiginfo_t *ksi) __dead2;
126 static uma_zone_t ksiginfo_zone = NULL;
127 struct filterops sig_filtops = {
129 .f_attach = filt_sigattach,
130 .f_detach = filt_sigdetach,
131 .f_event = filt_signal,
134 static int kern_logsigexit = 1;
135 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
137 "Log processes quitting on abnormal signals to syslog(3)");
139 static int kern_forcesigexit = 1;
140 SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
141 &kern_forcesigexit, 0, "Force trap signal to be handled");
143 static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
144 "POSIX real time signal");
146 static int max_pending_per_proc = 128;
147 SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
148 &max_pending_per_proc, 0, "Max pending signals per proc");
150 static int preallocate_siginfo = 1024;
151 SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
152 &preallocate_siginfo, 0, "Preallocated signal memory size");
154 static int signal_overflow = 0;
155 SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
156 &signal_overflow, 0, "Number of signals that overflowed");
158 static int signal_alloc_fail = 0;
159 SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
160 &signal_alloc_fail, 0, "Number of signals that failed to be allocated");
162 static int kern_lognosys = 0;
163 SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
164 "Log invalid syscalls");
166 __read_frequently bool sigfastblock_fetch_always = false;
167 SYSCTL_BOOL(_kern, OID_AUTO, sigfastblock_fetch_always, CTLFLAG_RWTUN,
168 &sigfastblock_fetch_always, 0,
169 "Fetch sigfastblock word on each syscall entry for proper "
170 "blocking semantic");
172 static bool kern_sig_discard_ign = true;
173 SYSCTL_BOOL(_kern, OID_AUTO, sig_discard_ign, CTLFLAG_RWTUN,
174 &kern_sig_discard_ign, 0,
175 "Discard ignored signals on delivery, otherwise queue them to "
178 SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
181 * Policy -- Can ucred cr1 send SIGIO to the process that owns ucred cr2?
182 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
183 * in the right situations.
185 #define CANSIGIO(cr1, cr2) \
186 ((cr1)->cr_uid == 0 || \
187 (cr1)->cr_ruid == (cr2)->cr_ruid || \
188 (cr1)->cr_uid == (cr2)->cr_ruid || \
189 (cr1)->cr_ruid == (cr2)->cr_uid || \
190 (cr1)->cr_uid == (cr2)->cr_uid)
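/*
 * Illustrative use of CANSIGIO (a sketch only; the actual consumers are
 * the SIGIO/SIGURG delivery paths in other files): the sender's ucred
 * may signal the owner process if it is root or shares a real/effective
 * uid with it, e.g.
 *
 *	if (CANSIGIO(txcred, p->p_ucred))
 *		kern_psignal(p, SIGIO);
 *
 * where txcred stands for the credentials recorded at fsetown() time.
 */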
192 static int sugid_coredump;
193 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
194 &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
196 static int capmode_coredump;
197 SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
198 &capmode_coredump, 0, "Allow processes in capability mode to dump core");
200 static int do_coredump = 1;
201 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
202 &do_coredump, 0, "Enable/Disable coredumps");
204 static int set_core_nodump_flag = 0;
205 SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
206 0, "Enable setting the NODUMP flag on coredump files");
208 static int coredump_devctl = 0;
209 SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
210 0, "Generate a devctl notification when processes coredump");
213 * Signal properties and actions.
214 * The array below categorizes the signals and their default actions
215 * according to the following properties:
217 #define SIGPROP_KILL 0x01 /* terminates process by default */
218 #define SIGPROP_CORE 0x02 /* ditto and coredumps */
219 #define SIGPROP_STOP 0x04 /* suspend process */
220 #define SIGPROP_TTYSTOP 0x08 /* ditto, from tty */
221 #define SIGPROP_IGNORE 0x10 /* ignore by default */
222 #define SIGPROP_CONT 0x20 /* continue if suspended */
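/*
 * The per-signal combination of these bits lives in sigproptbl[] below
 * and is queried via sigprop().  For example (sketch):
 *
 *	if ((sigprop(sig) & SIGPROP_CORE) != 0)
 *		... the default action terminates the process and
 *		    attempts a core dump (e.g. SIGQUIT, SIGSEGV) ...
 */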
224 static const int sigproptbl[NSIG] = {
225 [SIGHUP] = SIGPROP_KILL,
226 [SIGINT] = SIGPROP_KILL,
227 [SIGQUIT] = SIGPROP_KILL | SIGPROP_CORE,
228 [SIGILL] = SIGPROP_KILL | SIGPROP_CORE,
229 [SIGTRAP] = SIGPROP_KILL | SIGPROP_CORE,
230 [SIGABRT] = SIGPROP_KILL | SIGPROP_CORE,
231 [SIGEMT] = SIGPROP_KILL | SIGPROP_CORE,
232 [SIGFPE] = SIGPROP_KILL | SIGPROP_CORE,
233 [SIGKILL] = SIGPROP_KILL,
234 [SIGBUS] = SIGPROP_KILL | SIGPROP_CORE,
235 [SIGSEGV] = SIGPROP_KILL | SIGPROP_CORE,
236 [SIGSYS] = SIGPROP_KILL | SIGPROP_CORE,
237 [SIGPIPE] = SIGPROP_KILL,
238 [SIGALRM] = SIGPROP_KILL,
239 [SIGTERM] = SIGPROP_KILL,
240 [SIGURG] = SIGPROP_IGNORE,
241 [SIGSTOP] = SIGPROP_STOP,
242 [SIGTSTP] = SIGPROP_STOP | SIGPROP_TTYSTOP,
243 [SIGCONT] = SIGPROP_IGNORE | SIGPROP_CONT,
244 [SIGCHLD] = SIGPROP_IGNORE,
245 [SIGTTIN] = SIGPROP_STOP | SIGPROP_TTYSTOP,
246 [SIGTTOU] = SIGPROP_STOP | SIGPROP_TTYSTOP,
247 [SIGIO] = SIGPROP_IGNORE,
248 [SIGXCPU] = SIGPROP_KILL,
249 [SIGXFSZ] = SIGPROP_KILL,
250 [SIGVTALRM] = SIGPROP_KILL,
251 [SIGPROF] = SIGPROP_KILL,
252 [SIGWINCH] = SIGPROP_IGNORE,
253 [SIGINFO] = SIGPROP_IGNORE,
254 [SIGUSR1] = SIGPROP_KILL,
255 [SIGUSR2] = SIGPROP_KILL,
258 #define _SIG_FOREACH_ADVANCE(i, set) ({ \
262 int __sig = ffs(__bits); \
263 __bits &= ~(1u << (__sig - 1)); \
264 sig = __i * sizeof((set)->__bits[0]) * NBBY + __sig; \
268 if (++__i == _SIG_WORDS) { \
272 __bits = (set)->__bits[__i]; \
277 #define SIG_FOREACH(i, set) \
278 for (int32_t __i = -1, __bits = 0; \
279 _SIG_FOREACH_ADVANCE(i, set); ) \
281 static sigset_t fastblock_mask;
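/*
 * Illustrative SIG_FOREACH usage (sketch): visit each signal currently
 * set in a sigset_t, for instance a thread's pending set:
 *
 *	int sig;
 *	SIG_FOREACH(sig, &td->td_sigqueue.sq_signals) {
 *		if (!SIGISMEMBER(td->td_sigmask, sig))
 *			... sig is pending and not blocked ...
 *	}
 */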
284 ast_sig(struct thread *td, int tda)
287 int old_boundary, sig;
293 if (p->p_numthreads == 1 && (tda & (TDAI(TDA_SIG) |
294 TDAI(TDA_AST))) == 0) {
298 * Note that TDA_SIG should be re-read from
299 * td_ast, since a signal might have been delivered
300 * after we cleared td_flags above. This is one of
301 * the reasons for the looping check of the AST condition.
302 * See comment in userret() about P_PPWAIT.
304 if ((p->p_flag & P_PPWAIT) == 0 &&
305 (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
306 if (SIGPENDING(td) && ((tda | td->td_ast) &
307 (TDAI(TDA_SIG) | TDAI(TDA_AST))) == 0) {
308 thread_unlock(td); /* fix dumps */
310 "failed2 to set signal flags for ast p %p "
311 "td %p tda %#x td_ast %#x fl %#x",
312 p, td, tda, td->td_ast, td->td_flags);
321 * Check for signals. Unlocked reads of p_pendingcnt or
322 * p_siglist might cause process-directed signal to be handled
325 if ((tda & TDAI(TDA_SIG)) != 0 || p->p_pendingcnt > 0 ||
326 !SIGISEMPTY(p->p_siglist)) {
327 sigfastblock_fetch(td);
329 old_boundary = ~TDB_BOUNDARY | (td->td_dbgflags & TDB_BOUNDARY);
330 td->td_dbgflags |= TDB_BOUNDARY;
331 mtx_lock(&p->p_sigacts->ps_mtx);
332 while ((sig = cursig(td)) != 0) {
333 KASSERT(sig >= 0, ("sig %d", sig));
336 mtx_unlock(&p->p_sigacts->ps_mtx);
337 td->td_dbgflags &= old_boundary;
341 resched_sigs = false;
345 * Handle deferred update of the fast sigblock value, after
346 * the postsig() loop was performed.
348 sigfastblock_setpend(td, resched_sigs);
352 ast_sigsuspend(struct thread *td, int tda __unused)
354 MPASS((td->td_pflags & TDP_OLDMASK) != 0);
355 td->td_pflags &= ~TDP_OLDMASK;
356 kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
362 ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
363 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
364 uma_prealloc(ksiginfo_zone, preallocate_siginfo);
365 p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
366 p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
367 p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
368 SIGFILLSET(fastblock_mask);
369 SIG_CANTMASK(fastblock_mask);
370 ast_register(TDA_SIG, ASTR_UNCOND, 0, ast_sig);
371 ast_register(TDA_SIGSUSPEND, ASTR_ASTF_REQUIRED | ASTR_TDP,
372 TDP_OLDMASK, ast_sigsuspend);
376 sig_handle_killpg(struct proc *p, ksiginfo_t *ksi)
378 if ((ksi->ksi_flags & KSI_KILLPG) != 0 && p != NULL) {
379 MPASS(atomic_load_int(&p->p_killpg_cnt) > 0);
380 atomic_add_int(&p->p_killpg_cnt, -1);
385 ksiginfo_alloc(int mwait)
387 MPASS(mwait == M_WAITOK || mwait == M_NOWAIT);
389 if (ksiginfo_zone == NULL)
391 return (uma_zalloc(ksiginfo_zone, mwait | M_ZERO));
395 ksiginfo_free(ksiginfo_t *ksi)
397 uma_zfree(ksiginfo_zone, ksi);
401 ksiginfo_tryfree(ksiginfo_t *ksi)
403 if ((ksi->ksi_flags & KSI_EXT) == 0) {
404 uma_zfree(ksiginfo_zone, ksi);
411 sigqueue_init(sigqueue_t *list, struct proc *p)
413 SIGEMPTYSET(list->sq_signals);
414 SIGEMPTYSET(list->sq_kill);
415 SIGEMPTYSET(list->sq_ptrace);
416 TAILQ_INIT(&list->sq_list);
418 list->sq_flags = SQ_INIT;
422 * Get a signal's ksiginfo.
424 * 0 - signal not found
425 * others - signal number
428 sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
430 struct proc *p = sq->sq_proc;
431 struct ksiginfo *ksi, *next;
434 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
436 if (!SIGISMEMBER(sq->sq_signals, signo))
439 if (SIGISMEMBER(sq->sq_ptrace, signo)) {
441 SIGDELSET(sq->sq_ptrace, signo);
442 si->ksi_flags |= KSI_PTRACE;
444 if (SIGISMEMBER(sq->sq_kill, signo)) {
447 SIGDELSET(sq->sq_kill, signo);
450 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
451 if (ksi->ksi_signo == signo) {
453 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
454 ksi->ksi_sigq = NULL;
455 ksiginfo_copy(ksi, si);
456 if (ksiginfo_tryfree(ksi) && p != NULL)
465 SIGDELSET(sq->sq_signals, signo);
466 si->ksi_signo = signo;
471 sigqueue_take(ksiginfo_t *ksi)
477 if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
481 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
482 ksi->ksi_sigq = NULL;
483 sig_handle_killpg(p, ksi);
484 if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
487 for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
488 kp = TAILQ_NEXT(kp, ksi_link)) {
489 if (kp->ksi_signo == ksi->ksi_signo)
492 if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
493 !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
494 SIGDELSET(sq->sq_signals, ksi->ksi_signo);
498 sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
500 struct proc *p = sq->sq_proc;
501 struct ksiginfo *ksi;
504 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
507 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
510 if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
511 SIGADDSET(sq->sq_kill, signo);
515 /* directly insert the ksi, don't copy it */
516 if (si->ksi_flags & KSI_INS) {
517 if (si->ksi_flags & KSI_HEAD)
518 TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
520 TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
525 if (__predict_false(ksiginfo_zone == NULL)) {
526 SIGADDSET(sq->sq_kill, signo);
530 if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
533 } else if ((ksi = ksiginfo_alloc(M_NOWAIT)) == NULL) {
539 ksiginfo_copy(si, ksi);
540 ksi->ksi_signo = signo;
541 if (si->ksi_flags & KSI_HEAD)
542 TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
544 TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
549 if ((si->ksi_flags & KSI_PTRACE) != 0) {
550 SIGADDSET(sq->sq_ptrace, signo);
553 } else if ((si->ksi_flags & KSI_TRAP) != 0 ||
554 (si->ksi_flags & KSI_SIGQ) == 0) {
555 SIGADDSET(sq->sq_kill, signo);
563 SIGADDSET(sq->sq_signals, signo);
568 sigqueue_flush(sigqueue_t *sq)
570 struct proc *p = sq->sq_proc;
573 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
576 PROC_LOCK_ASSERT(p, MA_OWNED);
578 while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
579 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
580 ksi->ksi_sigq = NULL;
581 sig_handle_killpg(p, ksi);
582 if (ksiginfo_tryfree(ksi) && p != NULL)
586 SIGEMPTYSET(sq->sq_signals);
587 SIGEMPTYSET(sq->sq_kill);
588 SIGEMPTYSET(sq->sq_ptrace);
592 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
595 struct proc *p1, *p2;
596 ksiginfo_t *ksi, *next;
598 KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
599 KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
602 /* Move siginfo to target list */
603 TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
604 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
605 TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
608 TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
615 /* Move pending bits to target list */
617 SIGSETAND(tmp, *set);
618 SIGSETOR(dst->sq_kill, tmp);
619 SIGSETNAND(src->sq_kill, tmp);
621 tmp = src->sq_ptrace;
622 SIGSETAND(tmp, *set);
623 SIGSETOR(dst->sq_ptrace, tmp);
624 SIGSETNAND(src->sq_ptrace, tmp);
626 tmp = src->sq_signals;
627 SIGSETAND(tmp, *set);
628 SIGSETOR(dst->sq_signals, tmp);
629 SIGSETNAND(src->sq_signals, tmp);
634 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
639 SIGADDSET(set, signo);
640 sigqueue_move_set(src, dst, &set);
645 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
647 struct proc *p = sq->sq_proc;
648 ksiginfo_t *ksi, *next;
650 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
652 /* Remove siginfo queue */
653 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
654 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
655 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
656 ksi->ksi_sigq = NULL;
657 sig_handle_killpg(p, ksi);
658 if (ksiginfo_tryfree(ksi) && p != NULL)
662 SIGSETNAND(sq->sq_kill, *set);
663 SIGSETNAND(sq->sq_ptrace, *set);
664 SIGSETNAND(sq->sq_signals, *set);
668 sigqueue_delete(sigqueue_t *sq, int signo)
673 SIGADDSET(set, signo);
674 sigqueue_delete_set(sq, &set);
677 /* Remove a set of signals for a process */
679 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
684 PROC_LOCK_ASSERT(p, MA_OWNED);
686 sigqueue_init(&worklist, p);
687 sigqueue_move_set(&p->p_sigqueue, &worklist, set);
689 FOREACH_THREAD_IN_PROC(p, td0)
690 sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
692 sigqueue_flush(&worklist);
696 sigqueue_delete_proc(struct proc *p, int signo)
701 SIGADDSET(set, signo);
702 sigqueue_delete_set_proc(p, &set);
706 sigqueue_delete_stopmask_proc(struct proc *p)
711 SIGADDSET(set, SIGSTOP);
712 SIGADDSET(set, SIGTSTP);
713 SIGADDSET(set, SIGTTIN);
714 SIGADDSET(set, SIGTTOU);
715 sigqueue_delete_set_proc(p, &set);
719 * Determine the signal that should be delivered to thread td, the current
720 * thread; return 0 if none. If there is a pending stop signal with default
721 * action, the process stops in issignal().
724 cursig(struct thread *td)
726 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
727 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
728 THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
729 return (SIGPENDING(td) ? issignal(td) : 0);
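/*
 * Typical caller pattern (sketch), matching the assertions above: take
 * the process lock and ps_mtx, then drain deliverable signals, e.g.
 *
 *	mtx_lock(&p->p_sigacts->ps_mtx);
 *	while ((sig = cursig(td)) != 0)
 *		postsig(sig);
 *	mtx_unlock(&p->p_sigacts->ps_mtx);
 *
 * as done in ast_sig() above and kern_sigsuspend() below.
 */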
733 * Arrange for ast() to handle unmasked pending signals on return to user
734 * mode. This must be called whenever a signal is added to td_sigqueue or
735 * unmasked in td_sigmask.
738 signotify(struct thread *td)
741 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
744 ast_sched(td, TDA_SIG);
748 * Returns 1 (true) if altstack is configured for the thread, and the
749 * passed stack bottom address falls into the altstack range. Handles
750 * the 4.3BSD (COMPAT_43) special case where the alt stack size is zero.
753 sigonstack(size_t sp)
758 if ((td->td_pflags & TDP_ALTSTACK) == 0)
760 #if defined(COMPAT_43)
761 if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0)
762 return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
764 return (sp >= (size_t)td->td_sigstk.ss_sp &&
765 sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
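/*
 * Sketch of how the machine-dependent sendsig() implementations are
 * expected to use this (illustrative; "usp" stands for the interrupted
 * user stack pointer):
 *
 *	if ((td->td_pflags & TDP_ALTSTACK) != 0 &&
 *	    !sigonstack(usp) && SIGISMEMBER(ps->ps_sigonstack, sig))
 *		... build the signal frame on td->td_sigstk ...
 *	else
 *		... build it on the current user stack ...
 */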
772 if (sig > 0 && sig < nitems(sigproptbl))
773 return (sigproptbl[sig]);
778 sigact_flag_test(const struct sigaction *act, int flag)
782 * SA_SIGINFO is reset when signal disposition is set to
783 * ignore or default. Other flags are kept according to user
786 return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
787 ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
788 (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
798 kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
799 struct sigaction *oact, int flags)
802 struct proc *p = td->td_proc;
804 if (!_SIG_VALID(sig))
806 if (act != NULL && act->sa_handler != SIG_DFL &&
807 act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
808 SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
809 SA_NOCLDWAIT | SA_SIGINFO)) != 0)
814 mtx_lock(&ps->ps_mtx);
816 memset(oact, 0, sizeof(*oact));
817 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
818 if (SIGISMEMBER(ps->ps_sigonstack, sig))
819 oact->sa_flags |= SA_ONSTACK;
820 if (!SIGISMEMBER(ps->ps_sigintr, sig))
821 oact->sa_flags |= SA_RESTART;
822 if (SIGISMEMBER(ps->ps_sigreset, sig))
823 oact->sa_flags |= SA_RESETHAND;
824 if (SIGISMEMBER(ps->ps_signodefer, sig))
825 oact->sa_flags |= SA_NODEFER;
826 if (SIGISMEMBER(ps->ps_siginfo, sig)) {
827 oact->sa_flags |= SA_SIGINFO;
829 (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
831 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
832 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
833 oact->sa_flags |= SA_NOCLDSTOP;
834 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
835 oact->sa_flags |= SA_NOCLDWAIT;
838 if ((sig == SIGKILL || sig == SIGSTOP) &&
839 act->sa_handler != SIG_DFL) {
840 mtx_unlock(&ps->ps_mtx);
846 * Change setting atomically.
849 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
850 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
851 if (sigact_flag_test(act, SA_SIGINFO)) {
852 ps->ps_sigact[_SIG_IDX(sig)] =
853 (__sighandler_t *)act->sa_sigaction;
854 SIGADDSET(ps->ps_siginfo, sig);
856 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
857 SIGDELSET(ps->ps_siginfo, sig);
859 if (!sigact_flag_test(act, SA_RESTART))
860 SIGADDSET(ps->ps_sigintr, sig);
862 SIGDELSET(ps->ps_sigintr, sig);
863 if (sigact_flag_test(act, SA_ONSTACK))
864 SIGADDSET(ps->ps_sigonstack, sig);
866 SIGDELSET(ps->ps_sigonstack, sig);
867 if (sigact_flag_test(act, SA_RESETHAND))
868 SIGADDSET(ps->ps_sigreset, sig);
870 SIGDELSET(ps->ps_sigreset, sig);
871 if (sigact_flag_test(act, SA_NODEFER))
872 SIGADDSET(ps->ps_signodefer, sig);
874 SIGDELSET(ps->ps_signodefer, sig);
875 if (sig == SIGCHLD) {
876 if (act->sa_flags & SA_NOCLDSTOP)
877 ps->ps_flag |= PS_NOCLDSTOP;
879 ps->ps_flag &= ~PS_NOCLDSTOP;
880 if (act->sa_flags & SA_NOCLDWAIT) {
882 * Paranoia: since SA_NOCLDWAIT is implemented
883 * by reparenting the dying child to PID 1 (and
884 * trusting it to reap the zombie), PID 1 itself
885 * is forbidden to set SA_NOCLDWAIT.
888 ps->ps_flag &= ~PS_NOCLDWAIT;
890 ps->ps_flag |= PS_NOCLDWAIT;
892 ps->ps_flag &= ~PS_NOCLDWAIT;
893 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
894 ps->ps_flag |= PS_CLDSIGIGN;
896 ps->ps_flag &= ~PS_CLDSIGIGN;
899 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
900 * and for signals set to SIG_DFL where the default is to
901 * ignore. However, don't put SIGCONT in ps_sigignore, as we
902 * have to restart the process.
904 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
905 (sigprop(sig) & SIGPROP_IGNORE &&
906 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
907 /* never to be seen again */
908 sigqueue_delete_proc(p, sig);
910 /* easier in psignal */
911 SIGADDSET(ps->ps_sigignore, sig);
912 SIGDELSET(ps->ps_sigcatch, sig);
914 SIGDELSET(ps->ps_sigignore, sig);
915 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
916 SIGDELSET(ps->ps_sigcatch, sig);
918 SIGADDSET(ps->ps_sigcatch, sig);
920 #ifdef COMPAT_FREEBSD4
921 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
922 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
923 (flags & KSA_FREEBSD4) == 0)
924 SIGDELSET(ps->ps_freebsd4, sig);
926 SIGADDSET(ps->ps_freebsd4, sig);
929 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
930 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
931 (flags & KSA_OSIGSET) == 0)
932 SIGDELSET(ps->ps_osigset, sig);
934 SIGADDSET(ps->ps_osigset, sig);
937 mtx_unlock(&ps->ps_mtx);
942 #ifndef _SYS_SYSPROTO_H_
943 struct sigaction_args {
945 struct sigaction *act;
946 struct sigaction *oact;
950 sys_sigaction(struct thread *td, struct sigaction_args *uap)
952 struct sigaction act, oact;
953 struct sigaction *actp, *oactp;
956 actp = (uap->act != NULL) ? &act : NULL;
957 oactp = (uap->oact != NULL) ? &oact : NULL;
959 error = copyin(uap->act, actp, sizeof(act));
963 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
965 error = copyout(oactp, uap->oact, sizeof(oact));
969 #ifdef COMPAT_FREEBSD4
970 #ifndef _SYS_SYSPROTO_H_
971 struct freebsd4_sigaction_args {
973 struct sigaction *act;
974 struct sigaction *oact;
978 freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
980 struct sigaction act, oact;
981 struct sigaction *actp, *oactp;
984 actp = (uap->act != NULL) ? &act : NULL;
985 oactp = (uap->oact != NULL) ? &oact : NULL;
987 error = copyin(uap->act, actp, sizeof(act));
991 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
993 error = copyout(oactp, uap->oact, sizeof(oact));
996 #endif /* COMPAT_FREEBSD4 */
998 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
999 #ifndef _SYS_SYSPROTO_H_
1000 struct osigaction_args {
1002 struct osigaction *nsa;
1003 struct osigaction *osa;
1007 osigaction(struct thread *td, struct osigaction_args *uap)
1009 struct osigaction sa;
1010 struct sigaction nsa, osa;
1011 struct sigaction *nsap, *osap;
1014 if (uap->signum <= 0 || uap->signum >= ONSIG)
1017 nsap = (uap->nsa != NULL) ? &nsa : NULL;
1018 osap = (uap->osa != NULL) ? &osa : NULL;
1021 error = copyin(uap->nsa, &sa, sizeof(sa));
1024 nsap->sa_handler = sa.sa_handler;
1025 nsap->sa_flags = sa.sa_flags;
1026 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
1028 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1029 if (osap && !error) {
1030 sa.sa_handler = osap->sa_handler;
1031 sa.sa_flags = osap->sa_flags;
1032 SIG2OSIG(osap->sa_mask, sa.sa_mask);
1033 error = copyout(&sa, uap->osa, sizeof(sa));
1038 #if !defined(__i386__)
1039 /* Avoid replicating the same stub everywhere */
1041 osigreturn(struct thread *td, struct osigreturn_args *uap)
1044 return (nosys(td, (struct nosys_args *)uap));
1047 #endif /* COMPAT_43 */
1050 * Initialize signal state for process 0;
1051 * set to ignore signals that are ignored by default.
1054 siginit(struct proc *p)
1061 mtx_lock(&ps->ps_mtx);
1062 for (i = 1; i <= NSIG; i++) {
1063 if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
1064 SIGADDSET(ps->ps_sigignore, i);
1067 mtx_unlock(&ps->ps_mtx);
1072 * Reset specified signal to the default disposition.
1075 sigdflt(struct sigacts *ps, int sig)
1078 mtx_assert(&ps->ps_mtx, MA_OWNED);
1079 SIGDELSET(ps->ps_sigcatch, sig);
1080 if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
1081 SIGADDSET(ps->ps_sigignore, sig);
1082 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1083 SIGDELSET(ps->ps_siginfo, sig);
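/*
 * Illustrative caller (sketch): sigdflt() requires ps_mtx, per the
 * assertion above, e.g.
 *
 *	mtx_lock(&ps->ps_mtx);
 *	sigdflt(ps, sig);
 *	mtx_unlock(&ps->ps_mtx);
 *
 * postsig_done() below relies on it (with ps_mtx already held) to
 * implement SA_RESETHAND.
 */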
1087 * Reset signals for an exec of the specified process.
1090 execsigs(struct proc *p)
1096 * Reset caught signals. Held signals remain held
1097 * through td_sigmask (unless they were caught,
1098 * and are now ignored by default).
1100 PROC_LOCK_ASSERT(p, MA_OWNED);
1102 mtx_lock(&ps->ps_mtx);
1106 * Reset stack state to the user stack.
1107 * Clear set of signals caught on the signal stack.
1110 MPASS(td->td_proc == p);
1111 td->td_sigstk.ss_flags = SS_DISABLE;
1112 td->td_sigstk.ss_size = 0;
1113 td->td_sigstk.ss_sp = 0;
1114 td->td_pflags &= ~TDP_ALTSTACK;
1116 * Reset the "no zombies if child dies" flag, as Solaris does.
1118 ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
1119 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
1120 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
1121 mtx_unlock(&ps->ps_mtx);
1125 * kern_sigprocmask()
1127 * Manipulate signal mask.
1130 kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
1133 sigset_t new_block, oset1;
1138 if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
1139 PROC_LOCK_ASSERT(p, MA_OWNED);
1142 mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
1143 ? MA_OWNED : MA_NOTOWNED);
1145 *oset = td->td_sigmask;
1152 oset1 = td->td_sigmask;
1153 SIGSETOR(td->td_sigmask, *set);
1154 new_block = td->td_sigmask;
1155 SIGSETNAND(new_block, oset1);
1158 SIGSETNAND(td->td_sigmask, *set);
1163 oset1 = td->td_sigmask;
1164 if (flags & SIGPROCMASK_OLD)
1165 SIGSETLO(td->td_sigmask, *set);
1167 td->td_sigmask = *set;
1168 new_block = td->td_sigmask;
1169 SIGSETNAND(new_block, oset1);
1178 * The new_block set contains signals that were not previously
1179 * blocked, but are blocked now.
1181 * In case we block any signal that was not previously blocked
1182 * for td, and the process has the signal pending, try to schedule
1183 * signal delivery to some thread that does not block the
1184 * signal, possibly waking it up.
1186 if (p->p_numthreads != 1)
1187 reschedule_signals(p, new_block, flags);
1191 if (!(flags & SIGPROCMASK_PROC_LOCKED))
1196 #ifndef _SYS_SYSPROTO_H_
1197 struct sigprocmask_args {
1199 const sigset_t *set;
1204 sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
1207 sigset_t *setp, *osetp;
1210 setp = (uap->set != NULL) ? &set : NULL;
1211 osetp = (uap->oset != NULL) ? &oset : NULL;
1213 error = copyin(uap->set, setp, sizeof(set));
1217 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
1218 if (osetp && !error) {
1219 error = copyout(osetp, uap->oset, sizeof(oset));
1224 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1225 #ifndef _SYS_SYSPROTO_H_
1226 struct osigprocmask_args {
1232 osigprocmask(struct thread *td, struct osigprocmask_args *uap)
1237 OSIG2SIG(uap->mask, set);
1238 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
1239 SIG2OSIG(oset, td->td_retval[0]);
1242 #endif /* COMPAT_43 */
1245 sys_sigwait(struct thread *td, struct sigwait_args *uap)
1251 error = copyin(uap->set, &set, sizeof(set));
1253 td->td_retval[0] = error;
1257 error = kern_sigtimedwait(td, set, &ksi, NULL);
1260 * sigwait() function shall not return EINTR, but
1261 * the syscall does. Non-ancient libc provides the
1262 * wrapper which hides EINTR. Otherwise, EINTR return
1263 * is used by libthr to handle the required cancellation
1264 * point in sigwait().
1266 if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
1268 td->td_retval[0] = error;
1272 error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
1273 td->td_retval[0] = error;
1278 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
1281 struct timespec *timeout;
1287 error = copyin(uap->timeout, &ts, sizeof(ts));
1295 error = copyin(uap->set, &set, sizeof(set));
1299 error = kern_sigtimedwait(td, set, &ksi, timeout);
1304 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1307 td->td_retval[0] = ksi.ksi_signo;
1312 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
1318 error = copyin(uap->set, &set, sizeof(set));
1322 error = kern_sigtimedwait(td, set, &ksi, NULL);
1327 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1330 td->td_retval[0] = ksi.ksi_signo;
1335 proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
1339 FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
1343 thr->td_si.si_signo = 0;
1348 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
1349 struct timespec *timeout)
1352 sigset_t saved_mask, new_block;
1354 int error, sig, timevalid = 0;
1355 sbintime_t sbt, precision, tsbt;
1363 /* Ensure the sigfastblock value is up to date. */
1364 sigfastblock_fetch(td);
1366 if (timeout != NULL) {
1367 if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
1370 if (ts.tv_sec < INT32_MAX / 2) {
1373 precision >>= tc_precexp;
1374 if (TIMESEL(&sbt, tsbt))
1378 precision = sbt = 0;
1381 precision = sbt = 0;
1383 /* Some signals can not be waited for. */
1384 SIG_CANTMASK(waitset);
1387 saved_mask = td->td_sigmask;
1388 SIGSETNAND(td->td_sigmask, waitset);
1389 if ((p->p_sysent->sv_flags & SV_SIG_DISCIGN) != 0 ||
1390 !kern_sig_discard_ign) {
1392 td->td_flags |= TDF_SIGWAIT;
1396 mtx_lock(&ps->ps_mtx);
1398 mtx_unlock(&ps->ps_mtx);
1399 KASSERT(sig >= 0, ("sig %d", sig));
1400 if (sig != 0 && SIGISMEMBER(waitset, sig)) {
1401 if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
1402 sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
1412 * POSIX says this must be checked after looking for pending
1415 if (timeout != NULL && !timevalid) {
1425 error = msleep_sbt(&p->p_sigacts, &p->p_mtx, PPAUSE | PCATCH,
1426 "sigwait", sbt, precision, C_ABSOLUTE);
1428 /* The syscalls can not be restarted. */
1429 if (error == ERESTART)
1433 * If PTRACE_SCE or PTRACE_SCX were set after
1434 * userspace entered the syscall, return spurious
1435 * EINTR after the wait was done. Only do this as a last
1436 * resort after rechecking for possible queued signals
1437 * and expired timeouts.
1439 if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
1443 td->td_flags &= ~TDF_SIGWAIT;
1446 new_block = saved_mask;
1447 SIGSETNAND(new_block, td->td_sigmask);
1448 td->td_sigmask = saved_mask;
1450 * Fewer signals can be delivered to us, reschedule signal
1453 if (p->p_numthreads != 1)
1454 reschedule_signals(p, new_block, 0);
1457 SDT_PROBE2(proc, , , signal__clear, sig, ksi);
1459 if (ksi->ksi_code == SI_TIMER)
1460 itimer_accept(p, ksi->ksi_timerid, ksi);
1463 if (KTRPOINT(td, KTR_PSIG)) {
1466 mtx_lock(&ps->ps_mtx);
1467 action = ps->ps_sigact[_SIG_IDX(sig)];
1468 mtx_unlock(&ps->ps_mtx);
1469 ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
1472 if (sig == SIGKILL) {
1473 proc_td_siginfo_capture(td, &ksi->ksi_info);
1474 sigexit1(td, sig, ksi);
1481 #ifndef _SYS_SYSPROTO_H_
1482 struct sigpending_args {
1487 sys_sigpending(struct thread *td, struct sigpending_args *uap)
1489 struct proc *p = td->td_proc;
1493 pending = p->p_sigqueue.sq_signals;
1494 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1496 return (copyout(&pending, uap->set, sizeof(sigset_t)));
1499 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1500 #ifndef _SYS_SYSPROTO_H_
1501 struct osigpending_args {
1506 osigpending(struct thread *td, struct osigpending_args *uap)
1508 struct proc *p = td->td_proc;
1512 pending = p->p_sigqueue.sq_signals;
1513 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1515 SIG2OSIG(pending, td->td_retval[0]);
1518 #endif /* COMPAT_43 */
1520 #if defined(COMPAT_43)
1522 * Generalized interface signal handler, 4.3-compatible.
1524 #ifndef _SYS_SYSPROTO_H_
1525 struct osigvec_args {
1533 osigvec(struct thread *td, struct osigvec_args *uap)
1536 struct sigaction nsa, osa;
1537 struct sigaction *nsap, *osap;
1540 if (uap->signum <= 0 || uap->signum >= ONSIG)
1542 nsap = (uap->nsv != NULL) ? &nsa : NULL;
1543 osap = (uap->osv != NULL) ? &osa : NULL;
1545 error = copyin(uap->nsv, &vec, sizeof(vec));
1548 nsap->sa_handler = vec.sv_handler;
1549 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1550 nsap->sa_flags = vec.sv_flags;
1551 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
1553 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1554 if (osap && !error) {
1555 vec.sv_handler = osap->sa_handler;
1556 SIG2OSIG(osap->sa_mask, vec.sv_mask);
1557 vec.sv_flags = osap->sa_flags;
1558 vec.sv_flags &= ~SA_NOCLDWAIT;
1559 vec.sv_flags ^= SA_RESTART;
1560 error = copyout(&vec, uap->osv, sizeof(vec));
1565 #ifndef _SYS_SYSPROTO_H_
1566 struct osigblock_args {
1571 osigblock(struct thread *td, struct osigblock_args *uap)
1575 OSIG2SIG(uap->mask, set);
1576 kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
1577 SIG2OSIG(oset, td->td_retval[0]);
1581 #ifndef _SYS_SYSPROTO_H_
1582 struct osigsetmask_args {
1587 osigsetmask(struct thread *td, struct osigsetmask_args *uap)
1591 OSIG2SIG(uap->mask, set);
1592 kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
1593 SIG2OSIG(oset, td->td_retval[0]);
1596 #endif /* COMPAT_43 */
1599 * Suspend calling thread until signal, providing mask to be set in the
1602 #ifndef _SYS_SYSPROTO_H_
1603 struct sigsuspend_args {
1604 const sigset_t *sigmask;
1609 sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
1614 error = copyin(uap->sigmask, &mask, sizeof(mask));
1617 return (kern_sigsuspend(td, mask));
1621 kern_sigsuspend(struct thread *td, sigset_t mask)
1623 struct proc *p = td->td_proc;
1626 /* Ensure the sigfastblock value is up to date. */
1627 sigfastblock_fetch(td);
1630 * When returning from sigsuspend, we want
1631 * the old mask to be restored after the
1632 * signal handler has finished. Thus, we
1633 * save it here and mark the sigacts structure
1637 kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
1638 SIGPROCMASK_PROC_LOCKED);
1639 td->td_pflags |= TDP_OLDMASK;
1640 ast_sched(td, TDA_SIGSUSPEND);
1643 * Process signals now. Otherwise, we can get a spurious wakeup
1644 * due to a signal that entered the process queue but was delivered to
1645 * another thread. But sigsuspend should return only on a signal
1648 (p->p_sysent->sv_set_syscall_retval)(td, EINTR);
1649 for (has_sig = 0; !has_sig;) {
1650 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
1653 thread_suspend_check(0);
1654 mtx_lock(&p->p_sigacts->ps_mtx);
1655 while ((sig = cursig(td)) != 0) {
1656 KASSERT(sig >= 0, ("sig %d", sig));
1657 has_sig += postsig(sig);
1659 mtx_unlock(&p->p_sigacts->ps_mtx);
1662 * If PTRACE_SCE or PTRACE_SCX were set after
1663 * userspace entered the syscall, return spurious
1666 if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
1670 td->td_errno = EINTR;
1671 td->td_pflags |= TDP_NERRNO;
1672 return (EJUSTRETURN);
1675 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1677 * Compatibility sigsuspend call for old binaries. Note nonstandard calling
1678 * convention: libc stub passes mask, not pointer, to save a copyin.
1680 #ifndef _SYS_SYSPROTO_H_
1681 struct osigsuspend_args {
1687 osigsuspend(struct thread *td, struct osigsuspend_args *uap)
1691 OSIG2SIG(uap->mask, mask);
1692 return (kern_sigsuspend(td, mask));
1694 #endif /* COMPAT_43 */
1696 #if defined(COMPAT_43)
1697 #ifndef _SYS_SYSPROTO_H_
1698 struct osigstack_args {
1699 struct sigstack *nss;
1700 struct sigstack *oss;
1705 osigstack(struct thread *td, struct osigstack_args *uap)
1707 struct sigstack nss, oss;
1710 if (uap->nss != NULL) {
1711 error = copyin(uap->nss, &nss, sizeof(nss));
1715 oss.ss_sp = td->td_sigstk.ss_sp;
1716 oss.ss_onstack = sigonstack(cpu_getstack(td));
1717 if (uap->nss != NULL) {
1718 td->td_sigstk.ss_sp = nss.ss_sp;
1719 td->td_sigstk.ss_size = 0;
1720 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1721 td->td_pflags |= TDP_ALTSTACK;
1723 if (uap->oss != NULL)
1724 error = copyout(&oss, uap->oss, sizeof(oss));
1728 #endif /* COMPAT_43 */
1730 #ifndef _SYS_SYSPROTO_H_
1731 struct sigaltstack_args {
1738 sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
1743 if (uap->ss != NULL) {
1744 error = copyin(uap->ss, &ss, sizeof(ss));
1748 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1749 (uap->oss != NULL) ? &oss : NULL);
1752 if (uap->oss != NULL)
1753 error = copyout(&oss, uap->oss, sizeof(stack_t));
1758 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1760 struct proc *p = td->td_proc;
1763 oonstack = sigonstack(cpu_getstack(td));
1766 *oss = td->td_sigstk;
1767 oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
1768 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1774 if ((ss->ss_flags & ~SS_DISABLE) != 0)
1776 if (!(ss->ss_flags & SS_DISABLE)) {
1777 if (ss->ss_size < p->p_sysent->sv_minsigstksz)
1780 td->td_sigstk = *ss;
1781 td->td_pflags |= TDP_ALTSTACK;
1783 td->td_pflags &= ~TDP_ALTSTACK;
1789 struct killpg1_ctx {
1799 killpg1_sendsig_locked(struct proc *p, struct killpg1_ctx *arg)
1803 err = p_cansignal(arg->td, p, arg->sig);
1804 if (err == 0 && arg->sig != 0)
1805 pksignal(p, arg->sig, arg->ksi);
1810 else if (arg->ret == 0 && err != ESRCH && err != EPERM)
1815 killpg1_sendsig(struct proc *p, bool notself, struct killpg1_ctx *arg)
1818 if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
1819 (notself && p == arg->td->td_proc) || p->p_state == PRS_NEW)
1823 killpg1_sendsig_locked(p, arg);
1828 kill_processes_prison_cb(struct proc *p, void *arg)
1830 struct killpg1_ctx *ctx = arg;
1832 if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
1833 (p == ctx->td->td_proc) || p->p_state == PRS_NEW)
1836 killpg1_sendsig_locked(p, ctx);
1840 * Common code for kill process group/broadcast kill.
1841 * td is the calling thread, as usual.
1844 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
1848 struct killpg1_ctx arg;
1860 prison_proc_iterate(td->td_ucred->cr_prison,
1861 kill_processes_prison_cb, &arg);
1864 sx_slock(&proctree_lock);
1867 * zero pgid means send to my process group.
1869 pgrp = td->td_proc->p_pgrp;
1872 pgrp = pgfind(pgid);
1874 sx_sunlock(&proctree_lock);
1878 sx_sunlock(&proctree_lock);
1879 if (!sx_try_xlock(&pgrp->pg_killsx)) {
1881 sx_xlock(&pgrp->pg_killsx);
1882 sx_xunlock(&pgrp->pg_killsx);
1885 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1886 killpg1_sendsig(p, false, &arg);
1889 sx_xunlock(&pgrp->pg_killsx);
1891 MPASS(arg.ret != 0 || arg.found || !arg.sent);
1892 if (arg.ret == 0 && !arg.sent)
1893 arg.ret = arg.found ? EPERM : ESRCH;
1897 #ifndef _SYS_SYSPROTO_H_
1905 sys_kill(struct thread *td, struct kill_args *uap)
1908 return (kern_kill(td, uap->pid, uap->signum));
1912 kern_kill(struct thread *td, pid_t pid, int signum)
1919 * A process in capability mode can send signals only to itself.
1920 * The main rationale behind this is that abort(3) is implemented as
1921 * kill(getpid(), SIGABRT).
1923 if (IN_CAPABILITY_MODE(td) && pid != td->td_proc->p_pid)
1926 AUDIT_ARG_SIGNUM(signum);
1928 if ((u_int)signum > _SIG_MAXSIG)
1931 ksiginfo_init(&ksi);
1932 ksi.ksi_signo = signum;
1933 ksi.ksi_code = SI_USER;
1934 ksi.ksi_pid = td->td_proc->p_pid;
1935 ksi.ksi_uid = td->td_ucred->cr_ruid;
1938 /* kill single process */
1939 if ((p = pfind_any(pid)) == NULL)
1941 AUDIT_ARG_PROCESS(p);
1942 error = p_cansignal(td, p, signum);
1943 if (error == 0 && signum)
1944 pksignal(p, signum, &ksi);
1949 case -1: /* broadcast signal */
1950 return (killpg1(td, signum, 0, 1, &ksi));
1951 case 0: /* signal own process group */
1952 ksi.ksi_flags |= KSI_KILLPG;
1953 return (killpg1(td, signum, 0, 0, &ksi));
1954 default: /* negative explicit process group */
1955 ksi.ksi_flags |= KSI_KILLPG;
1956 return (killpg1(td, signum, -pid, 0, &ksi));
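/*
 * Summary of the pid encoding handled above, with the matching
 * userland calls (illustrative):
 *
 *	kill(pid, sig)     pid > 0: exactly one process
 *	kill(-1, sig)      broadcast to all permitted processes
 *	kill(0, sig)       the caller's own process group
 *	kill(-pgid, sig)   the process group pgid
 */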
1962 sys_pdkill(struct thread *td, struct pdkill_args *uap)
1967 AUDIT_ARG_SIGNUM(uap->signum);
1968 AUDIT_ARG_FD(uap->fd);
1969 if ((u_int)uap->signum > _SIG_MAXSIG)
1972 error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
1975 AUDIT_ARG_PROCESS(p);
1976 error = p_cansignal(td, p, uap->signum);
1977 if (error == 0 && uap->signum)
1978 kern_psignal(p, uap->signum);
1983 #if defined(COMPAT_43)
1984 #ifndef _SYS_SYSPROTO_H_
1985 struct okillpg_args {
1992 okillpg(struct thread *td, struct okillpg_args *uap)
1996 AUDIT_ARG_SIGNUM(uap->signum);
1997 AUDIT_ARG_PID(uap->pgid);
1998 if ((u_int)uap->signum > _SIG_MAXSIG)
2001 ksiginfo_init(&ksi);
2002 ksi.ksi_signo = uap->signum;
2003 ksi.ksi_code = SI_USER;
2004 ksi.ksi_pid = td->td_proc->p_pid;
2005 ksi.ksi_uid = td->td_ucred->cr_ruid;
2006 ksi.ksi_flags |= KSI_KILLPG;
2007 return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
2009 #endif /* COMPAT_43 */
2011 #ifndef _SYS_SYSPROTO_H_
2012 struct sigqueue_args {
2015 /* union sigval */ void *value;
2019 sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
2023 sv.sival_ptr = uap->value;
2025 return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
2029 kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value)
2035 if ((u_int)signum > _SIG_MAXSIG)
2039 * Specification says sigqueue can only send signal to
2045 if ((p = pfind_any(pid)) == NULL)
2047 error = p_cansignal(td, p, signum);
2048 if (error == 0 && signum != 0) {
2049 ksiginfo_init(&ksi);
2050 ksi.ksi_flags = KSI_SIGQ;
2051 ksi.ksi_signo = signum;
2052 ksi.ksi_code = SI_QUEUE;
2053 ksi.ksi_pid = td->td_proc->p_pid;
2054 ksi.ksi_uid = td->td_ucred->cr_ruid;
2055 ksi.ksi_value = *value;
2056 error = pksignal(p, ksi.ksi_signo, &ksi);
2063 * Send a signal to a process group. If checkctty is 1,
2064 * limit to members which have a controlling terminal.
2067 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
2072 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
2073 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
2075 if (p->p_state == PRS_NORMAL &&
2076 (checkctty == 0 || p->p_flag & P_CONTROLT))
2077 pksignal(p, sig, ksi);
2084 * Recalculate the signal mask and reset the signal disposition after
2085 * usermode frame for delivery is formed. Should be called after the
2086 * machine-specific (MD) routine, because sysent->sv_sendsig() needs correct
2087 * ps_siginfo and signal mask.
2090 postsig_done(int sig, struct thread *td, struct sigacts *ps)
2094 mtx_assert(&ps->ps_mtx, MA_OWNED);
2095 td->td_ru.ru_nsignals++;
2096 mask = ps->ps_catchmask[_SIG_IDX(sig)];
2097 if (!SIGISMEMBER(ps->ps_signodefer, sig))
2098 SIGADDSET(mask, sig);
2099 kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
2100 SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
2101 if (SIGISMEMBER(ps->ps_sigreset, sig))
2106 * Send a signal caused by a trap to the current thread. If it will be
2107 * caught immediately, deliver it with correct code. Otherwise, post it
2111 trapsignal(struct thread *td, ksiginfo_t *ksi)
2119 sig = ksi->ksi_signo;
2120 KASSERT(_SIG_VALID(sig), ("invalid signal"));
2122 sigfastblock_fetch(td);
2125 mtx_lock(&ps->ps_mtx);
2126 sigmask = td->td_sigmask;
2127 if (td->td_sigblock_val != 0)
2128 SIGSETOR(sigmask, fastblock_mask);
2129 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
2130 !SIGISMEMBER(sigmask, sig)) {
2132 if (KTRPOINT(curthread, KTR_PSIG))
2133 ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
2134 &td->td_sigmask, ksi->ksi_code);
2136 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
2137 ksi, &td->td_sigmask);
2138 postsig_done(sig, td, ps);
2139 mtx_unlock(&ps->ps_mtx);
2142 * Avoid a possible infinite loop if the thread
2143 * masking the signal or process is ignoring the
2146 if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) ||
2147 ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
2148 SIGDELSET(td->td_sigmask, sig);
2149 SIGDELSET(ps->ps_sigcatch, sig);
2150 SIGDELSET(ps->ps_sigignore, sig);
2151 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
2152 td->td_pflags &= ~TDP_SIGFASTBLOCK;
2153 td->td_sigblock_val = 0;
2155 mtx_unlock(&ps->ps_mtx);
2156 p->p_sig = sig; /* XXX to verify code */
2157 tdsendsignal(p, td, sig, ksi);
2162 static struct thread *
2163 sigtd(struct proc *p, int sig, bool fast_sigblock)
2165 struct thread *td, *signal_td;
2167 PROC_LOCK_ASSERT(p, MA_OWNED);
2168 MPASS(!fast_sigblock || p == curproc);
2171 * Check if current thread can handle the signal without
2172 * switching context to another thread.
2174 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig) &&
2175 (!fast_sigblock || curthread->td_sigblock_val == 0))
2178 FOREACH_THREAD_IN_PROC(p, td) {
2179 if (!SIGISMEMBER(td->td_sigmask, sig) && (!fast_sigblock ||
2180 td != curthread || td->td_sigblock_val == 0)) {
2185 if (signal_td == NULL)
2186 signal_td = FIRST_THREAD_IN_PROC(p);
2191 * Send the signal to the process. If the signal has an action, the action
2192 * is usually performed by the target process rather than the caller; we add
2193 * the signal to the set of pending signals for the process.
2196 * o When a stop signal is sent to a sleeping process that takes the
2197 * default action, the process is stopped without awakening it.
2198 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2199 * regardless of the signal action (eg, blocked or ignored).
2201 * Other ignored signals are discarded immediately.
2203 * NB: This function may be entered from the debugger via the "kill" DDB
2204 * command. There is little that can be done to mitigate the possibly messy
2205 * side effects of this unwise possibility.
2208 kern_psignal(struct proc *p, int sig)
2212 ksiginfo_init(&ksi);
2213 ksi.ksi_signo = sig;
2214 ksi.ksi_code = SI_KERNEL;
2215 (void) tdsendsignal(p, NULL, sig, &ksi);
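/*
 * Sender-side entry points layered on tdsendsignal() (sketch):
 * kern_psignal(p, sig) queues a process-directed signal with si_code
 * SI_KERNEL and no extra siginfo, pksignal() lets the caller supply a
 * ksiginfo_t, and tdsignal()/tdksignal() below direct the signal at a
 * specific thread.  All of them expect the process lock, e.g.
 *
 *	PROC_LOCK(p);
 *	kern_psignal(p, SIGHUP);
 *	PROC_UNLOCK(p);
 */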
2219 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
2222 return (tdsendsignal(p, NULL, sig, ksi));
2225 /* Utility function for finding a thread to send signal event to. */
2227 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd)
2231 if (sigev->sigev_notify == SIGEV_THREAD_ID) {
2232 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
2244 tdsignal(struct thread *td, int sig)
2248 ksiginfo_init(&ksi);
2249 ksi.ksi_signo = sig;
2250 ksi.ksi_code = SI_KERNEL;
2251 (void) tdsendsignal(td->td_proc, td, sig, &ksi);
2255 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
2258 (void) tdsendsignal(td->td_proc, td, sig, ksi);
2262 sig_sleepq_abort(struct thread *td, int intrval)
2264 THREAD_LOCK_ASSERT(td, MA_OWNED);
2266 if (intrval == 0 && (td->td_flags & TDF_SIGWAIT) == 0) {
2270 return (sleepq_abort(td, intrval));
2274 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
2277 sigqueue_t *sigqueue;
2284 MPASS(td == NULL || p == td->td_proc);
2285 PROC_LOCK_ASSERT(p, MA_OWNED);
2287 if (!_SIG_VALID(sig))
2288 panic("%s(): invalid signal %d", __func__, sig);
2290 KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
2293 * IEEE Std 1003.1-2001: return success when killing a zombie.
2295 if (p->p_state == PRS_ZOMBIE) {
2296 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
2297 ksiginfo_tryfree(ksi);
2302 KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
2303 prop = sigprop(sig);
2306 td = sigtd(p, sig, false);
2307 sigqueue = &p->p_sigqueue;
2309 sigqueue = &td->td_sigqueue;
2311 SDT_PROBE3(proc, , , signal__send, td, p, sig);
2314 * If the signal is being ignored, then we forget about it
2315 * immediately, except when the target process executes
2316 * sigwait(). (Note: we don't set SIGCONT in ps_sigignore,
2317 * and if it is set to SIG_IGN, action will be SIG_DFL here.)
2319 mtx_lock(&ps->ps_mtx);
2320 if (SIGISMEMBER(ps->ps_sigignore, sig)) {
2321 if (kern_sig_discard_ign &&
2322 (p->p_sysent->sv_flags & SV_SIG_DISCIGN) == 0) {
2323 SDT_PROBE3(proc, , , signal__discard, td, p, sig);
2325 mtx_unlock(&ps->ps_mtx);
2326 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
2327 ksiginfo_tryfree(ksi);
2334 if (SIGISMEMBER(td->td_sigmask, sig))
2336 else if (SIGISMEMBER(ps->ps_sigcatch, sig))
2340 if (SIGISMEMBER(ps->ps_sigintr, sig))
2345 mtx_unlock(&ps->ps_mtx);
2347 if (prop & SIGPROP_CONT)
2348 sigqueue_delete_stopmask_proc(p);
2349 else if (prop & SIGPROP_STOP) {
2351 * If sending a tty stop signal to a member of an orphaned
2352 * process group, discard the signal here if the action
2353 * is default; don't stop the process below if sleeping,
2354 * and don't clear any pending SIGCONT.
2356 if ((prop & SIGPROP_TTYSTOP) != 0 &&
2357 (p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 &&
2358 action == SIG_DFL) {
2359 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
2360 ksiginfo_tryfree(ksi);
2363 sigqueue_delete_proc(p, SIGCONT);
2364 if (p->p_flag & P_CONTINUED) {
2365 p->p_flag &= ~P_CONTINUED;
2366 PROC_LOCK(p->p_pptr);
2367 sigqueue_take(p->p_ksi);
2368 PROC_UNLOCK(p->p_pptr);
2372 ret = sigqueue_add(sigqueue, sig, ksi);
2375 if ((ksi->ksi_flags & KSI_KILLPG) != 0) {
2376 sx_assert(&p->p_pgrp->pg_killsx, SX_XLOCKED);
2377 atomic_add_int(&p->p_killpg_cnt, 1);
2381 * Defer further processing for signals which are held,
2382 * except that stopped processes must be continued by SIGCONT.
2384 if (action == SIG_HOLD &&
2385 !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
2391 * Some signals have a process-wide effect and a per-thread
2392 * component. Most processing occurs when the process next
2393 * tries to cross the user boundary; however, there are some
2394 * times when processing needs to be done immediately, such as
2395 * waking up threads so that they can cross the user boundary.
2396 * We try to do the per-process part here.
2398 if (P_SHOULDSTOP(p)) {
2399 KASSERT(!(p->p_flag & P_WEXIT),
2400 ("signal to stopped but exiting process"));
2401 if (sig == SIGKILL) {
2403 * If traced process is already stopped,
2404 * then no further action is necessary.
2406 if (p->p_flag & P_TRACED)
2409 * SIGKILL sets process running.
2410 * It will die elsewhere.
2411 * All threads must be restarted.
2413 p->p_flag &= ~P_STOPPED_SIG;
2417 if (prop & SIGPROP_CONT) {
2419 * If traced process is already stopped,
2420 * then no further action is necessary.
2422 if (p->p_flag & P_TRACED)
2425 * If SIGCONT is default (or ignored), we continue the
2426 * process but don't leave the signal in sigqueue as
2427 * it has no further action. If SIGCONT is held, we
2428 * continue the process and leave the signal in
2429 * sigqueue. If the process catches SIGCONT, let it
2430 * handle the signal itself. If it isn't waiting on
2431 * an event, it goes back to run state.
2432 * Otherwise, process goes back to sleep state.
2434 p->p_flag &= ~P_STOPPED_SIG;
2436 if (p->p_numthreads == p->p_suspcount) {
2438 p->p_flag |= P_CONTINUED;
2439 p->p_xsig = SIGCONT;
2440 PROC_LOCK(p->p_pptr);
2441 childproc_continued(p);
2442 PROC_UNLOCK(p->p_pptr);
2445 if (action == SIG_DFL) {
2446 thread_unsuspend(p);
2448 sigqueue_delete(sigqueue, sig);
2451 if (action == SIG_CATCH) {
2453 * The process wants to catch it so it needs
2454 * to run at least one thread, but which one?
2460 * The signal is not ignored or caught.
2462 thread_unsuspend(p);
2467 if (prop & SIGPROP_STOP) {
2469 * If traced process is already stopped,
2470 * then no further action is necessary.
2472 if (p->p_flag & P_TRACED)
2475 * Already stopped, don't need to stop again
2476 * (if we did, the shell could get confused).
2477 * Just make sure the signal STOP bit is set.
2479 p->p_flag |= P_STOPPED_SIG;
2480 sigqueue_delete(sigqueue, sig);
2485 * All other kinds of signals:
2486 * If a thread is sleeping interruptibly, simulate a
2487 * wakeup so that when it is continued it will be made
2488 * runnable and can look at the signal. However, don't make
2489 * the PROCESS runnable, leave it stopped.
2490 * It may run a bit until it hits a thread_suspend_check().
2494 if (TD_CAN_ABORT(td))
2495 wakeup_swapper = sig_sleepq_abort(td, intrval);
2501 * Mutexes are short lived. Threads waiting on them will
2502 * hit thread_suspend_check() soon.
2504 } else if (p->p_state == PRS_NORMAL) {
2505 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2506 tdsigwakeup(td, sig, action, intrval);
2510 MPASS(action == SIG_DFL);
2512 if (prop & SIGPROP_STOP) {
2513 if (p->p_flag & (P_PPWAIT|P_WEXIT))
2515 p->p_flag |= P_STOPPED_SIG;
2518 wakeup_swapper = sig_suspend_threads(td, p);
2519 if (p->p_numthreads == p->p_suspcount) {
2521 * Only a thread sending a signal to another
2522 * process can reach here; if a thread sends a
2523 * signal to its own process it does not suspend
2524 * itself here, so p_numthreads should never be
2525 * equal to p_suspcount.
2529 sigqueue_delete_proc(p, p->p_xsig);
2535 /* Not in "NORMAL" state. discard the signal. */
2536 sigqueue_delete(sigqueue, sig);
2541 * The process is not stopped so we need to apply the signal to all the
2545 tdsigwakeup(td, sig, action, intrval);
2547 thread_unsuspend(p);
2550 itimer_proc_continue(p);
2551 kqtimer_proc_continue(p);
2553 /* If we jump here, proc slock should not be owned. */
2554 PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
2562 * The force of a signal has been directed against a single
2563 * thread. We need to see what we can do about knocking it
2564 * out of any sleep it may be in etc.
2567 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2569 struct proc *p = td->td_proc;
2570 int prop, wakeup_swapper;
2572 PROC_LOCK_ASSERT(p, MA_OWNED);
2573 prop = sigprop(sig);
2578 * Bring the priority of a thread up if we want it to get
2579 * killed in this lifetime. Be careful to avoid bumping the
2580 * priority of the idle thread, since we still allow to signal
2583 if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2584 td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2585 sched_prio(td, PUSER);
2586 if (TD_ON_SLEEPQ(td)) {
2588 * If thread is sleeping uninterruptibly
2589 * we can't interrupt the sleep... the signal will
2590 * be noticed when the process returns through
2591 * trap() or syscall().
2593 if ((td->td_flags & TDF_SINTR) == 0)
2596 * If SIGCONT is default (or ignored) and process is
2597 * asleep, we are finished; the process should not
2600 if ((prop & SIGPROP_CONT) && action == SIG_DFL) {
2603 sigqueue_delete(&p->p_sigqueue, sig);
2605 * It may be on either list in this state.
2606 * Remove from both for now.
2608 sigqueue_delete(&td->td_sigqueue, sig);
2613 * Don't awaken a sleeping thread for SIGSTOP if the
2614 * STOP signal is deferred.
2616 if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
2617 TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2621 * Give low priority threads a better chance to run.
2623 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2624 sched_prio(td, PUSER);
2626 wakeup_swapper = sig_sleepq_abort(td, intrval);
2634 * Other states do nothing with the signal immediately,
2635 * other than kicking ourselves if we are running.
2636 * It will either never be noticed, or noticed very soon.
2639 if (TD_IS_RUNNING(td) && td != curthread)
2649 ptrace_coredumpreq(struct thread *td, struct proc *p,
2650 struct thr_coredump_req *tcq)
2654 if (p->p_sysent->sv_coredump == NULL) {
2655 tcq->tc_error = ENOSYS;
2659 rl_cookie = vn_rangelock_wlock(tcq->tc_vp, 0, OFF_MAX);
2660 tcq->tc_error = p->p_sysent->sv_coredump(td, tcq->tc_vp,
2661 tcq->tc_limit, tcq->tc_flags);
2662 vn_rangelock_unlock(tcq->tc_vp, rl_cookie);
2666 ptrace_syscallreq(struct thread *td, struct proc *p,
2667 struct thr_syscall_req *tsr)
2669 struct sysentvec *sv;
2671 register_t rv_saved[2];
2674 bool audited, sy_thr_static;
2677 if (sv->sv_table == NULL || sv->sv_size < tsr->ts_sa.code) {
2678 tsr->ts_ret.sr_error = ENOSYS;
2682 sc = tsr->ts_sa.code;
2683 if (sc == SYS_syscall || sc == SYS___syscall) {
2684 sc = tsr->ts_sa.args[0];
2685 memmove(&tsr->ts_sa.args[0], &tsr->ts_sa.args[1],
2686 sizeof(register_t) * (tsr->ts_nargs - 1));
2689 tsr->ts_sa.callp = se = &sv->sv_table[sc];
2691 VM_CNT_INC(v_syscall);
2693 if (__predict_false(td->td_cowgen != atomic_load_int(
2694 &td->td_proc->p_cowgen)))
2695 thread_cow_update(td);
2697 #ifdef CAPABILITY_MODE
2698 if (IN_CAPABILITY_MODE(td) && (se->sy_flags & SYF_CAPENABLED) == 0) {
2699 tsr->ts_ret.sr_error = ECAPMODE;
2704 sy_thr_static = (se->sy_thrcnt & SY_THR_STATIC) != 0;
2705 audited = AUDIT_SYSCALL_ENTER(sc, td) != 0;
2707 if (!sy_thr_static) {
2708 error = syscall_thread_enter(td, se);
2710 tsr->ts_ret.sr_error = error;
2715 rv_saved[0] = td->td_retval[0];
2716 rv_saved[1] = td->td_retval[1];
2717 nerror = td->td_errno;
2718 td->td_retval[0] = 0;
2719 td->td_retval[1] = 0;
2721 #ifdef KDTRACE_HOOKS
2722 if (se->sy_entry != 0)
2723 (*systrace_probe_func)(&tsr->ts_sa, SYSTRACE_ENTRY, 0);
2725 tsr->ts_ret.sr_error = se->sy_call(td, tsr->ts_sa.args);
2726 #ifdef KDTRACE_HOOKS
2727 if (se->sy_return != 0)
2728 (*systrace_probe_func)(&tsr->ts_sa, SYSTRACE_RETURN,
2729 tsr->ts_ret.sr_error != 0 ? -1 : td->td_retval[0]);
2732 tsr->ts_ret.sr_retval[0] = td->td_retval[0];
2733 tsr->ts_ret.sr_retval[1] = td->td_retval[1];
2734 td->td_retval[0] = rv_saved[0];
2735 td->td_retval[1] = rv_saved[1];
2736 td->td_errno = nerror;
2739 AUDIT_SYSCALL_EXIT(error, td);
2741 syscall_thread_exit(td, se);
2745 ptrace_remotereq(struct thread *td, int flag)
2749 MPASS(td == curthread);
2751 PROC_LOCK_ASSERT(p, MA_OWNED);
2752 if ((td->td_dbgflags & flag) == 0)
2754 KASSERT((p->p_flag & P_STOPPED_TRACE) != 0, ("not stopped"));
2755 KASSERT(td->td_remotereq != NULL, ("td_remotereq is NULL"));
2759 case TDB_COREDUMPREQ:
2760 ptrace_coredumpreq(td, p, td->td_remotereq);
2762 case TDB_SCREMOTEREQ:
2763 ptrace_syscallreq(td, p, td->td_remotereq);
2770 MPASS((td->td_dbgflags & flag) != 0);
2771 td->td_dbgflags &= ~flag;
2772 td->td_remotereq = NULL;
2777 sig_suspend_threads(struct thread *td, struct proc *p)
2782 PROC_LOCK_ASSERT(p, MA_OWNED);
2783 PROC_SLOCK_ASSERT(p, MA_OWNED);
2786 FOREACH_THREAD_IN_PROC(p, td2) {
2788 ast_sched_locked(td2, TDA_SUSPEND);
2789 if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
2790 (td2->td_flags & TDF_SINTR)) {
2791 if (td2->td_flags & TDF_SBDRY) {
2793 * Once a thread is asleep with
2794 * TDF_SBDRY and without TDF_SERESTART
2795 * or TDF_SEINTR set, it should never
2796 * become suspended due to this check.
2798 KASSERT(!TD_IS_SUSPENDED(td2),
2799 ("thread with deferred stops suspended"));
2800 if (TD_SBDRY_INTR(td2)) {
2801 wakeup_swapper |= sleepq_abort(td2,
2802 TD_SBDRY_ERRNO(td2));
2805 } else if (!TD_IS_SUSPENDED(td2))
2806 thread_suspend_one(td2);
2807 } else if (!TD_IS_SUSPENDED(td2)) {
2809 if (TD_IS_RUNNING(td2) && td2 != td)
2810 forward_signal(td2);
2815 return (wakeup_swapper);
2819 * Stop the process for an event deemed interesting to the debugger. If si is
2820 * non-NULL, this is a signal exchange; the new signal requested by the
2821 * debugger will be returned for handling. If si is NULL, this is some other
2822 * type of interesting event. The debugger may request a signal be delivered in
2823 * that case as well, however it will be deferred until it can be handled.
2826 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
2828 struct proc *p = td->td_proc;
2832 PROC_LOCK_ASSERT(p, MA_OWNED);
2833 KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2834 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2835 &p->p_mtx.lock_object, "Stopping for traced signal");
2839 if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
2840 td->td_dbgflags |= TDB_XSIG;
2841 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
2842 td->td_tid, p->p_pid, td->td_dbgflags, sig);
2844 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
2847 * Ensure that, if we've been PT_KILLed, the
2848 * exit status reflects that. Another thread
2849 * may also be in ptracestop(), having just
2850 * received the SIGKILL, but this thread was
2851 * unsuspended first.
2853 td->td_dbgflags &= ~TDB_XSIG;
2854 td->td_xsig = SIGKILL;
2858 if (p->p_flag & P_SINGLE_EXIT &&
2859 !(td->td_dbgflags & TDB_EXIT)) {
2861 * Ignore ptrace stops except for thread exit
2862 * events when the process exits.
2864 td->td_dbgflags &= ~TDB_XSIG;
2870 * Make wait(2) work. Ensure that right after the
2871 * attach, the thread which was decided to become the
2872 * leader of attach gets reported to the waiter.
2873 * Otherwise, just avoid overwriting another thread's
2874 * assignment to p_xthread. If another thread has
2875 * already set p_xthread, the current thread will get
2876 * a chance to report itself upon the next iteration.
2878 if ((td->td_dbgflags & TDB_FSTP) != 0 ||
2879 ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
2880 p->p_xthread == NULL)) {
2885 * If we are on sleepqueue already,
2886 * let sleepqueue code decide if it
2887 * needs to go to sleep after the attach.
2889 if (td->td_wchan == NULL)
2890 td->td_dbgflags &= ~TDB_FSTP;
2892 p->p_flag2 &= ~P2_PTRACE_FSTP;
2893 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
2894 sig_suspend_threads(td, p);
2896 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
2897 td->td_dbgflags &= ~TDB_STOPATFORK;
2900 td->td_dbgflags |= TDB_SSWITCH;
2901 thread_suspend_switch(td, p);
2902 td->td_dbgflags &= ~TDB_SSWITCH;
2903 if ((td->td_dbgflags & (TDB_COREDUMPREQ |
2904 TDB_SCREMOTEREQ)) != 0) {
2905 MPASS((td->td_dbgflags & (TDB_COREDUMPREQ |
2906 TDB_SCREMOTEREQ)) !=
2907 (TDB_COREDUMPREQ | TDB_SCREMOTEREQ));
2909 ptrace_remotereq(td, td->td_dbgflags &
2910 (TDB_COREDUMPREQ | TDB_SCREMOTEREQ));
2914 if (p->p_xthread == td)
2915 p->p_xthread = NULL;
2916 if (!(p->p_flag & P_TRACED))
2918 if (td->td_dbgflags & TDB_SUSPEND) {
2919 if (p->p_flag & P_SINGLE_EXIT)
2927 if (si != NULL && sig == td->td_xsig) {
2928 /* Parent wants us to take the original signal unchanged. */
2929 si->ksi_flags |= KSI_HEAD;
2930 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
2932 } else if (td->td_xsig != 0) {
2934 * If parent wants us to take a new signal, then it will leave
2935 * it in td->td_xsig; otherwise we just look for signals again.
2937 ksiginfo_init(&ksi);
2938 ksi.ksi_signo = td->td_xsig;
2939 ksi.ksi_flags |= KSI_PTRACE;
2940 td2 = sigtd(p, td->td_xsig, false);
2941 tdsendsignal(p, td2, td->td_xsig, &ksi);
2946 return (td->td_xsig);
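/*
 * Illustrative userland view of the signal exchange handled by ptracestop()
 * above (a hedged sketch of typical debugger usage): the debugger observes
 * the stop via waitpid() and chooses, through the "data" argument of
 * PT_CONTINUE, which signal the resumed thread should take; that choice is
 * what ends up in td_xsig.
 *
 *	ptrace(PT_ATTACH, pid, NULL, 0);
 *	waitpid(pid, &status, 0);
 *	sig = WSTOPSIG(status);
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1,
 *	    sig == SIGTRAP ? 0 : sig);	(passing 0 as data discards the signal)
 */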
2950 reschedule_signals(struct proc *p, sigset_t block, int flags)
2955 bool fastblk, pslocked;
2957 PROC_LOCK_ASSERT(p, MA_OWNED);
2959 pslocked = (flags & SIGPROCMASK_PS_LOCKED) != 0;
2960 mtx_assert(&ps->ps_mtx, pslocked ? MA_OWNED : MA_NOTOWNED);
2961 if (SIGISEMPTY(p->p_siglist))
2963 SIGSETAND(block, p->p_siglist);
2964 fastblk = (flags & SIGPROCMASK_FASTBLK) != 0;
2965 SIG_FOREACH(sig, &block) {
2966 td = sigtd(p, sig, fastblk);
2969 * If sigtd() selected us even though sigfastblock is
2970 * blocking, do not schedule an AST or wake us, to avoid
2971 * a loop in the AST handler.
2973 if (fastblk && td == curthread)
2978 mtx_lock(&ps->ps_mtx);
2979 if (p->p_flag & P_TRACED ||
2980 (SIGISMEMBER(ps->ps_sigcatch, sig) &&
2981 !SIGISMEMBER(td->td_sigmask, sig))) {
2982 tdsigwakeup(td, sig, SIG_CATCH,
2983 (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
2987 mtx_unlock(&ps->ps_mtx);
2992 tdsigcleanup(struct thread *td)
2998 PROC_LOCK_ASSERT(p, MA_OWNED);
3000 sigqueue_flush(&td->td_sigqueue);
3001 if (p->p_numthreads == 1)
3005 * Since we cannot handle signals, notify signal post code
3006 * about this by filling the sigmask.
3008 * Also, if needed, wake up thread(s) that do not block the
3009 * same signals as the exiting thread, since the thread might
3010 * have been selected for delivery and woken up.
3012 SIGFILLSET(unblocked);
3013 SIGSETNAND(unblocked, td->td_sigmask);
3014 SIGFILLSET(td->td_sigmask);
3015 reschedule_signals(p, unblocked, 0);
3020 sigdeferstop_curr_flags(int cflags)
3023 MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
3024 (cflags & TDF_SBDRY) != 0);
3025 return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
3029 * Defer the delivery of SIGSTOP for the current thread, according to
3030 * the requested mode. Returns previous flags, which must be restored
3031 * by sigallowstop().
3033 * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
3034 * cleared by the current thread, which allows the lock-less read-only accesses below.
3038 sigdeferstop_impl(int mode)
3044 cflags = sigdeferstop_curr_flags(td->td_flags);
3046 case SIGDEFERSTOP_NOP:
3049 case SIGDEFERSTOP_OFF:
3052 case SIGDEFERSTOP_SILENT:
3053 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
3055 case SIGDEFERSTOP_EINTR:
3056 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
3058 case SIGDEFERSTOP_ERESTART:
3059 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
3062 panic("sigdeferstop: invalid mode %x", mode);
3065 if (cflags == nflags)
3066 return (SIGDEFERSTOP_VAL_NCHG);
3068 td->td_flags = (td->td_flags & ~cflags) | nflags;
3074 * Restores the STOP handling mode, typically permitting the delivery
3075 * of SIGSTOP for the current thread. This does not immediately
3076 * suspend if a stop was posted. Instead, the thread will suspend
3077 * either via ast() or a subsequent interruptible sleep.
3080 sigallowstop_impl(int prev)
3085 KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
3086 KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
3087 ("sigallowstop: incorrect previous mode %x", prev));
3089 cflags = sigdeferstop_curr_flags(td->td_flags);
3090 if (cflags != prev) {
3092 td->td_flags = (td->td_flags & ~cflags) | prev;
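/*
 * Typical in-kernel usage of the two helpers above, sketched under the
 * assumption that callers go through the sigdeferstop()/sigallowstop()
 * wrappers and must not be suspended by a stop signal across a blocking
 * operation (the callee below is hypothetical):
 *
 *	int stop_state;
 *
 *	stop_state = sigdeferstop(SIGDEFERSTOP_SILENT);
 *	error = some_sleeping_operation();
 *	sigallowstop(stop_state);
 */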
3101 SIGSTATUS_SBDRY_STOP,
3105 * The thread has signal "sig" pending. Figure out what to do with it:
3107 * _HANDLE -> the caller should handle the signal
3108 * _HANDLED -> handled internally, reload pending signal set
3109 * _IGNORE -> ignored, remove from the set of pending signals and try the
3110 * next pending signal
3111 * _SBDRY_STOP -> the signal should stop the thread but this is not
3112 * permitted in the current context
3114 static enum sigstatus
3115 sigprocess(struct thread *td, int sig)
3119 struct sigqueue *queue;
3123 KASSERT(_SIG_VALID(sig), ("%s: invalid signal %d", __func__, sig));
3127 mtx_assert(&ps->ps_mtx, MA_OWNED);
3128 PROC_LOCK_ASSERT(p, MA_OWNED);
3131 * We should allow pending but ignored signals below
3132 * if there is sigwait() active, or P_TRACED was
3133 * on when they were posted.
3135 if (SIGISMEMBER(ps->ps_sigignore, sig) &&
3136 (p->p_flag & P_TRACED) == 0 &&
3137 (td->td_flags & TDF_SIGWAIT) == 0) {
3138 return (SIGSTATUS_IGNORE);
3142 * If the process is going to single-thread mode to prepare
3143 * for exit, there is no sense in delivering any signal
3144 * to usermode. Another important consequence is that
3145 * msleep(..., PCATCH, ...) now is only interruptible by a suspend request.
3148 if ((p->p_flag2 & P2_WEXIT) != 0)
3149 return (SIGSTATUS_IGNORE);
3151 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) {
3153 * If traced, always stop.
3154 * Remove old signal from queue before the stop.
3155 * XXX shrug off debugger, it causes siginfo to
3158 queue = &td->td_sigqueue;
3159 ksiginfo_init(&ksi);
3160 if (sigqueue_get(queue, sig, &ksi) == 0) {
3161 queue = &p->p_sigqueue;
3162 sigqueue_get(queue, sig, &ksi);
3164 td->td_si = ksi.ksi_info;
3166 mtx_unlock(&ps->ps_mtx);
3167 sig = ptracestop(td, sig, &ksi);
3168 mtx_lock(&ps->ps_mtx);
3170 td->td_si.si_signo = 0;
3173 * Keep looking if the debugger discarded or
3174 * replaced the signal.
3177 return (SIGSTATUS_HANDLED);
3180 * If the signal became masked, re-queue it.
3182 if (SIGISMEMBER(td->td_sigmask, sig)) {
3183 ksi.ksi_flags |= KSI_HEAD;
3184 sigqueue_add(&p->p_sigqueue, sig, &ksi);
3185 return (SIGSTATUS_HANDLED);
3189 * If the traced bit got turned off, requeue the signal and
3190 * reload the set of pending signals. This ensures that p_sig*
3191 * and p_sigact are consistent.
3193 if ((p->p_flag & P_TRACED) == 0) {
3194 if ((ksi.ksi_flags & KSI_PTRACE) == 0) {
3195 ksi.ksi_flags |= KSI_HEAD;
3196 sigqueue_add(queue, sig, &ksi);
3198 return (SIGSTATUS_HANDLED);
3203 * Decide whether the signal should be returned.
3204 * Return the signal's number, or fall through
3205 * to clear it from the pending mask.
3207 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
3208 case (intptr_t)SIG_DFL:
3210 * Don't take default actions on system processes.
3212 if (p->p_pid <= 1) {
3215 * Are you sure you want to ignore SIGSEGV in init?
3218 printf("Process (pid %lu) got signal %d\n",
3219 (u_long)p->p_pid, sig);
3221 return (SIGSTATUS_IGNORE);
3225 * If there is a pending stop signal to process with
3226 * default action, stop here, then clear the signal.
3227 * Traced or exiting processes should ignore stops.
3228 * Additionally, a member of an orphaned process group
3229 * should ignore tty stops.
3231 prop = sigprop(sig);
3232 if (prop & SIGPROP_STOP) {
3233 mtx_unlock(&ps->ps_mtx);
3234 if ((p->p_flag & (P_TRACED | P_WEXIT |
3235 P_SINGLE_EXIT)) != 0 || ((p->p_pgrp->
3236 pg_flags & PGRP_ORPHANED) != 0 &&
3237 (prop & SIGPROP_TTYSTOP) != 0)) {
3238 mtx_lock(&ps->ps_mtx);
3239 return (SIGSTATUS_IGNORE);
3241 if (TD_SBDRY_INTR(td)) {
3242 KASSERT((td->td_flags & TDF_SBDRY) != 0,
3243 ("lost TDF_SBDRY"));
3244 mtx_lock(&ps->ps_mtx);
3245 return (SIGSTATUS_SBDRY_STOP);
3247 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
3248 &p->p_mtx.lock_object, "Catching SIGSTOP");
3249 sigqueue_delete(&td->td_sigqueue, sig);
3250 sigqueue_delete(&p->p_sigqueue, sig);
3251 p->p_flag |= P_STOPPED_SIG;
3254 sig_suspend_threads(td, p);
3255 thread_suspend_switch(td, p);
3257 mtx_lock(&ps->ps_mtx);
3258 return (SIGSTATUS_HANDLED);
3259 } else if ((prop & SIGPROP_IGNORE) != 0 &&
3260 (td->td_flags & TDF_SIGWAIT) == 0) {
3262 * Default action is to ignore; drop it if
3263 * not in kern_sigtimedwait().
3265 return (SIGSTATUS_IGNORE);
3267 return (SIGSTATUS_HANDLE);
3270 case (intptr_t)SIG_IGN:
3271 if ((td->td_flags & TDF_SIGWAIT) == 0)
3272 return (SIGSTATUS_IGNORE);
3274 return (SIGSTATUS_HANDLE);
3278 * This signal has an action, let postsig() process it.
3280 return (SIGSTATUS_HANDLE);
3285 * If the current process has received a signal (should be caught or cause
3286 * termination, should interrupt current syscall), return the signal number.
3287 * Stop signals with default action are processed immediately, then cleared;
3288 * they aren't returned. This is checked after each entry to the system for
3289 * a syscall or trap (though this can usually be done without calling
3290 * issignal by checking the pending signal masks in cursig.) The normal call
3293 * sequence is:  while (sig = cursig(curthread)) postsig(sig);
3297 issignal(struct thread *td)
3300 sigset_t sigpending;
3304 PROC_LOCK_ASSERT(p, MA_OWNED);
3307 sigpending = td->td_sigqueue.sq_signals;
3308 SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
3309 SIGSETNAND(sigpending, td->td_sigmask);
3311 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
3312 (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
3313 SIG_STOPSIGMASK(sigpending);
3314 if (SIGISEMPTY(sigpending)) /* no signal to send */
3318 * Do fast sigblock if requested by usermode. Since
3319 * we do know that there was a signal pending at this
3320 * point, set SIGFASTBLOCK_PEND as an indicator for
3321 * usermode to perform a dummy call to
3322 * sigfastblock(SIGFASTBLOCK_UNBLOCK), which causes
3323 * immediate delivery of the postponed pending signal.
3325 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
3326 if (td->td_sigblock_val != 0)
3327 SIGSETNAND(sigpending, fastblock_mask);
3328 if (SIGISEMPTY(sigpending)) {
3329 td->td_pflags |= TDP_SIGFASTPENDING;
3334 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
3335 (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
3336 SIGISMEMBER(sigpending, SIGSTOP)) {
3338 * If debugger just attached, always consume
3339 * SIGSTOP from ptrace(PT_ATTACH) first, to
3340 * execute the debugger attach ritual in order.
3343 td->td_dbgflags |= TDB_FSTP;
3344 SIGEMPTYSET(sigpending);
3345 SIGADDSET(sigpending, SIGSTOP);
3348 SIG_FOREACH(sig, &sigpending) {
3349 switch (sigprocess(td, sig)) {
3350 case SIGSTATUS_HANDLE:
3352 case SIGSTATUS_HANDLED:
3354 case SIGSTATUS_IGNORE:
3355 sigqueue_delete(&td->td_sigqueue, sig);
3356 sigqueue_delete(&p->p_sigqueue, sig);
3358 case SIGSTATUS_SBDRY_STOP:
3367 thread_stopped(struct proc *p)
3371 PROC_LOCK_ASSERT(p, MA_OWNED);
3372 PROC_SLOCK_ASSERT(p, MA_OWNED);
3376 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
3378 p->p_flag &= ~P_WAITED;
3379 PROC_LOCK(p->p_pptr);
3380 childproc_stopped(p, (p->p_flag & P_TRACED) ?
3381 CLD_TRAPPED : CLD_STOPPED);
3382 PROC_UNLOCK(p->p_pptr);
3388 * Take the action for the specified signal
3389 * from the current set of pending signals.
3399 sigset_t returnmask;
3401 KASSERT(sig != 0, ("postsig"));
3405 PROC_LOCK_ASSERT(p, MA_OWNED);
3407 mtx_assert(&ps->ps_mtx, MA_OWNED);
3408 ksiginfo_init(&ksi);
3409 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3410 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3412 ksi.ksi_signo = sig;
3413 if (ksi.ksi_code == SI_TIMER)
3414 itimer_accept(p, ksi.ksi_timerid, &ksi);
3415 action = ps->ps_sigact[_SIG_IDX(sig)];
3417 if (KTRPOINT(td, KTR_PSIG))
3418 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
3419 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
3422 if (action == SIG_DFL) {
3424 * Default action, where the default is to kill
3425 * the process. (Other cases were ignored above.)
3427 mtx_unlock(&ps->ps_mtx);
3428 proc_td_siginfo_capture(td, &ksi.ksi_info);
3429 sigexit1(td, sig, &ksi);
3433 * If we get here, the signal must be caught.
3435 KASSERT(action != SIG_IGN, ("postsig action %p", action));
3436 KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
3437 ("postsig action: blocked sig %d", sig));
3440 * Set the new mask value and also defer further
3441 * occurrences of this signal.
3443 * Special case: user has done a sigsuspend. Here the
3444 * current mask is not of interest, but rather the
3445 * mask from before the sigsuspend is what we want
3446 * restored after the signal processing is completed.
3448 if (td->td_pflags & TDP_OLDMASK) {
3449 returnmask = td->td_oldsigmask;
3450 td->td_pflags &= ~TDP_OLDMASK;
3452 returnmask = td->td_sigmask;
3454 if (p->p_sig == sig) {
3457 sig_handle_killpg(p, &ksi);
3458 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3459 postsig_done(sig, td, ps);
3465 sig_ast_checksusp(struct thread *td)
3467 struct proc *p __diagused;
3471 PROC_LOCK_ASSERT(p, MA_OWNED);
3473 if (!td_ast_pending(td, TDA_SUSPEND))
3476 ret = thread_suspend_check(1);
3477 MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
3482 sig_ast_needsigchk(struct thread *td)
3489 PROC_LOCK_ASSERT(p, MA_OWNED);
3491 if (!td_ast_pending(td, TDA_SIG))
3495 mtx_lock(&ps->ps_mtx);
3498 mtx_unlock(&ps->ps_mtx);
3499 KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY"));
3500 KASSERT(TD_SBDRY_INTR(td),
3501 ("lost TDF_SERESTART of TDF_SEINTR"));
3502 KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
3503 (TDF_SEINTR | TDF_SERESTART),
3504 ("both TDF_SEINTR and TDF_SERESTART"));
3505 ret = TD_SBDRY_ERRNO(td);
3506 } else if (sig != 0) {
3507 ret = SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : ERESTART;
3508 mtx_unlock(&ps->ps_mtx);
3510 mtx_unlock(&ps->ps_mtx);
3515 * Do not go into sleep if this thread was the ptrace(2)
3516 * attach leader. cursig() consumed SIGSTOP from PT_ATTACH,
3517 * but we usually act on the signal by interrupting sleep, and
3518 * should do that here as well.
3520 if ((td->td_dbgflags & TDB_FSTP) != 0) {
3523 td->td_dbgflags &= ~TDB_FSTP;
3537 if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND))
3543 ret = sig_ast_checksusp(td);
3545 ret = sig_ast_needsigchk(td);
3551 curproc_sigkilled(void)
3559 if (!td_ast_pending(td, TDA_SIG))
3565 mtx_lock(&ps->ps_mtx);
3566 res = SIGISMEMBER(td->td_sigqueue.sq_signals, SIGKILL) ||
3567 SIGISMEMBER(p->p_sigqueue.sq_signals, SIGKILL);
3568 mtx_unlock(&ps->ps_mtx);
3574 proc_wkilled(struct proc *p)
3577 PROC_LOCK_ASSERT(p, MA_OWNED);
3578 if ((p->p_flag & P_WKILLED) == 0) {
3579 p->p_flag |= P_WKILLED;
3581 * Notify swapper that there is a process to swap in.
3582 * The notification is racy, at worst it would take 10
3583 * seconds for the swapper process to notice.
3585 if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0)
3591 * Kill the current process for stated reason.
3594 killproc(struct proc *p, const char *why)
3597 PROC_LOCK_ASSERT(p, MA_OWNED);
3598 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
3600 log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n",
3601 p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id,
3602 p->p_ucred->cr_uid, why);
3604 kern_psignal(p, SIGKILL);
3608 * Force the current process to exit with the specified signal, dumping core
3609 * if appropriate. We bypass the normal tests for masked and caught signals,
3610 * allowing unrecoverable failures to terminate the process without changing
3611 * signal state. Mark the accounting record with the signal termination.
3612 * If dumping core, save the signal number for the debugger. Calls exit and does not return.
3616 sigexit1(struct thread *td, int sig, ksiginfo_t *ksi)
3618 struct proc *p = td->td_proc;
3620 PROC_LOCK_ASSERT(p, MA_OWNED);
3621 proc_set_p2_wexit(p);
3623 p->p_acflag |= AXSIG;
3625 * We must be single-threading to generate a core dump. This
3626 * ensures that the registers in the core file are up-to-date.
3627 * Also, the ELF dump handler assumes that the thread list doesn't
3628 * change out from under it.
3630 * XXX If another thread attempts to single-thread before us
3631 * (e.g. via fork()), we won't get a dump at all.
3633 if ((sigprop(sig) & SIGPROP_CORE) &&
3634 thread_single(p, SINGLE_NO_EXIT) == 0) {
3637 * Log signals which would cause core dumps
3638 * (Log as LOG_INFO to appease those who don't want these messages.)
3640 * XXX : Todo, as well as euid, write out ruid too
3641 * Note that coredump() drops proc lock.
3643 if (coredump(td) == 0)
3645 if (kern_logsigexit)
3647 "pid %d (%s), jid %d, uid %d: exited on "
3648 "signal %d%s\n", p->p_pid, p->p_comm,
3649 p->p_ucred->cr_prison->pr_id,
3650 td->td_ucred->cr_uid,
3652 sig & WCOREFLAG ? " (core dumped)" : "");
3655 exit2(td, 0, sig, ksi != NULL && (ksi->ksi_flags & KSI_KILLPG) != 0);
3660 sigexit(struct thread *td, int sig)
3662 sigexit1(td, sig, NULL);
3666 * Send queued SIGCHLD to parent when the child process's state is changed.
3670 sigparent(struct proc *p, int reason, int status)
3672 PROC_LOCK_ASSERT(p, MA_OWNED);
3673 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3675 if (p->p_ksi != NULL) {
3676 p->p_ksi->ksi_signo = SIGCHLD;
3677 p->p_ksi->ksi_code = reason;
3678 p->p_ksi->ksi_status = status;
3679 p->p_ksi->ksi_pid = p->p_pid;
3680 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
3681 if (KSI_ONQ(p->p_ksi))
3684 pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3688 childproc_jobstate(struct proc *p, int reason, int sig)
3692 PROC_LOCK_ASSERT(p, MA_OWNED);
3693 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3696 * Wake up parent sleeping in kern_wait(), also send
3697 * SIGCHLD to parent, but SIGCHLD does not guarantee
3698 * that the parent will awake, because the parent may have masked the signal.
3701 p->p_pptr->p_flag |= P_STATCHILD;
3704 ps = p->p_pptr->p_sigacts;
3705 mtx_lock(&ps->ps_mtx);
3706 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
3707 mtx_unlock(&ps->ps_mtx);
3708 sigparent(p, reason, sig);
3710 mtx_unlock(&ps->ps_mtx);
3714 childproc_stopped(struct proc *p, int reason)
3717 childproc_jobstate(p, reason, p->p_xsig);
3721 childproc_continued(struct proc *p)
3723 childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
3727 childproc_exited(struct proc *p)
3731 if (WCOREDUMP(p->p_xsig)) {
3732 reason = CLD_DUMPED;
3733 status = WTERMSIG(p->p_xsig);
3734 } else if (WIFSIGNALED(p->p_xsig)) {
3735 reason = CLD_KILLED;
3736 status = WTERMSIG(p->p_xsig);
3738 reason = CLD_EXITED;
3739 status = p->p_xexit;
3742 * XXX avoid calling wakeup(p->p_pptr), the work is
3745 sigparent(p, reason, status);
3748 #define MAX_NUM_CORE_FILES 100000
3749 #ifndef NUM_CORE_FILES
3750 #define NUM_CORE_FILES 5
3752 CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES);
3753 static int num_cores = NUM_CORE_FILES;
3756 sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS)
3761 new_val = num_cores;
3762 error = sysctl_handle_int(oidp, &new_val, 0, req);
3763 if (error != 0 || req->newptr == NULL)
3765 if (new_val > MAX_NUM_CORE_FILES)
3766 new_val = MAX_NUM_CORE_FILES;
3769 num_cores = new_val;
3772 SYSCTL_PROC(_debug, OID_AUTO, ncores,
3773 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(int),
3774 sysctl_debug_num_cores_check, "I",
3775 "Maximum number of generated process corefiles while using index format");
3777 #define GZIP_SUFFIX ".gz"
3778 #define ZSTD_SUFFIX ".zst"
3780 int compress_user_cores = 0;
3783 sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS)
3787 val = compress_user_cores;
3788 error = sysctl_handle_int(oidp, &val, 0, req);
3789 if (error != 0 || req->newptr == NULL)
3791 if (val != 0 && !compressor_avail(val))
3793 compress_user_cores = val;
3796 SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores,
3797 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int),
3798 sysctl_compress_user_cores, "I",
3799 "Enable compression of user corefiles ("
3800 __XSTRING(COMPRESS_GZIP) " = gzip, "
3801 __XSTRING(COMPRESS_ZSTD) " = zstd)");
3803 int compress_user_cores_level = 6;
3804 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN,
3805 &compress_user_cores_level, 0,
3806 "Corefile compression level");
3809 * Protect the access to corefilename[] by allproc_lock.
3811 #define corefilename_lock allproc_lock
3813 static char corefilename[MAXPATHLEN] = {"%N.core"};
3814 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
3817 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
3821 sx_xlock(&corefilename_lock);
3822 error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
3824 sx_xunlock(&corefilename_lock);
3828 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
3829 CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
3830 "Process corefile name format string");
3833 vnode_close_locked(struct thread *td, struct vnode *vp)
3837 vn_close(vp, FWRITE, td->td_ucred, td);
3841 * If the core format has a %I in it, then we need to check
3842 * for existing corefiles before defining a name.
3843 * To do this we iterate over 0..ncores to find a
3844 * non-existing core file name to use. If all core files are
3845 * already used we choose the oldest one.
3848 corefile_open_last(struct thread *td, char *name, int indexpos,
3849 int indexlen, int ncores, struct vnode **vpp)
3851 struct vnode *oldvp, *nextvp, *vp;
3853 struct nameidata nd;
3854 int error, i, flags, oflags, cmode;
3856 struct timespec lasttime;
3858 nextvp = oldvp = NULL;
3859 cmode = S_IRUSR | S_IWUSR;
3860 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3861 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3863 for (i = 0; i < ncores; i++) {
3864 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3866 ch = name[indexpos + indexlen];
3867 (void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen,
3869 name[indexpos + indexlen] = ch;
3871 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name);
3872 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3879 if ((flags & O_CREAT) == O_CREAT) {
3884 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3886 vnode_close_locked(td, vp);
3890 if (oldvp == NULL ||
3891 lasttime.tv_sec > vattr.va_mtime.tv_sec ||
3892 (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
3893 lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
3895 vn_close(oldvp, FWRITE, td->td_ucred, td);
3898 lasttime = vattr.va_mtime;
3900 vnode_close_locked(td, vp);
3904 if (oldvp != NULL) {
3905 if (nextvp == NULL) {
3906 if ((td->td_proc->p_flag & P_SUGID) != 0) {
3908 vn_close(oldvp, FWRITE, td->td_ucred, td);
3911 error = vn_lock(nextvp, LK_EXCLUSIVE);
3913 vn_close(nextvp, FWRITE, td->td_ucred,
3919 vn_close(oldvp, FWRITE, td->td_ucred, td);
3924 vnode_close_locked(td, oldvp);
3933 * corefile_open(comm, uid, pid, td, compress, vpp, namep)
3934 * Expand the name described in corefilename, using name, uid, and pid
3935 * and open/create core file.
3936 * corefilename is a printf-like string, with three format specifiers:
3937 * %N name of process ("name")
3938 * %P process id (pid)
3939 * %U user id (uid)
3940 * For example, "%N.core" is the default; they can be disabled completely
3941 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
3942 * This is controlled by the sysctl variable kern.corefile (see above).
3945 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
3946 int compress, int signum, struct vnode **vpp, char **namep)
3949 struct nameidata nd;
3951 char *hostname, *name;
3952 int cmode, error, flags, i, indexpos, indexlen, oflags, ncores;
3955 format = corefilename;
3956 name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
3960 (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
3961 sx_slock(&corefilename_lock);
3962 for (i = 0; format[i] != '\0'; i++) {
3963 switch (format[i]) {
3964 case '%': /* Format character */
3966 switch (format[i]) {
3968 sbuf_putc(&sb, '%');
3970 case 'H': /* hostname */
3971 if (hostname == NULL) {
3972 hostname = malloc(MAXHOSTNAMELEN,
3975 getcredhostname(td->td_ucred, hostname,
3977 sbuf_printf(&sb, "%s", hostname);
3979 case 'I': /* autoincrementing index */
3980 if (indexpos != -1) {
3981 sbuf_printf(&sb, "%%I");
3985 indexpos = sbuf_len(&sb);
3986 sbuf_printf(&sb, "%u", ncores - 1);
3987 indexlen = sbuf_len(&sb) - indexpos;
3989 case 'N': /* process name */
3990 sbuf_printf(&sb, "%s", comm);
3992 case 'P': /* process id */
3993 sbuf_printf(&sb, "%u", pid);
3995 case 'S': /* signal number */
3996 sbuf_printf(&sb, "%i", signum);
3998 case 'U': /* user id */
3999 sbuf_printf(&sb, "%u", uid);
4003 "Unknown format character %c in "
4004 "corename `%s'\n", format[i], format);
4009 sbuf_putc(&sb, format[i]);
4013 sx_sunlock(&corefilename_lock);
4014 free(hostname, M_TEMP);
4015 if (compress == COMPRESS_GZIP)
4016 sbuf_printf(&sb, GZIP_SUFFIX);
4017 else if (compress == COMPRESS_ZSTD)
4018 sbuf_printf(&sb, ZSTD_SUFFIX);
4019 if (sbuf_error(&sb) != 0) {
4020 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
4021 "long\n", (long)pid, comm, (u_long)uid);
4029 if (indexpos != -1) {
4030 error = corefile_open_last(td, name, indexpos, indexlen, ncores,
4034 "pid %d (%s), uid (%u): Path `%s' failed "
4035 "on initial open test, error = %d\n",
4036 pid, comm, uid, name, error);
4039 cmode = S_IRUSR | S_IWUSR;
4040 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
4041 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
4042 flags = O_CREAT | FWRITE | O_NOFOLLOW;
4043 if ((td->td_proc->p_flag & P_SUGID) != 0)
4046 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name);
4047 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
4057 audit_proc_coredump(td, name, error);
4067 * Dump a process' core. The main routine does some
4068 * policy checking, and creates the name of the coredump;
4069 * then it passes on a vnode and a size limit to the process-specific
4070 * coredump routine if there is one; if there _is not_ one, it returns
4071 * ENOSYS; otherwise it returns the error from the process-specific routine.
4075 coredump(struct thread *td)
4077 struct proc *p = td->td_proc;
4078 struct ucred *cred = td->td_ucred;
4082 size_t fullpathsize;
4083 int error, error1, locked;
4084 char *name; /* name of corefile */
4087 char *fullpath, *freepath = NULL;
4090 PROC_LOCK_ASSERT(p, MA_OWNED);
4091 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
4093 if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
4094 (p->p_flag2 & P2_NOTRACE) != 0) {
4100 * Note that the bulk of limit checking is done after
4101 * the corefile is created. The exception is if the limit
4102 * for corefiles is 0, in which case we don't bother
4103 * creating the corefile at all. This layout means that
4104 * a corefile is truncated instead of not being created,
4105 * if it is larger than the limit.
4107 limit = (off_t)lim_cur(td, RLIMIT_CORE);
4108 if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
4114 error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
4115 compress_user_cores, p->p_sig, &vp, &name);
4120 * Don't dump to non-regular files or files with links.
4121 * Do not dump into system files. Effective user must own the corefile.
4123 if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
4124 vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0 ||
4125 vattr.va_uid != cred->cr_uid) {
4133 /* Postpone other writers, including core dumps of other processes. */
4134 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
4136 lf.l_whence = SEEK_SET;
4139 lf.l_type = F_WRLCK;
4140 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
4144 if (set_core_nodump_flag)
4145 vattr.va_flags = UF_NODUMP;
4146 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4147 VOP_SETATTR(vp, &vattr, cred);
4150 p->p_acflag |= ACORE;
4153 if (p->p_sysent->sv_coredump != NULL) {
4154 error = p->p_sysent->sv_coredump(td, vp, limit, 0);
4160 lf.l_type = F_UNLCK;
4161 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
4163 vn_rangelock_unlock(vp, rl_cookie);
4166 * Notify the userland helper that a process triggered a core dump.
4167 * This allows the helper to run an automated debugging session.
4169 if (error != 0 || coredump_devctl == 0)
4171 sb = sbuf_new_auto();
4172 if (vn_fullpath_global(p->p_textvp, &fullpath, &freepath) != 0)
4174 sbuf_printf(sb, "comm=\"");
4175 devctl_safe_quote_sb(sb, fullpath);
4176 free(freepath, M_TEMP);
4177 sbuf_printf(sb, "\" core=\"");
4180 * We can't look up the core file vp directly. When we're replacing a core, and
4181 * at other random times, we flush the name cache, so the lookup will fail. Instead,
4182 * if the path of the core is relative, add the current dir in front of it.
4184 if (name[0] != '/') {
4185 fullpathsize = MAXPATHLEN;
4186 freepath = malloc(fullpathsize, M_TEMP, M_WAITOK);
4187 if (vn_getcwd(freepath, &fullpath, &fullpathsize) != 0) {
4188 free(freepath, M_TEMP);
4191 devctl_safe_quote_sb(sb, fullpath);
4192 free(freepath, M_TEMP);
4195 devctl_safe_quote_sb(sb, name);
4196 sbuf_printf(sb, "\"");
4197 if (sbuf_finish(sb) == 0)
4198 devctl_notify("kernel", "signal", "coredump", sbuf_data(sb));
4202 error1 = vn_close(vp, FWRITE, cred, td);
4206 audit_proc_coredump(td, name, error);
4213 * Nonexistent system call: signal the process (it may want to handle it). Flag an
4214 * error in case the process won't see the signal immediately (blocked or ignored).
4216 #ifndef _SYS_SYSPROTO_H_
4223 nosys(struct thread *td, struct nosys_args *args)
4230 tdsignal(td, SIGSYS);
4232 if (kern_lognosys == 1 || kern_lognosys == 3) {
4233 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
4236 if (kern_lognosys == 2 || kern_lognosys == 3 ||
4237 (p->p_pid == 1 && (kern_lognosys & 3) == 0)) {
4238 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
4245 * Send a SIGIO or SIGURG signal to a process or process group using stored
4246 * credentials rather than those of the current process.
4249 pgsigio(struct sigio **sigiop, int sig, int checkctty)
4252 struct sigio *sigio;
4254 ksiginfo_init(&ksi);
4255 ksi.ksi_signo = sig;
4256 ksi.ksi_code = SI_KERNEL;
4260 if (sigio == NULL) {
4264 if (sigio->sio_pgid > 0) {
4265 PROC_LOCK(sigio->sio_proc);
4266 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
4267 kern_psignal(sigio->sio_proc, sig);
4268 PROC_UNLOCK(sigio->sio_proc);
4269 } else if (sigio->sio_pgid < 0) {
4272 PGRP_LOCK(sigio->sio_pgrp);
4273 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
4275 if (p->p_state == PRS_NORMAL &&
4276 CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
4277 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
4278 kern_psignal(p, sig);
4281 PGRP_UNLOCK(sigio->sio_pgrp);
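/*
 * pgsigio() is what tty, socket, and driver code typically call from their
 * asynchronous-I/O paths once an owner has been registered with fsetown().
 * A hedged sketch of the usual pattern (the softc layout below is
 * hypothetical):
 *
 *	struct foo_softc {
 *		struct sigio	*sc_sigio;	(managed via fsetown())
 *	};
 *
 *	On I/O completion:
 *		if (sc->sc_async)
 *			pgsigio(&sc->sc_sigio, SIGIO, 0);
 */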
4287 filt_sigattach(struct knote *kn)
4289 struct proc *p = curproc;
4291 kn->kn_ptr.p_proc = p;
4292 kn->kn_flags |= EV_CLEAR; /* automatically set */
4294 knlist_add(p->p_klist, kn, 0);
4300 filt_sigdetach(struct knote *kn)
4302 struct proc *p = kn->kn_ptr.p_proc;
4304 knlist_remove(p->p_klist, kn, 0);
4308 * signal knotes are shared with proc knotes, so we apply a mask to
4309 * the hint in order to differentiate them from process hints. This
4310 * could be avoided by using a signal-specific knote list, but probably
4311 * isn't worth the trouble.
4314 filt_signal(struct knote *kn, long hint)
4317 if (hint & NOTE_SIGNAL) {
4318 hint &= ~NOTE_SIGNAL;
4320 if (kn->kn_id == hint)
4323 return (kn->kn_data != 0);
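/*
 * Userland view of the filter implemented above, as a hedged example:
 * EVFILT_SIGNAL reports in kn_data how many times the signal was posted
 * since the last retrieval, independently of the signal's disposition.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	signal(SIGUSR1, SIG_IGN);
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(register)
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	(kev.data is the count)
 */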
4331 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
4332 refcount_init(&ps->ps_refcnt, 1);
4333 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
4338 sigacts_free(struct sigacts *ps)
4341 if (refcount_release(&ps->ps_refcnt) == 0)
4343 mtx_destroy(&ps->ps_mtx);
4344 free(ps, M_SUBPROC);
4348 sigacts_hold(struct sigacts *ps)
4351 refcount_acquire(&ps->ps_refcnt);
4356 sigacts_copy(struct sigacts *dest, struct sigacts *src)
4359 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
4360 mtx_lock(&src->ps_mtx);
4361 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
4362 mtx_unlock(&src->ps_mtx);
4366 sigacts_shared(struct sigacts *ps)
4369 return (ps->ps_refcnt > 1);
4373 sig_drop_caught(struct proc *p)
4379 PROC_LOCK_ASSERT(p, MA_OWNED);
4380 mtx_assert(&ps->ps_mtx, MA_OWNED);
4381 SIG_FOREACH(sig, &ps->ps_sigcatch) {
4383 if ((sigprop(sig) & SIGPROP_IGNORE) != 0)
4384 sigqueue_delete_proc(p, sig);
4389 sigfastblock_failed(struct thread *td, bool sendsig, bool write)
4394 * Prevent further fetches and SIGSEGVs, allowing thread to
4395 * issue syscalls despite corruption.
4397 sigfastblock_clear(td);
4401 ksiginfo_init_trap(&ksi);
4402 ksi.ksi_signo = SIGSEGV;
4403 ksi.ksi_code = write ? SEGV_ACCERR : SEGV_MAPERR;
4404 ksi.ksi_addr = td->td_sigblock_ptr;
4405 trapsignal(td, &ksi);
4409 sigfastblock_fetch_sig(struct thread *td, bool sendsig, uint32_t *valp)
4413 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4415 if (fueword32((void *)td->td_sigblock_ptr, &res) == -1) {
4416 sigfastblock_failed(td, sendsig, false);
4420 td->td_sigblock_val = res & ~SIGFASTBLOCK_FLAGS;
4425 sigfastblock_resched(struct thread *td, bool resched)
4432 reschedule_signals(p, td->td_sigmask, 0);
4435 ast_sched(td, TDA_SIG);
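/*
 * Userland side of the fast sigblock protocol implemented by
 * sys_sigfastblock() below; a hedged sketch of how libc/rtld are expected
 * to drive it (error handling omitted, macro names taken from the system
 * headers):
 *
 *	static uint32_t fsb_word;
 *
 *	sigfastblock(SIGFASTBLOCK_SETPTR, &fsb_word);
 *	fsb_word += SIGFASTBLOCK_INC;	(cheap "block", no syscall)
 *	...critical section...
 *	fsb_word -= SIGFASTBLOCK_INC;	(cheap "unblock")
 *	if ((fsb_word & SIGFASTBLOCK_PEND) != 0)
 *		sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);
 */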
4439 sys_sigfastblock(struct thread *td, struct sigfastblock_args *uap)
4448 case SIGFASTBLOCK_SETPTR:
4449 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
4453 if (((uintptr_t)(uap->ptr) & (sizeof(uint32_t) - 1)) != 0) {
4457 td->td_pflags |= TDP_SIGFASTBLOCK;
4458 td->td_sigblock_ptr = uap->ptr;
4461 case SIGFASTBLOCK_UNBLOCK:
4462 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4468 res = casueword32(td->td_sigblock_ptr,
4469 SIGFASTBLOCK_PEND, &oldval, 0);
4472 sigfastblock_failed(td, false, true);
4478 if (oldval != SIGFASTBLOCK_PEND) {
4482 error = thread_check_susp(td, false);
4490 * td_sigblock_val is cleared there, but not on a
4491 * syscall exit. The end effect is that a single
4492 * interruptible sleep, while the user sigblock word is
4493 * set, might return EINTR or ERESTART to usermode
4494 * without delivering a signal. All further sleeps,
4495 * until userspace clears the word and does
4496 * sigfastblock(UNBLOCK), observe the current word and no
4497 * longer get interrupted. This is a slight
4498 * non-conformance; the alternative would be to read the
4499 * sigblock word on each syscall entry.
4501 td->td_sigblock_val = 0;
4504 * Rely on normal ast mechanism to deliver pending
4505 * signals to current thread. But notify others about
4508 sigfastblock_resched(td, error == 0 && p->p_numthreads != 1);
4512 case SIGFASTBLOCK_UNSETPTR:
4513 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4517 if (!sigfastblock_fetch_sig(td, false, &oldval)) {
4521 if (oldval != 0 && oldval != SIGFASTBLOCK_PEND) {
4525 sigfastblock_clear(td);
4536 sigfastblock_clear(struct thread *td)
4540 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4542 td->td_sigblock_val = 0;
4543 resched = (td->td_pflags & TDP_SIGFASTPENDING) != 0 ||
4545 td->td_pflags &= ~(TDP_SIGFASTBLOCK | TDP_SIGFASTPENDING);
4546 sigfastblock_resched(td, resched);
4550 sigfastblock_fetch(struct thread *td)
4554 (void)sigfastblock_fetch_sig(td, true, &val);
4558 sigfastblock_setpend1(struct thread *td)
4563 if ((td->td_pflags & TDP_SIGFASTPENDING) == 0)
4565 res = fueword32((void *)td->td_sigblock_ptr, &oldval);
4567 sigfastblock_failed(td, true, false);
4571 res = casueword32(td->td_sigblock_ptr, oldval, &oldval,
4572 oldval | SIGFASTBLOCK_PEND);
4574 sigfastblock_failed(td, true, true);
4578 td->td_sigblock_val = oldval & ~SIGFASTBLOCK_FLAGS;
4579 td->td_pflags &= ~TDP_SIGFASTPENDING;
4583 if (thread_check_susp(td, false) != 0)
4589 sigfastblock_setpend(struct thread *td, bool resched)
4593 sigfastblock_setpend1(td);
4597 reschedule_signals(p, fastblock_mask, SIGPROCMASK_FASTBLK);