2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
42 #include "opt_ktrace.h"
44 #include <sys/param.h>
45 #include <sys/ctype.h>
46 #include <sys/systm.h>
47 #include <sys/signalvar.h>
48 #include <sys/vnode.h>
51 #include <sys/capsicum.h>
52 #include <sys/compressor.h>
53 #include <sys/condvar.h>
54 #include <sys/event.h>
55 #include <sys/fcntl.h>
56 #include <sys/imgact.h>
57 #include <sys/kernel.h>
59 #include <sys/ktrace.h>
60 #include <sys/limits.h>
62 #include <sys/malloc.h>
63 #include <sys/mutex.h>
64 #include <sys/refcount.h>
65 #include <sys/namei.h>
67 #include <sys/procdesc.h>
68 #include <sys/posix4.h>
69 #include <sys/pioctl.h>
70 #include <sys/racct.h>
71 #include <sys/resourcevar.h>
74 #include <sys/sleepqueue.h>
78 #include <sys/syscallsubr.h>
79 #include <sys/sysctl.h>
80 #include <sys/sysent.h>
81 #include <sys/syslog.h>
82 #include <sys/sysproto.h>
83 #include <sys/timers.h>
84 #include <sys/unistd.h>
87 #include <vm/vm_extern.h>
92 #include <machine/cpu.h>
94 #include <security/audit/audit.h>
96 #define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
98 SDT_PROVIDER_DECLARE(proc);
99 SDT_PROBE_DEFINE3(proc, , , signal__send,
100 "struct thread *", "struct proc *", "int");
101 SDT_PROBE_DEFINE2(proc, , , signal__clear,
102 "int", "ksiginfo_t *");
103 SDT_PROBE_DEFINE3(proc, , , signal__discard,
104 "struct thread *", "struct proc *", "int");
106 static int coredump(struct thread *);
107 static int killpg1(struct thread *td, int sig, int pgid, int all,
109 static int issignal(struct thread *td);
110 static int sigprop(int sig);
111 static void tdsigwakeup(struct thread *, int, sig_t, int);
112 static int sig_suspend_threads(struct thread *, struct proc *, int);
113 static int filt_sigattach(struct knote *kn);
114 static void filt_sigdetach(struct knote *kn);
115 static int filt_signal(struct knote *kn, long hint);
116 static struct thread *sigtd(struct proc *p, int sig, int prop);
117 static void sigqueue_start(void);
119 static uma_zone_t ksiginfo_zone = NULL;
120 struct filterops sig_filtops = {
122 .f_attach = filt_sigattach,
123 .f_detach = filt_sigdetach,
124 .f_event = filt_signal,
127 static int kern_logsigexit = 1;
128 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
130 "Log processes quitting on abnormal signals to syslog(3)");
132 static int kern_forcesigexit = 1;
133 SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
134 &kern_forcesigexit, 0, "Force trap signal to be handled");
136 static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW, 0,
137 "POSIX real time signal");
139 static int max_pending_per_proc = 128;
140 SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
141 &max_pending_per_proc, 0, "Max pending signals per proc");
143 static int preallocate_siginfo = 1024;
144 SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
145 &preallocate_siginfo, 0, "Preallocated signal memory size");
147 static int signal_overflow = 0;
148 SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
149 &signal_overflow, 0, "Number of signals that overflowed the queue");
151 static int signal_alloc_fail = 0;
152 SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
153 &signal_alloc_fail, 0, "Number of signal allocations that failed");
155 static int kern_lognosys = 0;
156 SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
157 "Log invalid syscalls");
159 SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
162 * Policy -- Can ucred cr1 send SIGIO to process cr2?
163 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
164 * in the right situations.
166 #define CANSIGIO(cr1, cr2) \
167 ((cr1)->cr_uid == 0 || \
168 (cr1)->cr_ruid == (cr2)->cr_ruid || \
169 (cr1)->cr_uid == (cr2)->cr_ruid || \
170 (cr1)->cr_ruid == (cr2)->cr_uid || \
171 (cr1)->cr_uid == (cr2)->cr_uid)
173 static int sugid_coredump;
174 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
175 &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
177 static int capmode_coredump;
178 SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
179 &capmode_coredump, 0, "Allow processes in capability mode to dump core");
181 static int do_coredump = 1;
182 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
183 &do_coredump, 0, "Enable/Disable coredumps");
185 static int set_core_nodump_flag = 0;
186 SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
187 0, "Enable setting the NODUMP flag on coredump files");
189 static int coredump_devctl = 0;
190 SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
191 0, "Generate a devctl notification when processes coredump");
194 * Signal properties and actions.
195 * The array below categorizes the signals and their default actions
196 * according to the following properties:
198 #define SIGPROP_KILL 0x01 /* terminates process by default */
199 #define SIGPROP_CORE 0x02 /* ditto and coredumps */
200 #define SIGPROP_STOP 0x04 /* suspend process */
201 #define SIGPROP_TTYSTOP 0x08 /* ditto, from tty */
202 #define SIGPROP_IGNORE 0x10 /* ignore by default */
203 #define SIGPROP_CONT 0x20 /* continue if suspended */
204 #define SIGPROP_CANTMASK 0x40 /* non-maskable, catchable */
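/*
 * Illustrative sketch, not part of the original file: the SIGPROP_* bits
 * above are consulted through sigprop() to decide a signal's default
 * action.  The helper below is hypothetical and only shows how the flags
 * are typically tested.
 *
 *	static const char *
 *	sigprop_default_name(int sig)
 *	{
 *		int prop = sigprop(sig);
 *
 *		if (prop & SIGPROP_CORE)
 *			return ("terminate and dump core");
 *		if (prop & SIGPROP_KILL)
 *			return ("terminate");
 *		if (prop & SIGPROP_STOP)
 *			return ("stop");
 *		if (prop & SIGPROP_IGNORE)
 *			return ("ignore");
 *		return ("no default property recorded");
 *	}
 */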
206 static int sigproptbl[NSIG] = {
207 [SIGHUP] = SIGPROP_KILL,
208 [SIGINT] = SIGPROP_KILL,
209 [SIGQUIT] = SIGPROP_KILL | SIGPROP_CORE,
210 [SIGILL] = SIGPROP_KILL | SIGPROP_CORE,
211 [SIGTRAP] = SIGPROP_KILL | SIGPROP_CORE,
212 [SIGABRT] = SIGPROP_KILL | SIGPROP_CORE,
213 [SIGEMT] = SIGPROP_KILL | SIGPROP_CORE,
214 [SIGFPE] = SIGPROP_KILL | SIGPROP_CORE,
215 [SIGKILL] = SIGPROP_KILL,
216 [SIGBUS] = SIGPROP_KILL | SIGPROP_CORE,
217 [SIGSEGV] = SIGPROP_KILL | SIGPROP_CORE,
218 [SIGSYS] = SIGPROP_KILL | SIGPROP_CORE,
219 [SIGPIPE] = SIGPROP_KILL,
220 [SIGALRM] = SIGPROP_KILL,
221 [SIGTERM] = SIGPROP_KILL,
222 [SIGURG] = SIGPROP_IGNORE,
223 [SIGSTOP] = SIGPROP_STOP,
224 [SIGTSTP] = SIGPROP_STOP | SIGPROP_TTYSTOP,
225 [SIGCONT] = SIGPROP_IGNORE | SIGPROP_CONT,
226 [SIGCHLD] = SIGPROP_IGNORE,
227 [SIGTTIN] = SIGPROP_STOP | SIGPROP_TTYSTOP,
228 [SIGTTOU] = SIGPROP_STOP | SIGPROP_TTYSTOP,
229 [SIGIO] = SIGPROP_IGNORE,
230 [SIGXCPU] = SIGPROP_KILL,
231 [SIGXFSZ] = SIGPROP_KILL,
232 [SIGVTALRM] = SIGPROP_KILL,
233 [SIGPROF] = SIGPROP_KILL,
234 [SIGWINCH] = SIGPROP_IGNORE,
235 [SIGINFO] = SIGPROP_IGNORE,
236 [SIGUSR1] = SIGPROP_KILL,
237 [SIGUSR2] = SIGPROP_KILL,
240 static void reschedule_signals(struct proc *p, sigset_t block, int flags);
245 ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
246 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
247 uma_prealloc(ksiginfo_zone, preallocate_siginfo);
248 p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
249 p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
250 p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
254 ksiginfo_alloc(int wait)
261 if (ksiginfo_zone != NULL)
262 return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
267 ksiginfo_free(ksiginfo_t *ksi)
269 uma_zfree(ksiginfo_zone, ksi);
273 ksiginfo_tryfree(ksiginfo_t *ksi)
275 if (!(ksi->ksi_flags & KSI_EXT)) {
276 uma_zfree(ksiginfo_zone, ksi);
283 sigqueue_init(sigqueue_t *list, struct proc *p)
285 SIGEMPTYSET(list->sq_signals);
286 SIGEMPTYSET(list->sq_kill);
287 SIGEMPTYSET(list->sq_ptrace);
288 TAILQ_INIT(&list->sq_list);
290 list->sq_flags = SQ_INIT;
294 * Get a signal's ksiginfo.
296 * 0 - signal not found
297 * others - signal number
300 sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
302 struct proc *p = sq->sq_proc;
303 struct ksiginfo *ksi, *next;
306 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
308 if (!SIGISMEMBER(sq->sq_signals, signo))
311 if (SIGISMEMBER(sq->sq_ptrace, signo)) {
313 SIGDELSET(sq->sq_ptrace, signo);
314 si->ksi_flags |= KSI_PTRACE;
316 if (SIGISMEMBER(sq->sq_kill, signo)) {
319 SIGDELSET(sq->sq_kill, signo);
322 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
323 if (ksi->ksi_signo == signo) {
325 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
326 ksi->ksi_sigq = NULL;
327 ksiginfo_copy(ksi, si);
328 if (ksiginfo_tryfree(ksi) && p != NULL)
337 SIGDELSET(sq->sq_signals, signo);
338 si->ksi_signo = signo;
343 sigqueue_take(ksiginfo_t *ksi)
349 if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
353 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
354 ksi->ksi_sigq = NULL;
355 if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
358 for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
359 kp = TAILQ_NEXT(kp, ksi_link)) {
360 if (kp->ksi_signo == ksi->ksi_signo)
363 if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
364 !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
365 SIGDELSET(sq->sq_signals, ksi->ksi_signo);
369 sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
371 struct proc *p = sq->sq_proc;
372 struct ksiginfo *ksi;
375 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
378 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
381 if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
382 SIGADDSET(sq->sq_kill, signo);
386 /* directly insert the ksi, don't copy it */
387 if (si->ksi_flags & KSI_INS) {
388 if (si->ksi_flags & KSI_HEAD)
389 TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
391 TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
396 if (__predict_false(ksiginfo_zone == NULL)) {
397 SIGADDSET(sq->sq_kill, signo);
401 if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
404 } else if ((ksi = ksiginfo_alloc(0)) == NULL) {
410 ksiginfo_copy(si, ksi);
411 ksi->ksi_signo = signo;
412 if (si->ksi_flags & KSI_HEAD)
413 TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
415 TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
420 if ((si->ksi_flags & KSI_PTRACE) != 0) {
421 SIGADDSET(sq->sq_ptrace, signo);
424 } else if ((si->ksi_flags & KSI_TRAP) != 0 ||
425 (si->ksi_flags & KSI_SIGQ) == 0) {
426 SIGADDSET(sq->sq_kill, signo);
434 SIGADDSET(sq->sq_signals, signo);
439 sigqueue_flush(sigqueue_t *sq)
441 struct proc *p = sq->sq_proc;
444 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
447 PROC_LOCK_ASSERT(p, MA_OWNED);
449 while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
450 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
451 ksi->ksi_sigq = NULL;
452 if (ksiginfo_tryfree(ksi) && p != NULL)
456 SIGEMPTYSET(sq->sq_signals);
457 SIGEMPTYSET(sq->sq_kill);
458 SIGEMPTYSET(sq->sq_ptrace);
462 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
465 struct proc *p1, *p2;
466 ksiginfo_t *ksi, *next;
468 KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
469 KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
472 /* Move siginfo to target list */
473 TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
474 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
475 TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
478 TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
485 /* Move pending bits to target list */
487 SIGSETAND(tmp, *set);
488 SIGSETOR(dst->sq_kill, tmp);
489 SIGSETNAND(src->sq_kill, tmp);
491 tmp = src->sq_ptrace;
492 SIGSETAND(tmp, *set);
493 SIGSETOR(dst->sq_ptrace, tmp);
494 SIGSETNAND(src->sq_ptrace, tmp);
496 tmp = src->sq_signals;
497 SIGSETAND(tmp, *set);
498 SIGSETOR(dst->sq_signals, tmp);
499 SIGSETNAND(src->sq_signals, tmp);
504 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
509 SIGADDSET(set, signo);
510 sigqueue_move_set(src, dst, &set);
515 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
517 struct proc *p = sq->sq_proc;
518 ksiginfo_t *ksi, *next;
520 KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));
522 /* Remove siginfo queue */
523 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
524 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
525 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
526 ksi->ksi_sigq = NULL;
527 if (ksiginfo_tryfree(ksi) && p != NULL)
531 SIGSETNAND(sq->sq_kill, *set);
532 SIGSETNAND(sq->sq_ptrace, *set);
533 SIGSETNAND(sq->sq_signals, *set);
537 sigqueue_delete(sigqueue_t *sq, int signo)
542 SIGADDSET(set, signo);
543 sigqueue_delete_set(sq, &set);
546 /* Remove a set of signals for a process */
548 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
553 PROC_LOCK_ASSERT(p, MA_OWNED);
555 sigqueue_init(&worklist, NULL);
556 sigqueue_move_set(&p->p_sigqueue, &worklist, set);
558 FOREACH_THREAD_IN_PROC(p, td0)
559 sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
561 sigqueue_flush(&worklist);
565 sigqueue_delete_proc(struct proc *p, int signo)
570 SIGADDSET(set, signo);
571 sigqueue_delete_set_proc(p, &set);
575 sigqueue_delete_stopmask_proc(struct proc *p)
580 SIGADDSET(set, SIGSTOP);
581 SIGADDSET(set, SIGTSTP);
582 SIGADDSET(set, SIGTTIN);
583 SIGADDSET(set, SIGTTOU);
584 sigqueue_delete_set_proc(p, &set);
588 * Determine signal that should be delivered to thread td, the current
589 * thread, 0 if none. If there is a pending stop signal with default
590 * action, the process stops in issignal().
593 cursig(struct thread *td)
595 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
596 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
597 THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
598 return (SIGPENDING(td) ? issignal(td) : 0);
602 * Arrange for ast() to handle unmasked pending signals on return to user
603 * mode. This must be called whenever a signal is added to td_sigqueue or
604 * unmasked in td_sigmask.
607 signotify(struct thread *td)
610 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
612 if (SIGPENDING(td)) {
614 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
620 * Returns 1 (true) if altstack is configured for the thread, and the
621 * passed stack bottom address falls into the altstack range. Handles
622 * the COMPAT_43 special case where the alternate stack size is zero.
625 sigonstack(size_t sp)
630 if ((td->td_pflags & TDP_ALTSTACK) == 0)
632 #if defined(COMPAT_43)
633 if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0)
634 return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
636 return (sp >= (size_t)td->td_sigstk.ss_sp &&
637 sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
644 if (sig > 0 && sig < nitems(sigproptbl))
645 return (sigproptbl[sig]);
650 sig_ffs(sigset_t *set)
654 for (i = 0; i < _SIG_WORDS; i++)
656 return (ffs(set->__bits[i]) + (i * 32));
661 sigact_flag_test(const struct sigaction *act, int flag)
665 * SA_SIGINFO is reset when signal disposition is set to
666 * ignore or default. Other flags are kept according to user
669 return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
670 ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
671 (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
681 kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
682 struct sigaction *oact, int flags)
685 struct proc *p = td->td_proc;
687 if (!_SIG_VALID(sig))
689 if (act != NULL && act->sa_handler != SIG_DFL &&
690 act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
691 SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
692 SA_NOCLDWAIT | SA_SIGINFO)) != 0)
697 mtx_lock(&ps->ps_mtx);
699 memset(oact, 0, sizeof(*oact));
700 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
701 if (SIGISMEMBER(ps->ps_sigonstack, sig))
702 oact->sa_flags |= SA_ONSTACK;
703 if (!SIGISMEMBER(ps->ps_sigintr, sig))
704 oact->sa_flags |= SA_RESTART;
705 if (SIGISMEMBER(ps->ps_sigreset, sig))
706 oact->sa_flags |= SA_RESETHAND;
707 if (SIGISMEMBER(ps->ps_signodefer, sig))
708 oact->sa_flags |= SA_NODEFER;
709 if (SIGISMEMBER(ps->ps_siginfo, sig)) {
710 oact->sa_flags |= SA_SIGINFO;
712 (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
714 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
715 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
716 oact->sa_flags |= SA_NOCLDSTOP;
717 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
718 oact->sa_flags |= SA_NOCLDWAIT;
721 if ((sig == SIGKILL || sig == SIGSTOP) &&
722 act->sa_handler != SIG_DFL) {
723 mtx_unlock(&ps->ps_mtx);
729 * Change setting atomically.
732 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
733 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
734 if (sigact_flag_test(act, SA_SIGINFO)) {
735 ps->ps_sigact[_SIG_IDX(sig)] =
736 (__sighandler_t *)act->sa_sigaction;
737 SIGADDSET(ps->ps_siginfo, sig);
739 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
740 SIGDELSET(ps->ps_siginfo, sig);
742 if (!sigact_flag_test(act, SA_RESTART))
743 SIGADDSET(ps->ps_sigintr, sig);
745 SIGDELSET(ps->ps_sigintr, sig);
746 if (sigact_flag_test(act, SA_ONSTACK))
747 SIGADDSET(ps->ps_sigonstack, sig);
749 SIGDELSET(ps->ps_sigonstack, sig);
750 if (sigact_flag_test(act, SA_RESETHAND))
751 SIGADDSET(ps->ps_sigreset, sig);
753 SIGDELSET(ps->ps_sigreset, sig);
754 if (sigact_flag_test(act, SA_NODEFER))
755 SIGADDSET(ps->ps_signodefer, sig);
757 SIGDELSET(ps->ps_signodefer, sig);
758 if (sig == SIGCHLD) {
759 if (act->sa_flags & SA_NOCLDSTOP)
760 ps->ps_flag |= PS_NOCLDSTOP;
762 ps->ps_flag &= ~PS_NOCLDSTOP;
763 if (act->sa_flags & SA_NOCLDWAIT) {
765 * Paranoia: since SA_NOCLDWAIT is implemented
766 * by reparenting the dying child to PID 1 (and
767 * trust it to reap the zombie), PID 1 itself
768 * is forbidden to set SA_NOCLDWAIT.
771 ps->ps_flag &= ~PS_NOCLDWAIT;
773 ps->ps_flag |= PS_NOCLDWAIT;
775 ps->ps_flag &= ~PS_NOCLDWAIT;
776 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
777 ps->ps_flag |= PS_CLDSIGIGN;
779 ps->ps_flag &= ~PS_CLDSIGIGN;
782 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
783 * and for signals set to SIG_DFL where the default is to
784 * ignore. However, don't put SIGCONT in ps_sigignore, as we
785 * have to restart the process.
787 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
788 (sigprop(sig) & SIGPROP_IGNORE &&
789 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
790 /* never to be seen again */
791 sigqueue_delete_proc(p, sig);
793 /* easier in psignal */
794 SIGADDSET(ps->ps_sigignore, sig);
795 SIGDELSET(ps->ps_sigcatch, sig);
797 SIGDELSET(ps->ps_sigignore, sig);
798 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
799 SIGDELSET(ps->ps_sigcatch, sig);
801 SIGADDSET(ps->ps_sigcatch, sig);
803 #ifdef COMPAT_FREEBSD4
804 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
805 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
806 (flags & KSA_FREEBSD4) == 0)
807 SIGDELSET(ps->ps_freebsd4, sig);
809 SIGADDSET(ps->ps_freebsd4, sig);
812 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
813 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
814 (flags & KSA_OSIGSET) == 0)
815 SIGDELSET(ps->ps_osigset, sig);
817 SIGADDSET(ps->ps_osigset, sig);
820 mtx_unlock(&ps->ps_mtx);
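/*
 * Illustrative userland sketch, not part of the original file: how the
 * sigaction(2) interface implemented by kern_sigaction() above is
 * typically used to install a SA_SIGINFO handler.  The program below is
 * hypothetical example code.
 *
 *	#include <err.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void
 *	on_usr1(int sig, siginfo_t *si, void *ctx)
 *	{
 *		(void)sig; (void)si; (void)ctx;
 *		got_usr1 = 1;
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_sigaction = on_usr1;
 *		sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *		sigemptyset(&sa.sa_mask);
 *		if (sigaction(SIGUSR1, &sa, NULL) == -1)
 *			err(1, "sigaction");
 *		kill(getpid(), SIGUSR1);
 *		printf("handler ran: %d\n", (int)got_usr1);
 *		return (0);
 *	}
 */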
825 #ifndef _SYS_SYSPROTO_H_
826 struct sigaction_args {
828 struct sigaction *act;
829 struct sigaction *oact;
833 sys_sigaction(struct thread *td, struct sigaction_args *uap)
835 struct sigaction act, oact;
836 struct sigaction *actp, *oactp;
839 actp = (uap->act != NULL) ? &act : NULL;
840 oactp = (uap->oact != NULL) ? &oact : NULL;
842 error = copyin(uap->act, actp, sizeof(act));
846 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
848 error = copyout(oactp, uap->oact, sizeof(oact));
852 #ifdef COMPAT_FREEBSD4
853 #ifndef _SYS_SYSPROTO_H_
854 struct freebsd4_sigaction_args {
856 struct sigaction *act;
857 struct sigaction *oact;
861 freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
863 struct sigaction act, oact;
864 struct sigaction *actp, *oactp;
868 actp = (uap->act != NULL) ? &act : NULL;
869 oactp = (uap->oact != NULL) ? &oact : NULL;
871 error = copyin(uap->act, actp, sizeof(act));
875 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
877 error = copyout(oactp, uap->oact, sizeof(oact));
880 #endif /* COMPAT_FREEBSD4 */
882 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
883 #ifndef _SYS_SYSPROTO_H_
884 struct osigaction_args {
886 struct osigaction *nsa;
887 struct osigaction *osa;
891 osigaction(struct thread *td, struct osigaction_args *uap)
893 struct osigaction sa;
894 struct sigaction nsa, osa;
895 struct sigaction *nsap, *osap;
898 if (uap->signum <= 0 || uap->signum >= ONSIG)
901 nsap = (uap->nsa != NULL) ? &nsa : NULL;
902 osap = (uap->osa != NULL) ? &osa : NULL;
905 error = copyin(uap->nsa, &sa, sizeof(sa));
908 nsap->sa_handler = sa.sa_handler;
909 nsap->sa_flags = sa.sa_flags;
910 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
912 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
913 if (osap && !error) {
914 sa.sa_handler = osap->sa_handler;
915 sa.sa_flags = osap->sa_flags;
916 SIG2OSIG(osap->sa_mask, sa.sa_mask);
917 error = copyout(&sa, uap->osa, sizeof(sa));
922 #if !defined(__i386__)
923 /* Avoid replicating the same stub everywhere */
925 osigreturn(struct thread *td, struct osigreturn_args *uap)
928 return (nosys(td, (struct nosys_args *)uap));
931 #endif /* COMPAT_43 */
934 * Initialize signal state for process 0;
935 * set to ignore signals that are ignored by default.
938 siginit(struct proc *p)
945 mtx_lock(&ps->ps_mtx);
946 for (i = 1; i <= NSIG; i++) {
947 if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
948 SIGADDSET(ps->ps_sigignore, i);
951 mtx_unlock(&ps->ps_mtx);
956 * Reset specified signal to the default disposition.
959 sigdflt(struct sigacts *ps, int sig)
962 mtx_assert(&ps->ps_mtx, MA_OWNED);
963 SIGDELSET(ps->ps_sigcatch, sig);
964 if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
965 SIGADDSET(ps->ps_sigignore, sig);
966 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
967 SIGDELSET(ps->ps_siginfo, sig);
971 * Reset signals for an exec of the specified process.
974 execsigs(struct proc *p)
982 * Reset caught signals. Held signals remain held
983 * through td_sigmask (unless they were caught,
984 * and are now ignored by default).
986 PROC_LOCK_ASSERT(p, MA_OWNED);
988 mtx_lock(&ps->ps_mtx);
992 * As CloudABI processes cannot modify signal handlers, fully
993 * reset all signals to their default behavior. Do ignore
994 * SIGPIPE, as it would otherwise be impossible to recover from
995 * writes to broken pipes and sockets.
997 if (SV_PROC_ABI(p) == SV_ABI_CLOUDABI) {
998 osigignore = ps->ps_sigignore;
999 while (SIGNOTEMPTY(osigignore)) {
1000 sig = sig_ffs(&osigignore);
1001 SIGDELSET(osigignore, sig);
1005 SIGADDSET(ps->ps_sigignore, SIGPIPE);
1009 * Reset stack state to the user stack.
1010 * Clear set of signals caught on the signal stack.
1013 MPASS(td->td_proc == p);
1014 td->td_sigstk.ss_flags = SS_DISABLE;
1015 td->td_sigstk.ss_size = 0;
1016 td->td_sigstk.ss_sp = 0;
1017 td->td_pflags &= ~TDP_ALTSTACK;
1019 * Reset the "no zombies if child dies" flag, as Solaris does.
1021 ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
1022 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
1023 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
1024 mtx_unlock(&ps->ps_mtx);
1028 * kern_sigprocmask()
1030 * Manipulate signal mask.
1033 kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
1036 sigset_t new_block, oset1;
1041 if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
1042 PROC_LOCK_ASSERT(p, MA_OWNED);
1045 mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
1046 ? MA_OWNED : MA_NOTOWNED);
1048 *oset = td->td_sigmask;
1055 oset1 = td->td_sigmask;
1056 SIGSETOR(td->td_sigmask, *set);
1057 new_block = td->td_sigmask;
1058 SIGSETNAND(new_block, oset1);
1061 SIGSETNAND(td->td_sigmask, *set);
1066 oset1 = td->td_sigmask;
1067 if (flags & SIGPROCMASK_OLD)
1068 SIGSETLO(td->td_sigmask, *set);
1070 td->td_sigmask = *set;
1071 new_block = td->td_sigmask;
1072 SIGSETNAND(new_block, oset1);
1081 * The new_block set contains signals that were not previously
1082 * blocked, but are blocked now.
1084 * In case we block any signal that was not previously blocked
1085 * for td, and process has the signal pending, try to schedule
1086 * signal delivery to some thread that does not block the
1087 * signal, possibly waking it up.
1089 if (p->p_numthreads != 1)
1090 reschedule_signals(p, new_block, flags);
1094 if (!(flags & SIGPROCMASK_PROC_LOCKED))
1099 #ifndef _SYS_SYSPROTO_H_
1100 struct sigprocmask_args {
1102 const sigset_t *set;
1107 sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
1110 sigset_t *setp, *osetp;
1113 setp = (uap->set != NULL) ? &set : NULL;
1114 osetp = (uap->oset != NULL) ? &oset : NULL;
1116 error = copyin(uap->set, setp, sizeof(set));
1120 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
1121 if (osetp && !error) {
1122 error = copyout(osetp, uap->oset, sizeof(oset));
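/*
 * Illustrative userland sketch, not part of the original file: blocking a
 * signal around a critical section with sigprocmask(2), which is serviced
 * by kern_sigprocmask() above.  critical_section() is a hypothetical
 * placeholder; a SIGINT raised while it runs stays pending until the old
 * mask is restored.
 *
 *	#include <err.h>
 *	#include <signal.h>
 *
 *	static void
 *	critical_section(void)
 *	{
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		sigset_t block, saved;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGINT);
 *		if (sigprocmask(SIG_BLOCK, &block, &saved) == -1)
 *			err(1, "sigprocmask");
 *		critical_section();
 *		if (sigprocmask(SIG_SETMASK, &saved, NULL) == -1)
 *			err(1, "sigprocmask");
 *		return (0);
 *	}
 */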
1127 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1128 #ifndef _SYS_SYSPROTO_H_
1129 struct osigprocmask_args {
1135 osigprocmask(struct thread *td, struct osigprocmask_args *uap)
1140 OSIG2SIG(uap->mask, set);
1141 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
1142 SIG2OSIG(oset, td->td_retval[0]);
1145 #endif /* COMPAT_43 */
1148 sys_sigwait(struct thread *td, struct sigwait_args *uap)
1154 error = copyin(uap->set, &set, sizeof(set));
1156 td->td_retval[0] = error;
1160 error = kern_sigtimedwait(td, set, &ksi, NULL);
1162 if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
1164 if (error == ERESTART)
1166 td->td_retval[0] = error;
1170 error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
1171 td->td_retval[0] = error;
1176 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
1179 struct timespec *timeout;
1185 error = copyin(uap->timeout, &ts, sizeof(ts));
1193 error = copyin(uap->set, &set, sizeof(set));
1197 error = kern_sigtimedwait(td, set, &ksi, timeout);
1202 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1205 td->td_retval[0] = ksi.ksi_signo;
1210 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
1216 error = copyin(uap->set, &set, sizeof(set));
1220 error = kern_sigtimedwait(td, set, &ksi, NULL);
1225 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1228 td->td_retval[0] = ksi.ksi_signo;
1233 proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
1237 FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
1241 thr->td_si.si_signo = 0;
1246 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
1247 struct timespec *timeout)
1250 sigset_t saved_mask, new_block;
1252 int error, sig, timo, timevalid = 0;
1253 struct timespec rts, ets, ts;
1261 if (timeout != NULL) {
1262 if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
1264 getnanouptime(&rts);
1265 timespecadd(&rts, timeout, &ets);
1269 /* Some signals cannot be waited for. */
1270 SIG_CANTMASK(waitset);
1273 saved_mask = td->td_sigmask;
1274 SIGSETNAND(td->td_sigmask, waitset);
1276 mtx_lock(&ps->ps_mtx);
1278 mtx_unlock(&ps->ps_mtx);
1279 KASSERT(sig >= 0, ("sig %d", sig));
1280 if (sig != 0 && SIGISMEMBER(waitset, sig)) {
1281 if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
1282 sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
1292 * POSIX says this must be checked after looking for pending
1295 if (timeout != NULL) {
1300 getnanouptime(&rts);
1301 if (timespeccmp(&rts, &ets, >=)) {
1305 timespecsub(&ets, &rts, &ts);
1306 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1312 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", timo);
1314 if (timeout != NULL) {
1315 if (error == ERESTART) {
1316 /* Timeout cannot be restarted. */
1318 } else if (error == EAGAIN) {
1319 /* We will recalculate the timeout ourselves. */
1325 new_block = saved_mask;
1326 SIGSETNAND(new_block, td->td_sigmask);
1327 td->td_sigmask = saved_mask;
1329 * Fewer signals can be delivered to us, reschedule signal
1332 if (p->p_numthreads != 1)
1333 reschedule_signals(p, new_block, 0);
1336 SDT_PROBE2(proc, , , signal__clear, sig, ksi);
1338 if (ksi->ksi_code == SI_TIMER)
1339 itimer_accept(p, ksi->ksi_timerid, ksi);
1342 if (KTRPOINT(td, KTR_PSIG)) {
1345 mtx_lock(&ps->ps_mtx);
1346 action = ps->ps_sigact[_SIG_IDX(sig)];
1347 mtx_unlock(&ps->ps_mtx);
1348 ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
1351 if (sig == SIGKILL) {
1352 proc_td_siginfo_capture(td, &ksi->ksi_info);
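/*
 * Illustrative userland sketch, not part of the original file: waiting
 * synchronously for a blocked signal with sigtimedwait(2), which ends up
 * in kern_sigtimedwait() above.  Hypothetical example code.
 *
 *	#include <err.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int
 *	main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t si;
 *		struct timespec ts = { .tv_sec = 2 };
 *		int sig;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		if (sigprocmask(SIG_BLOCK, &set, NULL) == -1)
 *			err(1, "sigprocmask");
 *		sig = sigtimedwait(&set, &si, &ts);
 *		if (sig == -1)
 *			err(1, "sigtimedwait");
 *		printf("got signal %d from pid %d\n", sig, (int)si.si_pid);
 *		return (0);
 *	}
 */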
1360 #ifndef _SYS_SYSPROTO_H_
1361 struct sigpending_args {
1366 sys_sigpending(struct thread *td, struct sigpending_args *uap)
1368 struct proc *p = td->td_proc;
1372 pending = p->p_sigqueue.sq_signals;
1373 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1375 return (copyout(&pending, uap->set, sizeof(sigset_t)));
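/*
 * Illustrative userland sketch, not part of the original file: inspecting
 * the pending set computed by sys_sigpending() above.  handle_deferred_hup()
 * is a hypothetical placeholder for work deferred while SIGHUP was blocked.
 *
 *	sigset_t pend;
 *
 *	if (sigpending(&pend) == -1)
 *		err(1, "sigpending");
 *	if (sigismember(&pend, SIGHUP))
 *		handle_deferred_hup();
 */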
1378 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1379 #ifndef _SYS_SYSPROTO_H_
1380 struct osigpending_args {
1385 osigpending(struct thread *td, struct osigpending_args *uap)
1387 struct proc *p = td->td_proc;
1391 pending = p->p_sigqueue.sq_signals;
1392 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1394 SIG2OSIG(pending, td->td_retval[0]);
1397 #endif /* COMPAT_43 */
1399 #if defined(COMPAT_43)
1401 * Generalized interface signal handler, 4.3-compatible.
1403 #ifndef _SYS_SYSPROTO_H_
1404 struct osigvec_args {
1412 osigvec(struct thread *td, struct osigvec_args *uap)
1415 struct sigaction nsa, osa;
1416 struct sigaction *nsap, *osap;
1419 if (uap->signum <= 0 || uap->signum >= ONSIG)
1421 nsap = (uap->nsv != NULL) ? &nsa : NULL;
1422 osap = (uap->osv != NULL) ? &osa : NULL;
1424 error = copyin(uap->nsv, &vec, sizeof(vec));
1427 nsap->sa_handler = vec.sv_handler;
1428 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1429 nsap->sa_flags = vec.sv_flags;
1430 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
1432 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1433 if (osap && !error) {
1434 vec.sv_handler = osap->sa_handler;
1435 SIG2OSIG(osap->sa_mask, vec.sv_mask);
1436 vec.sv_flags = osap->sa_flags;
1437 vec.sv_flags &= ~SA_NOCLDWAIT;
1438 vec.sv_flags ^= SA_RESTART;
1439 error = copyout(&vec, uap->osv, sizeof(vec));
1444 #ifndef _SYS_SYSPROTO_H_
1445 struct osigblock_args {
1450 osigblock(struct thread *td, struct osigblock_args *uap)
1454 OSIG2SIG(uap->mask, set);
1455 kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
1456 SIG2OSIG(oset, td->td_retval[0]);
1460 #ifndef _SYS_SYSPROTO_H_
1461 struct osigsetmask_args {
1466 osigsetmask(struct thread *td, struct osigsetmask_args *uap)
1470 OSIG2SIG(uap->mask, set);
1471 kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
1472 SIG2OSIG(oset, td->td_retval[0]);
1475 #endif /* COMPAT_43 */
1478 * Suspend calling thread until signal, providing mask to be set in the
1481 #ifndef _SYS_SYSPROTO_H_
1482 struct sigsuspend_args {
1483 const sigset_t *sigmask;
1488 sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
1493 error = copyin(uap->sigmask, &mask, sizeof(mask));
1496 return (kern_sigsuspend(td, mask));
1500 kern_sigsuspend(struct thread *td, sigset_t mask)
1502 struct proc *p = td->td_proc;
1506 * When returning from sigsuspend, we want
1507 * the old mask to be restored after the
1508 * signal handler has finished. Thus, we
1509 * save it here and mark the sigacts structure
1513 kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
1514 SIGPROCMASK_PROC_LOCKED);
1515 td->td_pflags |= TDP_OLDMASK;
1518 * Process signals now. Otherwise, we can get spurious wakeup
1519 * due to signal entered process queue, but delivered to other
1520 * thread. But sigsuspend should return only on signal
1523 (p->p_sysent->sv_set_syscall_retval)(td, EINTR);
1524 for (has_sig = 0; !has_sig;) {
1525 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
1528 thread_suspend_check(0);
1529 mtx_lock(&p->p_sigacts->ps_mtx);
1530 while ((sig = cursig(td)) != 0) {
1531 KASSERT(sig >= 0, ("sig %d", sig));
1532 has_sig += postsig(sig);
1534 mtx_unlock(&p->p_sigacts->ps_mtx);
1537 td->td_errno = EINTR;
1538 td->td_pflags |= TDP_NERRNO;
1539 return (EJUSTRETURN);
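/*
 * Illustrative userland sketch, not part of the original file: the classic
 * race-free wait that kern_sigsuspend() above enables, where replacing the
 * mask and going to sleep happen atomically.  start_child() is a
 * hypothetical placeholder that forks the work whose SIGCHLD we wait for.
 *
 *	#include <err.h>
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_chld;
 *
 *	static void
 *	on_chld(int sig)
 *	{
 *		(void)sig;
 *		got_chld = 1;
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		sigset_t block, oldmask;
 *
 *		signal(SIGCHLD, on_chld);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGCHLD);
 *		if (sigprocmask(SIG_BLOCK, &block, &oldmask) == -1)
 *			err(1, "sigprocmask");
 *		start_child();
 *		while (!got_chld)
 *			sigsuspend(&oldmask);
 *		if (sigprocmask(SIG_SETMASK, &oldmask, NULL) == -1)
 *			err(1, "sigprocmask");
 *		return (0);
 *	}
 */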
1542 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1544 * Compatibility sigsuspend call for old binaries. Note nonstandard calling
1545 * convention: libc stub passes mask, not pointer, to save a copyin.
1547 #ifndef _SYS_SYSPROTO_H_
1548 struct osigsuspend_args {
1554 osigsuspend(struct thread *td, struct osigsuspend_args *uap)
1558 OSIG2SIG(uap->mask, mask);
1559 return (kern_sigsuspend(td, mask));
1561 #endif /* COMPAT_43 */
1563 #if defined(COMPAT_43)
1564 #ifndef _SYS_SYSPROTO_H_
1565 struct osigstack_args {
1566 struct sigstack *nss;
1567 struct sigstack *oss;
1572 osigstack(struct thread *td, struct osigstack_args *uap)
1574 struct sigstack nss, oss;
1577 if (uap->nss != NULL) {
1578 error = copyin(uap->nss, &nss, sizeof(nss));
1582 oss.ss_sp = td->td_sigstk.ss_sp;
1583 oss.ss_onstack = sigonstack(cpu_getstack(td));
1584 if (uap->nss != NULL) {
1585 td->td_sigstk.ss_sp = nss.ss_sp;
1586 td->td_sigstk.ss_size = 0;
1587 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1588 td->td_pflags |= TDP_ALTSTACK;
1590 if (uap->oss != NULL)
1591 error = copyout(&oss, uap->oss, sizeof(oss));
1595 #endif /* COMPAT_43 */
1597 #ifndef _SYS_SYSPROTO_H_
1598 struct sigaltstack_args {
1605 sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
1610 if (uap->ss != NULL) {
1611 error = copyin(uap->ss, &ss, sizeof(ss));
1615 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1616 (uap->oss != NULL) ? &oss : NULL);
1619 if (uap->oss != NULL)
1620 error = copyout(&oss, uap->oss, sizeof(stack_t));
1625 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1627 struct proc *p = td->td_proc;
1630 oonstack = sigonstack(cpu_getstack(td));
1633 *oss = td->td_sigstk;
1634 oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
1635 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1641 if ((ss->ss_flags & ~SS_DISABLE) != 0)
1643 if (!(ss->ss_flags & SS_DISABLE)) {
1644 if (ss->ss_size < p->p_sysent->sv_minsigstksz)
1647 td->td_sigstk = *ss;
1648 td->td_pflags |= TDP_ALTSTACK;
1650 td->td_pflags &= ~TDP_ALTSTACK;
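/*
 * Illustrative userland sketch, not part of the original file: setting up
 * an alternate signal stack with sigaltstack(2) (kern_sigaltstack() above)
 * so a SIGSEGV caused by stack overflow can still run its handler.
 * Hypothetical example code.
 *
 *	#include <err.h>
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void
 *	segv_handler(int sig)
 *	{
 *		(void)sig;
 *		_exit(1);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		stack_t ss;
 *		struct sigaction sa;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);
 *		if (ss.ss_sp == NULL)
 *			err(1, "malloc");
 *		ss.ss_size = SIGSTKSZ;
 *		ss.ss_flags = 0;
 *		if (sigaltstack(&ss, NULL) == -1)
 *			err(1, "sigaltstack");
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = segv_handler;
 *		sa.sa_flags = SA_ONSTACK;
 *		sigemptyset(&sa.sa_mask);
 *		if (sigaction(SIGSEGV, &sa, NULL) == -1)
 *			err(1, "sigaction");
 *		return (0);
 *	}
 */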
1657 * Common code for kill process group/broadcast kill.
1658 * cp is calling process.
1661 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
1673 sx_slock(&allproc_lock);
1674 FOREACH_PROC_IN_SYSTEM(p) {
1675 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1676 p == td->td_proc || p->p_state == PRS_NEW) {
1680 err = p_cansignal(td, p, sig);
1683 pksignal(p, sig, ksi);
1686 else if (ret == ESRCH)
1690 sx_sunlock(&allproc_lock);
1692 sx_slock(&proctree_lock);
1695 * zero pgid means send to my process group.
1697 pgrp = td->td_proc->p_pgrp;
1700 pgrp = pgfind(pgid);
1702 sx_sunlock(&proctree_lock);
1706 sx_sunlock(&proctree_lock);
1707 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1709 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1710 p->p_state == PRS_NEW) {
1714 err = p_cansignal(td, p, sig);
1717 pksignal(p, sig, ksi);
1720 else if (ret == ESRCH)
1729 #ifndef _SYS_SYSPROTO_H_
1737 sys_kill(struct thread *td, struct kill_args *uap)
1745 * A process in capability mode can send signals only to itself.
1745 * The main rationale behind this is that abort(3) is implemented as
1746 * kill(getpid(), SIGABRT).
1748 if (IN_CAPABILITY_MODE(td) && uap->pid != td->td_proc->p_pid)
1751 AUDIT_ARG_SIGNUM(uap->signum);
1752 AUDIT_ARG_PID(uap->pid);
1753 if ((u_int)uap->signum > _SIG_MAXSIG)
1756 ksiginfo_init(&ksi);
1757 ksi.ksi_signo = uap->signum;
1758 ksi.ksi_code = SI_USER;
1759 ksi.ksi_pid = td->td_proc->p_pid;
1760 ksi.ksi_uid = td->td_ucred->cr_ruid;
1763 /* kill single process */
1764 if ((p = pfind_any(uap->pid)) == NULL)
1766 AUDIT_ARG_PROCESS(p);
1767 error = p_cansignal(td, p, uap->signum);
1768 if (error == 0 && uap->signum)
1769 pksignal(p, uap->signum, &ksi);
1774 case -1: /* broadcast signal */
1775 return (killpg1(td, uap->signum, 0, 1, &ksi));
1776 case 0: /* signal own process group */
1777 return (killpg1(td, uap->signum, 0, 0, &ksi));
1778 default: /* negative explicit process group */
1779 return (killpg1(td, uap->signum, -uap->pid, 0, &ksi));
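/*
 * Illustrative summary, not part of the original file: how the pid argument
 * is dispatched by sys_kill() above (pgid below is a hypothetical variable).
 *
 *	kill(pid, SIGTERM)	pid > 0:  signal that process via pksignal()
 *	kill(0, SIGTERM)	pid == 0: caller's process group via killpg1()
 *	kill(-1, SIGTERM)	pid == -1: broadcast via killpg1() with all = 1
 *	kill(-pgid, SIGTERM)	pid < -1: process group pgid via killpg1()
 *
 *	if (kill(-pgid, SIGTERM) == -1)
 *		err(1, "kill");
 */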
1785 sys_pdkill(struct thread *td, struct pdkill_args *uap)
1790 AUDIT_ARG_SIGNUM(uap->signum);
1791 AUDIT_ARG_FD(uap->fd);
1792 if ((u_int)uap->signum > _SIG_MAXSIG)
1795 error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
1798 AUDIT_ARG_PROCESS(p);
1799 error = p_cansignal(td, p, uap->signum);
1800 if (error == 0 && uap->signum)
1801 kern_psignal(p, uap->signum);
1806 #if defined(COMPAT_43)
1807 #ifndef _SYS_SYSPROTO_H_
1808 struct okillpg_args {
1815 okillpg(struct thread *td, struct okillpg_args *uap)
1819 AUDIT_ARG_SIGNUM(uap->signum);
1820 AUDIT_ARG_PID(uap->pgid);
1821 if ((u_int)uap->signum > _SIG_MAXSIG)
1824 ksiginfo_init(&ksi);
1825 ksi.ksi_signo = uap->signum;
1826 ksi.ksi_code = SI_USER;
1827 ksi.ksi_pid = td->td_proc->p_pid;
1828 ksi.ksi_uid = td->td_ucred->cr_ruid;
1829 return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
1831 #endif /* COMPAT_43 */
1833 #ifndef _SYS_SYSPROTO_H_
1834 struct sigqueue_args {
1837 /* union sigval */ void *value;
1841 sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
1845 sv.sival_ptr = uap->value;
1847 return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
1851 kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value)
1857 if ((u_int)signum > _SIG_MAXSIG)
1861 * Specification says sigqueue can only send signal to
1867 if ((p = pfind_any(pid)) == NULL)
1869 error = p_cansignal(td, p, signum);
1870 if (error == 0 && signum != 0) {
1871 ksiginfo_init(&ksi);
1872 ksi.ksi_flags = KSI_SIGQ;
1873 ksi.ksi_signo = signum;
1874 ksi.ksi_code = SI_QUEUE;
1875 ksi.ksi_pid = td->td_proc->p_pid;
1876 ksi.ksi_uid = td->td_ucred->cr_ruid;
1877 ksi.ksi_value = *value;
1878 error = pksignal(p, ksi.ksi_signo, &ksi);
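/*
 * Illustrative userland sketch, not part of the original file: queueing a
 * value-carrying realtime signal with sigqueue(2), handled by
 * kern_sigqueue() above.  target_pid is a hypothetical variable; the
 * receiver reads the value from si_value in a SA_SIGINFO handler.
 *
 *	#include <err.h>
 *	#include <signal.h>
 *
 *	union sigval sv;
 *
 *	sv.sival_int = 42;
 *	if (sigqueue(target_pid, SIGRTMIN, sv) == -1)
 *		err(1, "sigqueue");
 */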
1885 * Send a signal to a process group.
1888 gsignal(int pgid, int sig, ksiginfo_t *ksi)
1893 sx_slock(&proctree_lock);
1894 pgrp = pgfind(pgid);
1895 sx_sunlock(&proctree_lock);
1897 pgsignal(pgrp, sig, 0, ksi);
1904 * Send a signal to a process group. If checkctty is 1,
1905 * limit to members that have a controlling terminal.
1908 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
1913 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1914 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1916 if (p->p_state == PRS_NORMAL &&
1917 (checkctty == 0 || p->p_flag & P_CONTROLT))
1918 pksignal(p, sig, ksi);
1926 * Recalculate the signal mask and reset the signal disposition after
1927 * usermode frame for delivery is formed. Should be called after
1928 * mach-specific routine, because sysent->sv_sendsig() needs correct
1929 * ps_siginfo and signal mask.
1932 postsig_done(int sig, struct thread *td, struct sigacts *ps)
1936 mtx_assert(&ps->ps_mtx, MA_OWNED);
1937 td->td_ru.ru_nsignals++;
1938 mask = ps->ps_catchmask[_SIG_IDX(sig)];
1939 if (!SIGISMEMBER(ps->ps_signodefer, sig))
1940 SIGADDSET(mask, sig);
1941 kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
1942 SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
1943 if (SIGISMEMBER(ps->ps_sigreset, sig))
1949 * Send a signal caused by a trap to the current thread. If it will be
1950 * caught immediately, deliver it with correct code. Otherwise, post it
1954 trapsignal(struct thread *td, ksiginfo_t *ksi)
1962 sig = ksi->ksi_signo;
1963 code = ksi->ksi_code;
1964 KASSERT(_SIG_VALID(sig), ("invalid signal"));
1968 mtx_lock(&ps->ps_mtx);
1969 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
1970 !SIGISMEMBER(td->td_sigmask, sig)) {
1972 if (KTRPOINT(curthread, KTR_PSIG))
1973 ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
1974 &td->td_sigmask, code);
1976 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
1977 ksi, &td->td_sigmask);
1978 postsig_done(sig, td, ps);
1979 mtx_unlock(&ps->ps_mtx);
1982 * Avoid a possible infinite loop if the thread
1983 * masking the signal or process is ignoring the
1986 if (kern_forcesigexit &&
1987 (SIGISMEMBER(td->td_sigmask, sig) ||
1988 ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
1989 SIGDELSET(td->td_sigmask, sig);
1990 SIGDELSET(ps->ps_sigcatch, sig);
1991 SIGDELSET(ps->ps_sigignore, sig);
1992 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1994 mtx_unlock(&ps->ps_mtx);
1995 p->p_sig = sig; /* XXX to verify code */
1996 tdsendsignal(p, td, sig, ksi);
2001 static struct thread *
2002 sigtd(struct proc *p, int sig, int prop)
2004 struct thread *td, *signal_td;
2006 PROC_LOCK_ASSERT(p, MA_OWNED);
2009 * Check if current thread can handle the signal without
2010 * switching context to another thread.
2012 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
2015 FOREACH_THREAD_IN_PROC(p, td) {
2016 if (!SIGISMEMBER(td->td_sigmask, sig)) {
2021 if (signal_td == NULL)
2022 signal_td = FIRST_THREAD_IN_PROC(p);
2027 * Send the signal to the process. If the signal has an action, the action
2028 * is usually performed by the target process rather than the caller; we add
2029 * the signal to the set of pending signals for the process.
2032 * o When a stop signal is sent to a sleeping process that takes the
2033 * default action, the process is stopped without awakening it.
2034 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2035 * regardless of the signal action (eg, blocked or ignored).
2037 * Other ignored signals are discarded immediately.
2039 * NB: This function may be entered from the debugger via the "kill" DDB
2040 * command. There is little that can be done to mitigate the possibly messy
2041 * side effects of this unwise possibility.
2044 kern_psignal(struct proc *p, int sig)
2048 ksiginfo_init(&ksi);
2049 ksi.ksi_signo = sig;
2050 ksi.ksi_code = SI_KERNEL;
2051 (void) tdsendsignal(p, NULL, sig, &ksi);
2055 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
2058 return (tdsendsignal(p, NULL, sig, ksi));
2061 /* Utility function for finding a thread to send signal event to. */
2063 sigev_findtd(struct proc *p ,struct sigevent *sigev, struct thread **ttd)
2067 if (sigev->sigev_notify == SIGEV_THREAD_ID) {
2068 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
2080 tdsignal(struct thread *td, int sig)
2084 ksiginfo_init(&ksi);
2085 ksi.ksi_signo = sig;
2086 ksi.ksi_code = SI_KERNEL;
2087 (void) tdsendsignal(td->td_proc, td, sig, &ksi);
2091 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
2094 (void) tdsendsignal(td->td_proc, td, sig, ksi);
2098 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
2101 sigqueue_t *sigqueue;
2108 MPASS(td == NULL || p == td->td_proc);
2109 PROC_LOCK_ASSERT(p, MA_OWNED);
2111 if (!_SIG_VALID(sig))
2112 panic("%s(): invalid signal %d", __func__, sig);
2114 KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
2117 * IEEE Std 1003.1-2001: return success when killing a zombie.
2119 if (p->p_state == PRS_ZOMBIE) {
2120 if (ksi && (ksi->ksi_flags & KSI_INS))
2121 ksiginfo_tryfree(ksi);
2126 KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
2127 prop = sigprop(sig);
2130 td = sigtd(p, sig, prop);
2131 sigqueue = &p->p_sigqueue;
2133 sigqueue = &td->td_sigqueue;
2135 SDT_PROBE3(proc, , , signal__send, td, p, sig);
2138 * If the signal is being ignored,
2139 * then we forget about it immediately.
2140 * (Note: we don't set SIGCONT in ps_sigignore,
2141 * and if it is set to SIG_IGN,
2142 * action will be SIG_DFL here.)
2144 mtx_lock(&ps->ps_mtx);
2145 if (SIGISMEMBER(ps->ps_sigignore, sig)) {
2146 SDT_PROBE3(proc, , , signal__discard, td, p, sig);
2148 mtx_unlock(&ps->ps_mtx);
2149 if (ksi && (ksi->ksi_flags & KSI_INS))
2150 ksiginfo_tryfree(ksi);
2153 if (SIGISMEMBER(td->td_sigmask, sig))
2155 else if (SIGISMEMBER(ps->ps_sigcatch, sig))
2159 if (SIGISMEMBER(ps->ps_sigintr, sig))
2163 mtx_unlock(&ps->ps_mtx);
2165 if (prop & SIGPROP_CONT)
2166 sigqueue_delete_stopmask_proc(p);
2167 else if (prop & SIGPROP_STOP) {
2169 * If sending a tty stop signal to a member of an orphaned
2170 * process group, discard the signal here if the action
2171 * is default; don't stop the process below if sleeping,
2172 * and don't clear any pending SIGCONT.
2174 if ((prop & SIGPROP_TTYSTOP) &&
2175 (p->p_pgrp->pg_jobc == 0) &&
2176 (action == SIG_DFL)) {
2177 if (ksi && (ksi->ksi_flags & KSI_INS))
2178 ksiginfo_tryfree(ksi);
2181 sigqueue_delete_proc(p, SIGCONT);
2182 if (p->p_flag & P_CONTINUED) {
2183 p->p_flag &= ~P_CONTINUED;
2184 PROC_LOCK(p->p_pptr);
2185 sigqueue_take(p->p_ksi);
2186 PROC_UNLOCK(p->p_pptr);
2190 ret = sigqueue_add(sigqueue, sig, ksi);
2195 * Defer further processing for signals which are held,
2196 * except that stopped processes must be continued by SIGCONT.
2198 if (action == SIG_HOLD &&
2199 !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
2202 /* SIGKILL: Remove procfs STOPEVENTs. */
2203 if (sig == SIGKILL) {
2204 /* from procfs_ioctl.c: PIOCBIC */
2206 /* from procfs_ioctl.c: PIOCCONT */
2211 * Some signals have a process-wide effect and a per-thread
2212 * component. Most processing occurs when the process next
2213 * tries to cross the user boundary, however there are some
2214 * times when processing needs to be done immediately, such as
2215 * waking up threads so that they can cross the user boundary.
2216 * We try to do the per-process part here.
2218 if (P_SHOULDSTOP(p)) {
2219 KASSERT(!(p->p_flag & P_WEXIT),
2220 ("signal to stopped but exiting process"));
2221 if (sig == SIGKILL) {
2223 * If traced process is already stopped,
2224 * then no further action is necessary.
2226 if (p->p_flag & P_TRACED)
2229 * SIGKILL sets process running.
2230 * It will die elsewhere.
2231 * All threads must be restarted.
2233 p->p_flag &= ~P_STOPPED_SIG;
2237 if (prop & SIGPROP_CONT) {
2239 * If traced process is already stopped,
2240 * then no further action is necessary.
2242 if (p->p_flag & P_TRACED)
2245 * If SIGCONT is default (or ignored), we continue the
2246 * process but don't leave the signal in sigqueue as
2247 * it has no further action. If SIGCONT is held, we
2248 * continue the process and leave the signal in
2249 * sigqueue. If the process catches SIGCONT, let it
2250 * handle the signal itself. If it isn't waiting on
2251 * an event, it goes back to run state.
2252 * Otherwise, process goes back to sleep state.
2254 p->p_flag &= ~P_STOPPED_SIG;
2256 if (p->p_numthreads == p->p_suspcount) {
2258 p->p_flag |= P_CONTINUED;
2259 p->p_xsig = SIGCONT;
2260 PROC_LOCK(p->p_pptr);
2261 childproc_continued(p);
2262 PROC_UNLOCK(p->p_pptr);
2265 if (action == SIG_DFL) {
2266 thread_unsuspend(p);
2268 sigqueue_delete(sigqueue, sig);
2271 if (action == SIG_CATCH) {
2273 * The process wants to catch it so it needs
2274 * to run at least one thread, but which one?
2280 * The signal is not ignored or caught.
2282 thread_unsuspend(p);
2287 if (prop & SIGPROP_STOP) {
2289 * If traced process is already stopped,
2290 * then no further action is necessary.
2292 if (p->p_flag & P_TRACED)
2295 * Already stopped, don't need to stop again
2296 * (If we did the shell could get confused).
2297 * Just make sure the signal STOP bit is set.
2299 p->p_flag |= P_STOPPED_SIG;
2300 sigqueue_delete(sigqueue, sig);
2305 * All other kinds of signals:
2306 * If a thread is sleeping interruptibly, simulate a
2307 * wakeup so that when it is continued it will be made
2308 * runnable and can look at the signal. However, don't make
2309 * the PROCESS runnable, leave it stopped.
2310 * It may run a bit until it hits a thread_suspend_check().
2315 if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
2316 wakeup_swapper = sleepq_abort(td, intrval);
2323 * Mutexes are short lived. Threads waiting on them will
2324 * hit thread_suspend_check() soon.
2326 } else if (p->p_state == PRS_NORMAL) {
2327 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2328 tdsigwakeup(td, sig, action, intrval);
2332 MPASS(action == SIG_DFL);
2334 if (prop & SIGPROP_STOP) {
2335 if (p->p_flag & (P_PPWAIT|P_WEXIT))
2337 p->p_flag |= P_STOPPED_SIG;
2340 wakeup_swapper = sig_suspend_threads(td, p, 1);
2341 if (p->p_numthreads == p->p_suspcount) {
2343 * Only a thread sending a signal to another
2344 * process can reach here; a thread sending a
2345 * signal to its own process does not suspend
2346 * itself here, so p_numthreads should never
2347 * equal p_suspcount at this point.
2351 sigqueue_delete_proc(p, p->p_xsig);
2359 /* Not in "NORMAL" state; discard the signal. */
2360 sigqueue_delete(sigqueue, sig);
2365 * The process is not stopped so we need to apply the signal to all the
2369 tdsigwakeup(td, sig, action, intrval);
2371 thread_unsuspend(p);
2374 /* If we jump here, proc slock should not be owned. */
2375 PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
2380 * The force of a signal has been directed against a single
2381 * thread. We need to see what we can do about knocking it
2382 * out of any sleep it may be in etc.
2385 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2387 struct proc *p = td->td_proc;
2392 PROC_LOCK_ASSERT(p, MA_OWNED);
2393 prop = sigprop(sig);
2398 * Bring the priority of a thread up if we want it to get
2399 * killed in this lifetime. Be careful to avoid bumping the
2400 * priority of the idle thread, since we still allow to signal
2403 if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2404 td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2405 sched_prio(td, PUSER);
2406 if (TD_ON_SLEEPQ(td)) {
2408 * If thread is sleeping uninterruptibly
2409 * we can't interrupt the sleep... the signal will
2410 * be noticed when the process returns through
2411 * trap() or syscall().
2413 if ((td->td_flags & TDF_SINTR) == 0)
2416 * If SIGCONT is default (or ignored) and process is
2417 * asleep, we are finished; the process should not
2420 if ((prop & SIGPROP_CONT) && action == SIG_DFL) {
2423 sigqueue_delete(&p->p_sigqueue, sig);
2425 * It may be on either list in this state.
2426 * Remove from both for now.
2428 sigqueue_delete(&td->td_sigqueue, sig);
2433 * Don't awaken a sleeping thread for SIGSTOP if the
2434 * STOP signal is deferred.
2436 if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
2437 TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2441 * Give low priority threads a better chance to run.
2443 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2444 sched_prio(td, PUSER);
2446 wakeup_swapper = sleepq_abort(td, intrval);
2449 * Other states do nothing with the signal immediately,
2450 * other than kicking ourselves if we are running.
2451 * It will either never be noticed, or noticed very soon.
2454 if (TD_IS_RUNNING(td) && td != curthread)
2466 sig_suspend_threads(struct thread *td, struct proc *p, int sending)
2471 PROC_LOCK_ASSERT(p, MA_OWNED);
2472 PROC_SLOCK_ASSERT(p, MA_OWNED);
2473 MPASS(sending || td == curthread);
2476 FOREACH_THREAD_IN_PROC(p, td2) {
2478 td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
2479 if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
2480 (td2->td_flags & TDF_SINTR)) {
2481 if (td2->td_flags & TDF_SBDRY) {
2483 * Once a thread is asleep with
2484 * TDF_SBDRY and without TDF_SERESTART
2485 * or TDF_SEINTR set, it should never
2486 * become suspended due to this check.
2488 KASSERT(!TD_IS_SUSPENDED(td2),
2489 ("thread with deferred stops suspended"));
2490 if (TD_SBDRY_INTR(td2))
2491 wakeup_swapper |= sleepq_abort(td2,
2492 TD_SBDRY_ERRNO(td2));
2493 } else if (!TD_IS_SUSPENDED(td2)) {
2494 thread_suspend_one(td2);
2496 } else if (!TD_IS_SUSPENDED(td2)) {
2497 if (sending || td != td2)
2498 td2->td_flags |= TDF_ASTPENDING;
2500 if (TD_IS_RUNNING(td2) && td2 != td)
2501 forward_signal(td2);
2506 return (wakeup_swapper);
2510 * Stop the process for an event deemed interesting to the debugger. If si is
2511 * non-NULL, this is a signal exchange; the new signal requested by the
2512 * debugger will be returned for handling. If si is NULL, this is some other
2513 * type of interesting event. The debugger may request a signal be delivered in
2514 * that case as well, however it will be deferred until it can be handled.
2517 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
2519 struct proc *p = td->td_proc;
2524 PROC_LOCK_ASSERT(p, MA_OWNED);
2525 KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2526 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2527 &p->p_mtx.lock_object, "Stopping for traced signal");
2531 if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
2532 td->td_dbgflags |= TDB_XSIG;
2533 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
2534 td->td_tid, p->p_pid, td->td_dbgflags, sig);
2536 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
2539 * Ensure that, if we've been PT_KILLed, the
2540 * exit status reflects that. Another thread
2541 * may also be in ptracestop(), having just
2542 * received the SIGKILL, but this thread was
2543 * unsuspended first.
2545 td->td_dbgflags &= ~TDB_XSIG;
2546 td->td_xsig = SIGKILL;
2550 if (p->p_flag & P_SINGLE_EXIT &&
2551 !(td->td_dbgflags & TDB_EXIT)) {
2553 * Ignore ptrace stops except for thread exit
2554 * events when the process exits.
2556 td->td_dbgflags &= ~TDB_XSIG;
2562 * Make wait(2) work. Ensure that right after the
2563 * attach, the thread which was decided to become the
2564 * leader of attach gets reported to the waiter.
2565 * Otherwise, just avoid overwriting another thread's
2566 * assignment to p_xthread. If another thread has
2567 * already set p_xthread, the current thread will get
2568 * a chance to report itself upon the next iteration.
2570 if ((td->td_dbgflags & TDB_FSTP) != 0 ||
2571 ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
2572 p->p_xthread == NULL)) {
2577 * If we are on sleepqueue already,
2578 * let sleepqueue code decide if it
2579 * needs to go sleep after attach.
2581 if (td->td_wchan == NULL)
2582 td->td_dbgflags &= ~TDB_FSTP;
2584 p->p_flag2 &= ~P2_PTRACE_FSTP;
2585 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
2586 sig_suspend_threads(td, p, 0);
2588 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
2589 td->td_dbgflags &= ~TDB_STOPATFORK;
2592 thread_suspend_switch(td, p);
2593 if (p->p_xthread == td)
2594 p->p_xthread = NULL;
2595 if (!(p->p_flag & P_TRACED))
2597 if (td->td_dbgflags & TDB_SUSPEND) {
2598 if (p->p_flag & P_SINGLE_EXIT)
2606 if (si != NULL && sig == td->td_xsig) {
2607 /* Parent wants us to take the original signal unchanged. */
2608 si->ksi_flags |= KSI_HEAD;
2609 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
2611 } else if (td->td_xsig != 0) {
2613 * If parent wants us to take a new signal, then it will leave
2614 * it in td->td_xsig; otherwise we just look for signals again.
2616 ksiginfo_init(&ksi);
2617 ksi.ksi_signo = td->td_xsig;
2618 ksi.ksi_flags |= KSI_PTRACE;
2619 prop = sigprop(td->td_xsig);
2620 td2 = sigtd(p, td->td_xsig, prop);
2621 tdsendsignal(p, td2, td->td_xsig, &ksi);
2626 return (td->td_xsig);
2630 reschedule_signals(struct proc *p, sigset_t block, int flags)
2636 PROC_LOCK_ASSERT(p, MA_OWNED);
2638 mtx_assert(&ps->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0 ?
2639 MA_OWNED : MA_NOTOWNED);
2640 if (SIGISEMPTY(p->p_siglist))
2642 SIGSETAND(block, p->p_siglist);
2643 while ((sig = sig_ffs(&block)) != 0) {
2644 SIGDELSET(block, sig);
2645 td = sigtd(p, sig, 0);
2647 if (!(flags & SIGPROCMASK_PS_LOCKED))
2648 mtx_lock(&ps->ps_mtx);
2649 if (p->p_flag & P_TRACED ||
2650 (SIGISMEMBER(ps->ps_sigcatch, sig) &&
2651 !SIGISMEMBER(td->td_sigmask, sig)))
2652 tdsigwakeup(td, sig, SIG_CATCH,
2653 (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
2655 if (!(flags & SIGPROCMASK_PS_LOCKED))
2656 mtx_unlock(&ps->ps_mtx);
2661 tdsigcleanup(struct thread *td)
2667 PROC_LOCK_ASSERT(p, MA_OWNED);
2669 sigqueue_flush(&td->td_sigqueue);
2670 if (p->p_numthreads == 1)
2674 * Since we cannot handle signals, notify signal post code
2675 * about this by filling the sigmask.
2677 * Also, if needed, wake up thread(s) that do not block the
2678 * same signals as the exiting thread, since the thread might
2679 * have been selected for delivery and woken up.
2681 SIGFILLSET(unblocked);
2682 SIGSETNAND(unblocked, td->td_sigmask);
2683 SIGFILLSET(td->td_sigmask);
2684 reschedule_signals(p, unblocked, 0);
2689 sigdeferstop_curr_flags(int cflags)
2692 MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
2693 (cflags & TDF_SBDRY) != 0);
2694 return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
2698 * Defer the delivery of SIGSTOP for the current thread, according to
2699 * the requested mode. Returns previous flags, which must be restored
2700 * by sigallowstop().
2702 * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
2703	 * cleared by the current thread, which allows the lock-less read-only
2707 sigdeferstop_impl(int mode)
2713 cflags = sigdeferstop_curr_flags(td->td_flags);
2715 case SIGDEFERSTOP_NOP:
2718 case SIGDEFERSTOP_OFF:
2721 case SIGDEFERSTOP_SILENT:
2722 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
2724 case SIGDEFERSTOP_EINTR:
2725 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
2727 case SIGDEFERSTOP_ERESTART:
2728 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
2731 panic("sigdeferstop: invalid mode %x", mode);
2734 if (cflags == nflags)
2735 return (SIGDEFERSTOP_VAL_NCHG);
2737 td->td_flags = (td->td_flags & ~cflags) | nflags;
2743 * Restores the STOP handling mode, typically permitting the delivery
2744 * of SIGSTOP for the current thread. This does not immediately
2745 * suspend if a stop was posted. Instead, the thread will suspend
2746 * either via ast() or a subsequent interruptible sleep.
2749 sigallowstop_impl(int prev)
2754 KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
2755 KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
2756 ("sigallowstop: incorrect previous mode %x", prev));
2758 cflags = sigdeferstop_curr_flags(td->td_flags);
2759 if (cflags != prev) {
2761 td->td_flags = (td->td_flags & ~cflags) | prev;
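/*
 * A minimal usage sketch (illustrative, not taken verbatim from this file):
 * callers that must not be suspended by SIGSTOP in the middle of an
 * operation bracket it with sigdeferstop()/sigallowstop():
 *
 *	int sds;
 *
 *	sds = sigdeferstop(SIGDEFERSTOP_SILENT);
 *	error = some_sleepable_operation();	/- hypothetical helper -/
 *	sigallowstop(sds);
 *
 * Any stop posted in between is acted upon later, via ast() or the next
 * interruptible sleep, as described above.
 */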
2767	 * If the current process has received a signal (one that should be caught, should
2768	 * cause termination, or should interrupt the current syscall), return its number.
2769 * Stop signals with default action are processed immediately, then cleared;
2770 * they aren't returned. This is checked after each entry to the system for
2771 * a syscall or trap (though this can usually be done without calling issignal
2772 * by checking the pending signal masks in cursig.) The normal call
2775 * while (sig = cursig(curthread))
2779 issignal(struct thread *td)
2783 struct sigqueue *queue;
2784 sigset_t sigpending;
2786 int prop, sig, traced;
2790 mtx_assert(&ps->ps_mtx, MA_OWNED);
2791 PROC_LOCK_ASSERT(p, MA_OWNED);
2793 traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
2795 sigpending = td->td_sigqueue.sq_signals;
2796 SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
2797 SIGSETNAND(sigpending, td->td_sigmask);
2799 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
2800 (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2801 SIG_STOPSIGMASK(sigpending);
2802 if (SIGISEMPTY(sigpending)) /* no signal to send */
2804 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
2805 (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
2806 SIGISMEMBER(sigpending, SIGSTOP)) {
2808 * If debugger just attached, always consume
2809 * SIGSTOP from ptrace(PT_ATTACH) first, to
2810 * execute the debugger attach ritual in
2814 td->td_dbgflags |= TDB_FSTP;
2816 sig = sig_ffs(&sigpending);
2819 if (p->p_stops & S_SIG) {
2820 mtx_unlock(&ps->ps_mtx);
2821 stopevent(p, S_SIG, sig);
2822 mtx_lock(&ps->ps_mtx);
2826 * We should see pending but ignored signals
2827 * only if P_TRACED was on when they were posted.
2829 if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
2830 sigqueue_delete(&td->td_sigqueue, sig);
2831 sigqueue_delete(&p->p_sigqueue, sig);
2834 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) {
2836 * If traced, always stop.
2837 * Remove old signal from queue before the stop.
2838 * XXX shrug off debugger, it causes siginfo to
2841 queue = &td->td_sigqueue;
2842 ksiginfo_init(&ksi);
2843 if (sigqueue_get(queue, sig, &ksi) == 0) {
2844 queue = &p->p_sigqueue;
2845 sigqueue_get(queue, sig, &ksi);
2847 td->td_si = ksi.ksi_info;
2849 mtx_unlock(&ps->ps_mtx);
2850 sig = ptracestop(td, sig, &ksi);
2851 mtx_lock(&ps->ps_mtx);
2853 td->td_si.si_signo = 0;
2856 * Keep looking if the debugger discarded or
2857 * replaced the signal.
2863 * If the signal became masked, re-queue it.
2865 if (SIGISMEMBER(td->td_sigmask, sig)) {
2866 ksi.ksi_flags |= KSI_HEAD;
2867 sigqueue_add(&p->p_sigqueue, sig, &ksi);
2872 * If the traced bit got turned off, requeue
2873 * the signal and go back up to the top to
2874 * rescan signals. This ensures that p_sig*
2875 * and p_sigact are consistent.
2877 if ((p->p_flag & P_TRACED) == 0) {
2878 ksi.ksi_flags |= KSI_HEAD;
2879 sigqueue_add(queue, sig, &ksi);
2884 prop = sigprop(sig);
2887 * Decide whether the signal should be returned.
2888 * Return the signal's number, or fall through
2889 * to clear it from the pending mask.
2891 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
2893 case (intptr_t)SIG_DFL:
2895 * Don't take default actions on system processes.
2897 if (p->p_pid <= 1) {
2900 * Are you sure you want to ignore SIGSEGV
2903 printf("Process (pid %lu) got signal %d\n",
2904 (u_long)p->p_pid, sig);
2906 break; /* == ignore */
2909 * If there is a pending stop signal to process with
2910 * default action, stop here, then clear the signal.
2911 * Traced or exiting processes should ignore stops.
2912 * Additionally, a member of an orphaned process group
2913 * should ignore tty stops.
2915 if (prop & SIGPROP_STOP) {
2917 (P_TRACED | P_WEXIT | P_SINGLE_EXIT) ||
2918 (p->p_pgrp->pg_jobc == 0 &&
2919 prop & SIGPROP_TTYSTOP))
2920 break; /* == ignore */
2921 if (TD_SBDRY_INTR(td)) {
2922 KASSERT((td->td_flags & TDF_SBDRY) != 0,
2923 ("lost TDF_SBDRY"));
2926 mtx_unlock(&ps->ps_mtx);
2927 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2928 &p->p_mtx.lock_object, "Catching SIGSTOP");
2929 sigqueue_delete(&td->td_sigqueue, sig);
2930 sigqueue_delete(&p->p_sigqueue, sig);
2931 p->p_flag |= P_STOPPED_SIG;
2934 sig_suspend_threads(td, p, 0);
2935 thread_suspend_switch(td, p);
2937 mtx_lock(&ps->ps_mtx);
2939 } else if (prop & SIGPROP_IGNORE) {
2941 * Except for SIGCONT, shouldn't get here.
2942 * Default action is to ignore; drop it.
2944 break; /* == ignore */
2949 case (intptr_t)SIG_IGN:
2951 * Masking above should prevent us ever trying
2952 * to take action on an ignored signal other
2953 * than SIGCONT, unless process is traced.
2955 if ((prop & SIGPROP_CONT) == 0 &&
2956 (p->p_flag & P_TRACED) == 0)
2957 printf("issignal\n");
2958 break; /* == ignore */
2962 * This signal has an action, let
2963 * postsig() process it.
2967 sigqueue_delete(&td->td_sigqueue, sig); /* take the signal! */
2968 sigqueue_delete(&p->p_sigqueue, sig);
2975 thread_stopped(struct proc *p)
2979 PROC_LOCK_ASSERT(p, MA_OWNED);
2980 PROC_SLOCK_ASSERT(p, MA_OWNED);
2984 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
2986 p->p_flag &= ~P_WAITED;
2987 PROC_LOCK(p->p_pptr);
2988 childproc_stopped(p, (p->p_flag & P_TRACED) ?
2989 CLD_TRAPPED : CLD_STOPPED);
2990 PROC_UNLOCK(p->p_pptr);
2996 * Take the action for the specified signal
2997 * from the current set of pending signals.
3007 sigset_t returnmask;
3009 KASSERT(sig != 0, ("postsig"));
3013 PROC_LOCK_ASSERT(p, MA_OWNED);
3015 mtx_assert(&ps->ps_mtx, MA_OWNED);
3016 ksiginfo_init(&ksi);
3017 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3018 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3020 ksi.ksi_signo = sig;
3021 if (ksi.ksi_code == SI_TIMER)
3022 itimer_accept(p, ksi.ksi_timerid, &ksi);
3023 action = ps->ps_sigact[_SIG_IDX(sig)];
3025 if (KTRPOINT(td, KTR_PSIG))
3026 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
3027 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
3029 if ((p->p_stops & S_SIG) != 0) {
3030 mtx_unlock(&ps->ps_mtx);
3031 stopevent(p, S_SIG, sig);
3032 mtx_lock(&ps->ps_mtx);
3035 if (action == SIG_DFL) {
3037 * Default action, where the default is to kill
3038 * the process. (Other cases were ignored above.)
3040 mtx_unlock(&ps->ps_mtx);
3041 proc_td_siginfo_capture(td, &ksi.ksi_info);
3046 * If we get here, the signal must be caught.
3048 KASSERT(action != SIG_IGN, ("postsig action %p", action));
3049 KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
3050 ("postsig action: blocked sig %d", sig));
3053 * Set the new mask value and also defer further
3054 * occurrences of this signal.
3056 * Special case: user has done a sigsuspend. Here the
3057 * current mask is not of interest, but rather the
3058 * mask from before the sigsuspend is what we want
3059 * restored after the signal processing is completed.
3061 if (td->td_pflags & TDP_OLDMASK) {
3062 returnmask = td->td_oldsigmask;
3063 td->td_pflags &= ~TDP_OLDMASK;
3065 returnmask = td->td_sigmask;
3067 if (p->p_sig == sig) {
3070 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3071 postsig_done(sig, td, ps);
3077 proc_wkilled(struct proc *p)
3080 PROC_LOCK_ASSERT(p, MA_OWNED);
3081 if ((p->p_flag & P_WKILLED) == 0) {
3082 p->p_flag |= P_WKILLED;
3084 * Notify swapper that there is a process to swap in.
3085 * The notification is racy, at worst it would take 10
3086 * seconds for the swapper process to notice.
3088 if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0)
3094	 * Kill the specified process for the stated reason.
3097 killproc(struct proc *p, char *why)
3100 PROC_LOCK_ASSERT(p, MA_OWNED);
3101 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
3103 log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n",
3104 p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id,
3105 p->p_ucred->cr_uid, why);
3107 kern_psignal(p, SIGKILL);
3111 * Force the current process to exit with the specified signal, dumping core
3112 * if appropriate. We bypass the normal tests for masked and caught signals,
3113 * allowing unrecoverable failures to terminate the process without changing
3114 * signal state. Mark the accounting record with the signal termination.
3115 * If dumping core, save the signal number for the debugger. Calls exit and
3119 sigexit(struct thread *td, int sig)
3121 struct proc *p = td->td_proc;
3123 PROC_LOCK_ASSERT(p, MA_OWNED);
3124 p->p_acflag |= AXSIG;
3126 * We must be single-threading to generate a core dump. This
3127 * ensures that the registers in the core file are up-to-date.
3128 * Also, the ELF dump handler assumes that the thread list doesn't
3129 * change out from under it.
3131 * XXX If another thread attempts to single-thread before us
3132 * (e.g. via fork()), we won't get a dump at all.
3134 if ((sigprop(sig) & SIGPROP_CORE) &&
3135 thread_single(p, SINGLE_NO_EXIT) == 0) {
3138 * Log signals which would cause core dumps
3139 * (Log as LOG_INFO to appease those who don't want
3141 * XXX : Todo, as well as euid, write out ruid too
3142 * Note that coredump() drops proc lock.
3144 if (coredump(td) == 0)
3146 if (kern_logsigexit)
3148 "pid %d (%s), jid %d, uid %d: exited on "
3149 "signal %d%s\n", p->p_pid, p->p_comm,
3150 p->p_ucred->cr_prison->pr_id,
3151 td->td_ucred->cr_uid,
3153 sig & WCOREFLAG ? " (core dumped)" : "");
3161 * Send queued SIGCHLD to parent when child process's state
3165 sigparent(struct proc *p, int reason, int status)
3167 PROC_LOCK_ASSERT(p, MA_OWNED);
3168 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3170 if (p->p_ksi != NULL) {
3171 p->p_ksi->ksi_signo = SIGCHLD;
3172 p->p_ksi->ksi_code = reason;
3173 p->p_ksi->ksi_status = status;
3174 p->p_ksi->ksi_pid = p->p_pid;
3175 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
3176 if (KSI_ONQ(p->p_ksi))
3179 pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3183 childproc_jobstate(struct proc *p, int reason, int sig)
3187 PROC_LOCK_ASSERT(p, MA_OWNED);
3188 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3191	 * Wake up the parent sleeping in kern_wait(); also send
3192	 * SIGCHLD to the parent, but SIGCHLD does not guarantee
3193	 * that the parent will wake up, because the parent may have masked
3196 p->p_pptr->p_flag |= P_STATCHILD;
3199 ps = p->p_pptr->p_sigacts;
3200 mtx_lock(&ps->ps_mtx);
3201 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
3202 mtx_unlock(&ps->ps_mtx);
3203 sigparent(p, reason, sig);
3205 mtx_unlock(&ps->ps_mtx);
3209 childproc_stopped(struct proc *p, int reason)
3212 childproc_jobstate(p, reason, p->p_xsig);
3216 childproc_continued(struct proc *p)
3218 childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
3222 childproc_exited(struct proc *p)
3226 if (WCOREDUMP(p->p_xsig)) {
3227 reason = CLD_DUMPED;
3228 status = WTERMSIG(p->p_xsig);
3229 } else if (WIFSIGNALED(p->p_xsig)) {
3230 reason = CLD_KILLED;
3231 status = WTERMSIG(p->p_xsig);
3233 reason = CLD_EXITED;
3234 status = p->p_xexit;
3237 * XXX avoid calling wakeup(p->p_pptr), the work is
3240 sigparent(p, reason, status);
3243 #define MAX_NUM_CORE_FILES 100000
3244 #ifndef NUM_CORE_FILES
3245 #define NUM_CORE_FILES 5
3247 CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES);
3248 static int num_cores = NUM_CORE_FILES;
3251 sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS)
3256 new_val = num_cores;
3257 error = sysctl_handle_int(oidp, &new_val, 0, req);
3258 if (error != 0 || req->newptr == NULL)
3260 if (new_val > MAX_NUM_CORE_FILES)
3261 new_val = MAX_NUM_CORE_FILES;
3264 num_cores = new_val;
3267 SYSCTL_PROC(_debug, OID_AUTO, ncores, CTLTYPE_INT|CTLFLAG_RW,
3268 0, sizeof(int), sysctl_debug_num_cores_check, "I",
3269 "Maximum number of generated process corefiles while using index format");
3271 #define GZIP_SUFFIX ".gz"
3272 #define ZSTD_SUFFIX ".zst"
3274 int compress_user_cores = 0;
3277 sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS)
3281 val = compress_user_cores;
3282 error = sysctl_handle_int(oidp, &val, 0, req);
3283 if (error != 0 || req->newptr == NULL)
3285 if (val != 0 && !compressor_avail(val))
3287 compress_user_cores = val;
3290 SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores, CTLTYPE_INT | CTLFLAG_RWTUN,
3291 0, sizeof(int), sysctl_compress_user_cores, "I",
3292 "Enable compression of user corefiles ("
3293 __XSTRING(COMPRESS_GZIP) " = gzip, "
3294 __XSTRING(COMPRESS_ZSTD) " = zstd)");
3296 int compress_user_cores_level = 6;
3297 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN,
3298 &compress_user_cores_level, 0,
3299 "Corefile compression level");
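/*
 * Usage sketch (not from the original source): from userland, core dump
 * compression is selected by writing the COMPRESS_GZIP or COMPRESS_ZSTD
 * value into kern.compress_user_cores, e.g. (assuming COMPRESS_ZSTD == 2,
 * as reported by the description string above):
 *
 *	sysctl kern.compress_user_cores=2
 *	sysctl kern.compress_user_cores_level=9
 *
 * The handler above refuses a value whose compressor was not compiled in.
 */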
3302 * Protect the access to corefilename[] by allproc_lock.
3304 #define corefilename_lock allproc_lock
3306 static char corefilename[MAXPATHLEN] = {"%N.core"};
3307 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
3310 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
3314 sx_xlock(&corefilename_lock);
3315 error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
3317 sx_xunlock(&corefilename_lock);
3321 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
3322 CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
3323 "Process corefile name format string");
3326 vnode_close_locked(struct thread *td, struct vnode *vp)
3330 vn_close(vp, FWRITE, td->td_ucred, td);
3334 * If the core format has a %I in it, then we need to check
3335 * for existing corefiles before defining a name.
3336 * To do this we iterate over 0..ncores to find a
3337 * non-existing core file name to use. If all core files are
3338 * already used we choose the oldest one.
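 * For example (an illustrative setting, not mandated here): with
 * kern.corefile containing "%N.%I.core" and the default of five index
 * slots, dumps of "foo" rotate through foo.0.core .. foo.4.core, after
 * which the oldest existing file is reused.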
3341 corefile_open_last(struct thread *td, char *name, int indexpos,
3342 int indexlen, int ncores, struct vnode **vpp)
3344 struct vnode *oldvp, *nextvp, *vp;
3346 struct nameidata nd;
3347 int error, i, flags, oflags, cmode;
3349 struct timespec lasttime;
3351 nextvp = oldvp = NULL;
3352 cmode = S_IRUSR | S_IWUSR;
3353 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3354 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3356 for (i = 0; i < ncores; i++) {
3357 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3359 ch = name[indexpos + indexlen];
3360 (void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen,
3362 name[indexpos + indexlen] = ch;
3364 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3365 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3371 NDFREE(&nd, NDF_ONLY_PNBUF);
3372 if ((flags & O_CREAT) == O_CREAT) {
3377 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3379 vnode_close_locked(td, vp);
3383 if (oldvp == NULL ||
3384 lasttime.tv_sec > vattr.va_mtime.tv_sec ||
3385 (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
3386 lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
3388 vnode_close_locked(td, oldvp);
3390 lasttime = vattr.va_mtime;
3392 vnode_close_locked(td, vp);
3396 if (oldvp != NULL) {
3397 if (nextvp == NULL) {
3398 if ((td->td_proc->p_flag & P_SUGID) != 0) {
3400 vnode_close_locked(td, oldvp);
3405 vnode_close_locked(td, oldvp);
3410 vnode_close_locked(td, oldvp);
3419	 * corefile_open(comm, uid, pid, td, compress, signum, vpp, namep)
3420 * Expand the name described in corefilename, using name, uid, and pid
3421 * and open/create core file.
3422	 * corefilename is a printf-like string; its format specifiers include:
3423 * %N name of process ("name")
3424 * %P process id (pid)
3426 * For example, "%N.core" is the default; they can be disabled completely
3427 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
3428 * This is controlled by the sysctl variable kern.corefile (see above).
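 * As an illustration (not part of the original comment): with kern.corefile
 * set to "/cores/%U/%N-%P.core", a dump of pid 1234 of a process named "foo"
 * running as uid 1001 would be written to /cores/1001/foo-1234.core.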
3431 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
3432 int compress, int signum, struct vnode **vpp, char **namep)
3435 struct nameidata nd;
3437 char *hostname, *name;
3438 int cmode, error, flags, i, indexpos, indexlen, oflags, ncores;
3441 format = corefilename;
3442 name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
3446 (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
3447 sx_slock(&corefilename_lock);
3448 for (i = 0; format[i] != '\0'; i++) {
3449 switch (format[i]) {
3450 case '%': /* Format character */
3452 switch (format[i]) {
3454 sbuf_putc(&sb, '%');
3456 case 'H': /* hostname */
3457 if (hostname == NULL) {
3458 hostname = malloc(MAXHOSTNAMELEN,
3461 getcredhostname(td->td_ucred, hostname,
3463 sbuf_printf(&sb, "%s", hostname);
3465 case 'I': /* autoincrementing index */
3466 if (indexpos != -1) {
3467 sbuf_printf(&sb, "%%I");
3471 indexpos = sbuf_len(&sb);
3472 sbuf_printf(&sb, "%u", ncores - 1);
3473 indexlen = sbuf_len(&sb) - indexpos;
3475 case 'N': /* process name */
3476 sbuf_printf(&sb, "%s", comm);
3478 case 'P': /* process id */
3479 sbuf_printf(&sb, "%u", pid);
3481 case 'S': /* signal number */
3482 sbuf_printf(&sb, "%i", signum);
3484 case 'U': /* user id */
3485 sbuf_printf(&sb, "%u", uid);
3489 "Unknown format character %c in "
3490 "corename `%s'\n", format[i], format);
3495 sbuf_putc(&sb, format[i]);
3499 sx_sunlock(&corefilename_lock);
3500 free(hostname, M_TEMP);
3501 if (compress == COMPRESS_GZIP)
3502 sbuf_printf(&sb, GZIP_SUFFIX);
3503 else if (compress == COMPRESS_ZSTD)
3504 sbuf_printf(&sb, ZSTD_SUFFIX);
3505 if (sbuf_error(&sb) != 0) {
3506 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
3507 "long\n", (long)pid, comm, (u_long)uid);
3515 if (indexpos != -1) {
3516 error = corefile_open_last(td, name, indexpos, indexlen, ncores,
3520 "pid %d (%s), uid (%u): Path `%s' failed "
3521 "on initial open test, error = %d\n",
3522 pid, comm, uid, name, error);
3525 cmode = S_IRUSR | S_IWUSR;
3526 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3527 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3528 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3529 if ((td->td_proc->p_flag & P_SUGID) != 0)
3532 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3533 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3537 NDFREE(&nd, NDF_ONLY_PNBUF);
3543 audit_proc_coredump(td, name, error);
3553 * Dump a process' core. The main routine does some
3554 * policy checking, and creates the name of the coredump;
3555 * then it passes on a vnode and a size limit to the process-specific
3556 * coredump routine if there is one; if there _is not_ one, it returns
3557 * ENOSYS; otherwise it returns the error from the process-specific routine.
3561 coredump(struct thread *td)
3563 struct proc *p = td->td_proc;
3564 struct ucred *cred = td->td_ucred;
3568 int error, error1, locked;
3569 char *name; /* name of corefile */
3572 char *fullpath, *freepath = NULL;
3575 PROC_LOCK_ASSERT(p, MA_OWNED);
3576 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
3577 _STOPEVENT(p, S_CORE, 0);
3579 if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
3580 (p->p_flag2 & P2_NOTRACE) != 0) {
3586 * Note that the bulk of limit checking is done after
3587 * the corefile is created. The exception is if the limit
3588 * for corefiles is 0, in which case we don't bother
3589	 * creating the corefile at all. This layout means that a
3590	 * corefile larger than the limit is truncated rather than
3591	 * not created at all.
3593 limit = (off_t)lim_cur(td, RLIMIT_CORE);
3594 if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
3600 error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
3601 compress_user_cores, p->p_sig, &vp, &name);
3606 * Don't dump to non-regular files or files with links.
3607 * Do not dump into system files. Effective user must own the corefile.
3609 if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
3610 vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0 ||
3611 vattr.va_uid != cred->cr_uid) {
3619 /* Postpone other writers, including core dumps of other processes. */
3620 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
3622 lf.l_whence = SEEK_SET;
3625 lf.l_type = F_WRLCK;
3626 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
3630 if (set_core_nodump_flag)
3631 vattr.va_flags = UF_NODUMP;
3632 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3633 VOP_SETATTR(vp, &vattr, cred);
3636 p->p_acflag |= ACORE;
3639 if (p->p_sysent->sv_coredump != NULL) {
3640 error = p->p_sysent->sv_coredump(td, vp, limit, 0);
3646 lf.l_type = F_UNLCK;
3647 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
3649 vn_rangelock_unlock(vp, rl_cookie);
3652 * Notify the userland helper that a process triggered a core dump.
3653 * This allows the helper to run an automated debugging session.
3655 if (error != 0 || coredump_devctl == 0)
3657 sb = sbuf_new_auto();
3658 if (vn_fullpath_global(td, p->p_textvp, &fullpath, &freepath) != 0)
3660 sbuf_printf(sb, "comm=\"");
3661 devctl_safe_quote_sb(sb, fullpath);
3662 free(freepath, M_TEMP);
3663 sbuf_printf(sb, "\" core=\"");
3666	 * We can't look up the core file vnode directly. When replacing a core, and
3667	 * at other random times, we flush the name cache, so the lookup would fail.
3668	 * Instead, if the path of the core is relative, prepend the current dir to it.
3670 if (name[0] != '/') {
3671 fullpath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
3672 if (kern___getcwd(td, fullpath, UIO_SYSSPACE, MAXPATHLEN, MAXPATHLEN) != 0) {
3673 free(fullpath, M_TEMP);
3676 devctl_safe_quote_sb(sb, fullpath);
3677 free(fullpath, M_TEMP);
3680 devctl_safe_quote_sb(sb, name);
3681 sbuf_printf(sb, "\"");
3682 if (sbuf_finish(sb) == 0)
3683 devctl_notify("kernel", "signal", "coredump", sbuf_data(sb));
3687 error1 = vn_close(vp, FWRITE, cred, td);
3691 audit_proc_coredump(td, name, error);
3698	 * Nonexistent system call: signal the process (it may want to handle it).
3699	 * Flag an error in case the process won't see the signal immediately (blocked or ignored).
3701 #ifndef _SYS_SYSPROTO_H_
3708 nosys(struct thread *td, struct nosys_args *args)
3715 tdsignal(td, SIGSYS);
3717 if (kern_lognosys == 1 || kern_lognosys == 3) {
3718 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3721 if (kern_lognosys == 2 || kern_lognosys == 3) {
3722 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3729 * Send a SIGIO or SIGURG signal to a process or process group using stored
3730 * credentials rather than those of the current process.
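 * For context (a sketch of the userland side, not from this file): a
 * descriptor owner is usually registered with fcntl(fd, F_SETOWN, pid)
 * (or a negative pgid) together with O_ASYNC, after which drivers call
 * into this path to raise SIGIO when I/O becomes possible.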
3733 pgsigio(struct sigio **sigiop, int sig, int checkctty)
3736 struct sigio *sigio;
3738 ksiginfo_init(&ksi);
3739 ksi.ksi_signo = sig;
3740 ksi.ksi_code = SI_KERNEL;
3744 if (sigio == NULL) {
3748 if (sigio->sio_pgid > 0) {
3749 PROC_LOCK(sigio->sio_proc);
3750 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
3751 kern_psignal(sigio->sio_proc, sig);
3752 PROC_UNLOCK(sigio->sio_proc);
3753 } else if (sigio->sio_pgid < 0) {
3756 PGRP_LOCK(sigio->sio_pgrp);
3757 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
3759 if (p->p_state == PRS_NORMAL &&
3760 CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
3761 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
3762 kern_psignal(p, sig);
3765 PGRP_UNLOCK(sigio->sio_pgrp);
3771 filt_sigattach(struct knote *kn)
3773 struct proc *p = curproc;
3775 kn->kn_ptr.p_proc = p;
3776 kn->kn_flags |= EV_CLEAR; /* automatically set */
3778 knlist_add(p->p_klist, kn, 0);
3784 filt_sigdetach(struct knote *kn)
3786 struct proc *p = kn->kn_ptr.p_proc;
3788 knlist_remove(p->p_klist, kn, 0);
3792 * signal knotes are shared with proc knotes, so we apply a mask to
3793 * the hint in order to differentiate them from process hints. This
3794 * could be avoided by using a signal-specific knote list, but probably
3795 * isn't worth the trouble.
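 * For reference (sketch): these knotes implement the EVFILT_SIGNAL kqueue
 * filter, registered from userland with something like
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 * filt_signal() below bumps kn_data for each matching delivery, and the
 * automatic EV_CLEAR resets it once the event has been retrieved.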
3798 filt_signal(struct knote *kn, long hint)
3801 if (hint & NOTE_SIGNAL) {
3802 hint &= ~NOTE_SIGNAL;
3804 if (kn->kn_id == hint)
3807 return (kn->kn_data != 0);
3815 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
3816 refcount_init(&ps->ps_refcnt, 1);
3817 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
3822 sigacts_free(struct sigacts *ps)
3825 if (refcount_release(&ps->ps_refcnt) == 0)
3827 mtx_destroy(&ps->ps_mtx);
3828 free(ps, M_SUBPROC);
3832 sigacts_hold(struct sigacts *ps)
3835 refcount_acquire(&ps->ps_refcnt);
3840 sigacts_copy(struct sigacts *dest, struct sigacts *src)
3843 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
3844 mtx_lock(&src->ps_mtx);
3845 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
3846 mtx_unlock(&src->ps_mtx);
3850 sigacts_shared(struct sigacts *ps)
3853 return (ps->ps_refcnt > 1);
3857 sig_drop_caught(struct proc *p)
3863 PROC_LOCK_ASSERT(p, MA_OWNED);
3864 mtx_assert(&ps->ps_mtx, MA_OWNED);
3865 while (SIGNOTEMPTY(ps->ps_sigcatch)) {
3866 sig = sig_ffs(&ps->ps_sigcatch);
3868 if ((sigprop(sig) & SIGPROP_IGNORE) != 0)
3869 sigqueue_delete_proc(p, sig);