2 * Copyright (c) 2014 John Baldwin
3 * Copyright (c) 2014, 2016 The FreeBSD Foundation
5 * Portions of this software were developed by Konstantin Belousov
6 * under sponsorship from the FreeBSD Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/_unrhdr.h>
35 #include <sys/systm.h>
36 #include <sys/capsicum.h>
39 #include <sys/mutex.h>
42 #include <sys/procctl.h>
44 #include <sys/syscallsubr.h>
45 #include <sys/sysproto.h>
46 #include <sys/taskqueue.h>
51 #include <vm/vm_map.h>
52 #include <vm/vm_extern.h>
/*
 * protect_setchild(): set or clear P_PROTECTED (and optionally
 * P2_INHERIT_PROTECTED) on a single process, for PROC_SPROTECT.
 * System processes and processes td may not reschedule are skipped.
 * NOTE(review): elided listing — embedded line numbers are non-contiguous;
 * return statements and the else-branch brace are missing below.
 */
55 protect_setchild(struct thread *td, struct proc *p, int flags)
58 PROC_LOCK_ASSERT(p, MA_OWNED);
59 if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
61 if (flags & PPROT_SET) {
62 p->p_flag |= P_PROTECTED;
63 if (flags & PPROT_INHERIT)
64 p->p_flag2 |= P2_INHERIT_PROTECTED;
/* Clear path: drop both the protection flag and its inheritance bit. */
66 p->p_flag &= ~P_PROTECTED;
67 p->p_flag2 &= ~P2_INHERIT_PROTECTED;
/*
 * protect_setchildren(): walk the descendant tree rooted at "top" in
 * pre-order (children first, then siblings, then back up, never past
 * top), applying protect_setchild() to each process visited.
 * Requires proctree_lock held (asserted below).
 * NOTE(review): elided listing — the loop framing and the back-up-the-tree
 * steps between the lines shown are missing.
 */
73 protect_setchildren(struct thread *td, struct proc *top, int flags)
80 sx_assert(&proctree_lock, SX_LOCKED);
/* Accumulate per-process results; presumably OR of success flags — confirm. */
82 ret |= protect_setchild(td, p, flags);
85 * If this process has children, descend to them next,
86 * otherwise do any siblings, and if done with this level,
87 * follow back up the tree (but not past top).
89 if (!LIST_EMPTY(&p->p_children))
90 p = LIST_FIRST(&p->p_children);
96 if (LIST_NEXT(p, p_sibling)) {
97 p = LIST_NEXT(p, p_sibling);
/*
 * protect_set(): PROC_SPROTECT handler. Validates the flag word (only
 * PPROT_DESCEND and PPROT_INHERIT allowed besides the op), requires
 * PRIV_VM_MADV_PROTECT privilege, then applies the protection either to
 * the whole subtree (PPROT_DESCEND) or to the single process.
 * NOTE(review): elided listing — switch cases and returns are missing.
 */
107 protect_set(struct thread *td, struct proc *p, void *data)
109 int error, flags, ret;
111 flags = *(int *)data;
112 switch (PPROT_OP(flags)) {
120 if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
123 error = priv_check(td, PRIV_VM_MADV_PROTECT);
127 if (flags & PPROT_DESCEND)
128 ret = protect_setchildren(td, p, flags);
130 ret = protect_setchild(td, p, flags);
/*
 * reap_acquire(): PROC_REAP_ACQUIRE handler — mark the calling process
 * as a reaper (P_TREE_REAPER). Only td's own process may acquire, and
 * only if it is not already a reaper.
 * NOTE(review): elided listing — the error returns for the two guard
 * conditions are missing.
 */
137 reap_acquire(struct thread *td, struct proc *p, void *data __unused)
140 sx_assert(&proctree_lock, SX_XLOCKED);
141 if (p != td->td_proc)
143 if ((p->p_treeflag & P_TREE_REAPER) != 0)
145 p->p_treeflag |= P_TREE_REAPER;
147 * We do not reattach existing children and the whole tree
148 * under them to us, since p->p_reaper already seen them.
/*
 * reap_release(): PROC_REAP_RELEASE handler — stop being a reaper.
 * Restricted to the calling process; fails unless it currently is a
 * reaper. Its reap children are handed off via reaper_abandon_children().
 * NOTE(review): elided listing — guard returns and flag clearing missing.
 */
154 reap_release(struct thread *td, struct proc *p, void *data __unused)
157 sx_assert(&proctree_lock, SX_XLOCKED);
158 if (p != td->td_proc)
162 if ((p->p_treeflag & P_TREE_REAPER) == 0)
164 reaper_abandon_children(p, false);
/*
 * reap_status(): PROC_REAP_STATUS handler — fill the caller-provided
 * struct procctl_reaper_status: owning reaper pid, REAPER_STATUS_OWNED
 * when p itself is a reaper, REAPER_STATUS_REALINIT when the reaper is
 * initproc, a representative first pid, and the count of reap-list
 * entries that are not direct children (descendants).
 * NOTE(review): elided listing — reap selection and zero-entry paths
 * are missing between the lines below.
 */
169 reap_status(struct thread *td, struct proc *p, void *data)
171 struct proc *reap, *p2, *first_p;
172 struct procctl_reaper_status *rs;
175 sx_assert(&proctree_lock, SX_LOCKED);
176 if ((p->p_treeflag & P_TREE_REAPER) == 0) {
180 rs->rs_flags |= REAPER_STATUS_OWNED;
182 if (reap == initproc)
183 rs->rs_flags |= REAPER_STATUS_REALINIT;
184 rs->rs_reaper = reap->p_pid;
185 rs->rs_descendants = 0;
/*
 * Pick rs_pid: prefer the first direct child, else the first reap-list
 * entry — the branch condition between these two assignments is elided;
 * confirm against the full source.
 */
187 if (!LIST_EMPTY(&reap->p_reaplist)) {
188 first_p = LIST_FIRST(&reap->p_children);
190 first_p = LIST_FIRST(&reap->p_reaplist);
191 rs->rs_pid = first_p->p_pid;
192 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
/* Direct children are counted separately; only non-children bump this. */
193 if (proc_realparent(p2) == reap)
195 rs->rs_descendants++;
/*
 * reap_getpids(): PROC_REAP_GETPIDS handler — copy out one
 * procctl_reaper_pidinfo per process on the reaper's reap list.
 * proctree_lock is dropped around the M_WAITOK malloc and the copyout
 * and re-taken afterwards, so the list may change between the counting
 * pass and the fill pass; the caller-supplied rp_count bounds the copy.
 * NOTE(review): elided listing — the counting loop body, the fill-loop
 * index handling, error returns, and the free() of pi are missing.
 */
204 reap_getpids(struct thread *td, struct proc *p, void *data)
206 struct proc *reap, *p2;
207 struct procctl_reaper_pidinfo *pi, *pip;
208 struct procctl_reaper_pids *rp;
213 sx_assert(&proctree_lock, SX_LOCKED);
/* Resolve to the owning reaper when p itself is not one. */
215 reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
218 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
220 sx_unlock(&proctree_lock);
/* Clamp to the user's buffer size — presumably n = rp->rp_count here. */
221 if (rp->rp_count < n)
223 pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
224 sx_slock(&proctree_lock);
225 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
229 bzero(pip, sizeof(*pip));
230 pip->pi_pid = p2->p_pid;
231 pip->pi_subtree = p2->p_reapsubtree;
232 pip->pi_flags = REAPER_PIDINFO_VALID;
/* CHILD: the reaper is also the real parent. */
233 if (proc_realparent(p2) == reap)
234 pip->pi_flags |= REAPER_PIDINFO_CHILD;
235 if ((p2->p_treeflag & P_TREE_REAPER) != 0)
236 pip->pi_flags |= REAPER_PIDINFO_REAPER;
237 if ((p2->p_flag & P_STOPPED) != 0)
238 pip->pi_flags |= REAPER_PIDINFO_STOPPED;
/* ZOMBIE wins over EXITING; a process is reported as at most one. */
239 if (p2->p_state == PRS_ZOMBIE)
240 pip->pi_flags |= REAPER_PIDINFO_ZOMBIE;
241 else if ((p2->p_flag & P_WEXIT) != 0)
242 pip->pi_flags |= REAPER_PIDINFO_EXITING;
245 sx_sunlock(&proctree_lock);
246 error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
248 sx_slock(&proctree_lock);
/*
 * Work item handed to the taskqueue to signal one target process on
 * behalf of PROC_REAP_KILL.
 * NOTE(review): elided listing — most members (target proc, ucred,
 * ksiginfo, error pointer, task) are not visible here; confirm against
 * the full source.
 */
253 struct reap_kill_proc_work {
257 struct procctl_reaper_kill *rk;
/*
 * reap_kill_proc_locked(): deliver w->rk->rk_sig to w->target (locked
 * and held). Permission failure records the first failing pid in
 * rk_fpid when *w->error was ESRCH. Before signalling, the target is
 * single-threaded (SINGLE_ALLPROC) unless it is a kernel/system process
 * or already stopped — see the original block comment below for why.
 * NOTE(review): elided listing — the permission-failure return path and
 * the need_stop declaration/else arm are missing.
 */
263 reap_kill_proc_locked(struct reap_kill_proc_work *w)
268 PROC_LOCK_ASSERT(w->target, MA_OWNED);
269 PROC_ASSERT_HELD(w->target);
271 error1 = cr_cansignal(w->cr, w->target, w->rk->rk_sig);
273 if (*w->error == ESRCH) {
274 w->rk->rk_fpid = w->target->p_pid;
281 * The need_stop indicates if the target process needs to be
282 * suspended before being signalled. This is needed when we
283 * guarantee that all processes in subtree are signalled,
284 * avoiding the race with some process not yet fully linked
285 * into all structures during fork, ignored by iterator, and
286 * then escaping signalling.
288 * The thread cannot usefully stop itself anyway, and if other
289 * thread of the current process forks while the current
290 * thread signals the whole subtree, it is an application
293 if ((w->target->p_flag & (P_KPROC | P_SYSTEM | P_STOPPED)) == 0)
294 need_stop = thread_single(w->target, SINGLE_ALLPROC) == 0;
296 (void)pksignal(w->target, w->rk->rk_sig, w->ksi);
/* Undo the single-threading once the signal is queued. */
303 thread_single_end(w->target, SINGLE_ALLPROC);
/*
 * reap_kill_proc_work(): taskqueue callback. Signals the target via
 * reap_kill_proc_locked() unless the target is already exiting
 * (P2_WEXIT), then re-takes proctree_lock — presumably to clear
 * w->target and wake the waiter in reap_kill_subtree_once(); those
 * lines are elided, confirm against the full source.
 */
307 reap_kill_proc_work(void *arg, int pending __unused)
309 struct reap_kill_proc_work *w;
312 PROC_LOCK(w->target);
313 if ((w->target->p_flag2 & P2_WEXIT) == 0)
314 reap_kill_proc_locked(w);
315 PROC_UNLOCK(w->target);
317 sx_xlock(&proctree_lock);
320 sx_xunlock(&proctree_lock);
/*
 * Tracker node: one entry per reaper queued while iterating the kill
 * subtree in reap_kill_subtree_once().
 * NOTE(review): elided — the parent-proc member is not shown here.
 */
323 struct reap_kill_tracker {
325 TAILQ_ENTRY(reap_kill_tracker) link;
328 TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);
/*
 * reap_kill_sched(): enqueue reaper p2 on the tracker list unless it is
 * already exiting (P2_WEXIT). Presumably also takes a hold reference on
 * p2 in the elided lines between the check and the malloc — confirm.
 */
331 reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
333 struct reap_kill_tracker *t;
336 if ((p2->p_flag2 & P2_WEXIT) != 0) {
342 t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
344 TAILQ_INSERT_TAIL(tracker, t, link);
/*
 * reap_kill_sched_free(): release a tracker node — body elided;
 * presumably drops the proc hold and frees t.
 */
348 reap_kill_sched_free(struct reap_kill_tracker *t)
/*
 * reap_kill_children(): REAPER_KILL_CHILDREN mode — signal only the
 * direct children of the reaper. A permission failure records the
 * first failing pid in rk_fpid (when *error was still ESRCH) but, per
 * the original comment, does not terminate the loop: everything that
 * can be signalled is signalled.
 * NOTE(review): elided listing — per-child locking and error
 * bookkeeping between the lines below are missing.
 */
355 reap_kill_children(struct thread *td, struct proc *reaper,
356 struct procctl_reaper_kill *rk, ksiginfo_t *ksi, int *error)
361 LIST_FOREACH(p2, &reaper->p_children, p_sibling) {
363 if ((p2->p_flag2 & P2_WEXIT) == 0) {
364 error1 = p_cansignal(td, p2, rk->rk_sig);
366 if (*error == ESRCH) {
367 rk->rk_fpid = p2->p_pid;
372 * Do not end the loop on error,
373 * signal everything we can.
376 (void)pksignal(p2, rk->rk_sig, ksi);
/*
 * reap_kill_subtree_once(): one pass over the reaper's subtree for
 * REAPER_KILL_SUBTREE. Maintains a FIFO of reapers (tracker); for each
 * reaper still valid, walks its reap list, queueing nested reapers and
 * signalling each process at most once ("pids" unr set records pids
 * already signalled, so a repeated pass does not double-signal).
 * The calling process is signalled inline under SINGLE_NO_EXIT
 * single-threading; other processes are handed to the taskqueue work
 * item and waited for on proctree_lock.
 * NOTE(review): elided listing — the return value (presumably "another
 * pass needed"), continue statements, hold/lock pairing, and the
 * taskqueue argument lines are missing; confirm before relying on the
 * exact ordering described above.
 */
385 reap_kill_subtree_once(struct thread *td, struct proc *p, struct proc *reaper,
386 struct unrhdr *pids, struct reap_kill_proc_work *w)
388 struct reap_kill_tracker_head tracker;
389 struct reap_kill_tracker *t;
395 TAILQ_INIT(&tracker);
396 reap_kill_sched(&tracker, reaper);
397 while ((t = TAILQ_FIRST(&tracker)) != NULL) {
398 TAILQ_REMOVE(&tracker, t, link);
401 * Since reap_kill_proc() drops proctree_lock sx, it
402 * is possible that the tracked reaper is no longer.
403 * In this case the subtree is reparented to the new
404 * reaper, which should handle it.
406 if ((t->parent->p_treeflag & P_TREE_REAPER) == 0) {
407 reap_kill_sched_free(t);
412 LIST_FOREACH(p2, &t->parent->p_reaplist, p_reapsibling) {
/* Top-level subtree filter: only the requested rk_subtree is visited. */
413 if (t->parent == reaper &&
414 (w->rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
415 p2->p_reapsubtree != w->rk->rk_subtree)
417 if ((p2->p_treeflag & P_TREE_REAPER) != 0)
418 reap_kill_sched(&tracker, p2);
/* Already signalled in an earlier pass: the pid is taken in the unr set. */
419 if (alloc_unr_specific(pids, p2->p_pid) != p2->p_pid)
421 if (p2 == td->td_proc) {
422 if ((p2->p_flag & P_HADTHREADS) != 0 &&
423 (p2->p_flag2 & P2_WEXIT) == 0) {
/* Remember lock mode; proctree_lock is dropped across single-threading. */
424 xlocked = sx_xlocked(&proctree_lock);
425 sx_unlock(&proctree_lock);
432 r = thread_single(p2, SINGLE_NO_EXIT);
433 (void)pksignal(p2, w->rk->rk_sig, w->ksi);
436 thread_single_end(p2, SINGLE_NO_EXIT);
440 sx_xlock(&proctree_lock);
442 sx_slock(&proctree_lock);
/* Non-current targets go through the taskqueue work item. */
446 if ((p2->p_flag2 & P2_WEXIT) == 0) {
450 taskqueue_enqueue(taskqueue_thread,
/* Sleep until reap_kill_proc_work() clears w->target. */
452 while (w->target != NULL) {
454 &proctree_lock, PWAIT,
464 reap_kill_sched_free(t);
/*
 * reap_kill_subtree(): drive reap_kill_subtree_once() until it reports
 * no further work. The unr set "pids" remembers processes already
 * signalled across passes (see original comment below). Bails out
 * early when the calling process itself is already exiting.
 * NOTE(review): elided listing — unr set teardown and goto label are
 * missing.
 */
470 reap_kill_subtree(struct thread *td, struct proc *p, struct proc *reaper,
471 struct reap_kill_proc_work *w)
476 * pids records processes which were already signalled, to
477 * avoid doubling signals to them if iteration needs to be
480 init_unrhdr(&pids, 1, PID_MAX, UNR_NO_MTX);
481 PROC_LOCK(td->td_proc);
482 if ((td->td_proc->p_flag2 & P2_WEXIT) != 0) {
483 PROC_UNLOCK(td->td_proc);
486 PROC_UNLOCK(td->td_proc);
487 while (reap_kill_subtree_once(td, p, reaper, &pids, w))
/*
 * reap_kill_sapblk(): sapblk hook for PROC_REAP_KILL — report whether
 * stop_all_proc must be blocked. True for subtree kills (everything
 * except the children-only mode).
 */
495 reap_kill_sapblk(struct thread *td __unused, void *data)
497 struct procctl_reaper_kill *rk;
500 return ((rk->rk_flags & REAPER_KILL_CHILDREN) == 0);
/*
 * reap_kill(): PROC_REAP_KILL handler. Refused in capability mode;
 * validates the signal number and the flag word (REAPER_KILL_CHILDREN
 * and REAPER_KILL_SUBTREE are each allowed but mutually exclusive).
 * Builds an SI_USER ksiginfo attributed to the caller, then dispatches
 * to children-only or subtree signalling. w, ksi and rk live on the
 * stack, so swapout is prevented for the duration (original comment
 * below).
 * NOTE(review): elided listing — error initialization, the w member
 * setup around crhold(), P_HOLD/swapout calls, and cleanup are missing.
 */
504 reap_kill(struct thread *td, struct proc *p, void *data)
506 struct reap_kill_proc_work w;
509 struct procctl_reaper_kill *rk;
513 sx_assert(&proctree_lock, SX_LOCKED);
514 if (IN_CAPABILITY_MODE(td))
516 if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
517 (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
518 REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
519 (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
520 (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
/* Resolve to the owning reaper when p itself is not one. */
523 reaper = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
525 ksi.ksi_signo = rk->rk_sig;
526 ksi.ksi_code = SI_USER;
527 ksi.ksi_pid = td->td_proc->p_pid;
528 ksi.ksi_uid = td->td_ucred->cr_ruid;
532 if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
533 reap_kill_children(td, reaper, rk, &ksi, &error);
535 w.cr = crhold(td->td_ucred);
539 TASK_INIT(&w.t, 0, reap_kill_proc_work, &w);
542 * Prevent swapout, since w, ksi, and possibly rk, are
543 * allocated on the stack. We sleep in
544 * reap_kill_subtree_once() waiting for task to
545 * complete single-threading.
549 reap_kill_subtree(td, p, reaper, &w);
/*
 * trace_ctl(): PROC_TRACE_CTL handler manipulating P2_NOTRACE and
 * P2_NOTRACE_EXEC. Rejected while the process is being traced or
 * ktraced (checked lock-free per the original comment). ENABLE, and
 * DISABLE when a previous DISABLE_EXEC is in effect, are restricted to
 * the process acting on itself.
 * NOTE(review): elided listing — error returns, break statements and
 * the default case are missing.
 */
558 trace_ctl(struct thread *td, struct proc *p, void *data)
562 PROC_LOCK_ASSERT(p, MA_OWNED);
563 state = *(int *)data;
566 * Ktrace changes p_traceflag from or to zero under the
567 * process lock, so the test does not need to acquire ktrace
570 if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
574 case PROC_TRACE_CTL_ENABLE:
575 if (td->td_proc != p)
577 p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
579 case PROC_TRACE_CTL_DISABLE_EXEC:
580 p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
582 case PROC_TRACE_CTL_DISABLE:
583 if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
/* Invariant: NOTRACE_EXEC always implies NOTRACE ("dandling" is a typo
 * for "dangling" in the original message — runtime string, left as-is). */
584 KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
585 ("dandling P2_NOTRACE_EXEC"));
586 if (td->td_proc != p)
588 p->p_flag2 &= ~P2_NOTRACE_EXEC;
590 p->p_flag2 |= P2_NOTRACE;
/*
 * trace_status(): PROC_TRACE_STATUS handler. Reports the tracer pid
 * when traced, and (elided) presumably -1 when tracing is disabled and
 * 0 when enabled but untraced — confirm against procctl(2).
 */
600 trace_status(struct thread *td, struct proc *p, void *data)
605 if ((p->p_flag2 & P2_NOTRACE) != 0) {
606 KASSERT((p->p_flag & P_TRACED) == 0,
607 ("%d traced but tracing disabled", p->p_pid));
609 } else if ((p->p_flag & P_TRACED) != 0) {
610 *status = p->p_pptr->p_pid;
/*
 * trapcap_ctl(): PROC_TRAPCAP_CTL handler — set or clear P2_TRAPCAP
 * (deliver SIGTRAP on ECAPMODE in capability mode — presumed semantics,
 * confirm against procctl(2)).
 * NOTE(review): elided listing — break statements, default case and
 * return are missing.
 */
618 trapcap_ctl(struct thread *td, struct proc *p, void *data)
622 PROC_LOCK_ASSERT(p, MA_OWNED);
623 state = *(int *)data;
626 case PROC_TRAPCAP_CTL_ENABLE:
627 p->p_flag2 |= P2_TRAPCAP;
629 case PROC_TRAPCAP_CTL_DISABLE:
630 p->p_flag2 &= ~P2_TRAPCAP;
/*
 * trapcap_status(): PROC_TRAPCAP_STATUS handler — report whether
 * P2_TRAPCAP is set as the corresponding CTL constant.
 */
639 trapcap_status(struct thread *td, struct proc *p, void *data)
644 *status = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
645 PROC_TRAPCAP_CTL_DISABLE;
/*
 * no_new_privs_ctl(): PROC_NO_NEW_PRIVS_CTL handler. One-way switch:
 * only ENABLE is accepted, and the code only ever sets
 * P2_NO_NEW_PRIVS — there is no clearing path here.
 */
650 no_new_privs_ctl(struct thread *td, struct proc *p, void *data)
654 PROC_LOCK_ASSERT(p, MA_OWNED);
655 state = *(int *)data;
657 if (state != PROC_NO_NEW_PRIVS_ENABLE)
659 p->p_flag2 |= P2_NO_NEW_PRIVS;
/*
 * no_new_privs_status(): PROC_NO_NEW_PRIVS_STATUS handler — report
 * whether P2_NO_NEW_PRIVS is set.
 */
664 no_new_privs_status(struct thread *td, struct proc *p, void *data)
667 *(int *)data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ?
668 PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
/*
 * protmax_ctl(): PROC_PROTMAX_CTL handler — force-enable, force-disable
 * or reset to default (NOFORCE) the implied-PROT_MAX behavior via the
 * mutually-exclusive P2_PROTMAX_ENABLE/P2_PROTMAX_DISABLE flag pair.
 * NOTE(review): elided listing — breaks, default case and return are
 * missing.
 */
673 protmax_ctl(struct thread *td, struct proc *p, void *data)
677 PROC_LOCK_ASSERT(p, MA_OWNED);
678 state = *(int *)data;
681 case PROC_PROTMAX_FORCE_ENABLE:
682 p->p_flag2 &= ~P2_PROTMAX_DISABLE;
683 p->p_flag2 |= P2_PROTMAX_ENABLE;
685 case PROC_PROTMAX_FORCE_DISABLE:
686 p->p_flag2 |= P2_PROTMAX_DISABLE;
687 p->p_flag2 &= ~P2_PROTMAX_ENABLE;
689 case PROC_PROTMAX_NOFORCE:
690 p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
/*
 * protmax_status(): PROC_PROTMAX_STATUS handler — map the flag pair
 * back to the CTL constant and OR in PROC_PROTMAX_ACTIVE when the
 * effective mmap maxprot for PROT_READ shows PROT_MAX is in effect.
 * NOTE(review): elided listing — the default/0 case label and breaks
 * are missing.
 */
699 protmax_status(struct thread *td, struct proc *p, void *data)
703 switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
705 d = PROC_PROTMAX_NOFORCE;
707 case P2_PROTMAX_ENABLE:
708 d = PROC_PROTMAX_FORCE_ENABLE;
710 case P2_PROTMAX_DISABLE:
711 d = PROC_PROTMAX_FORCE_DISABLE;
714 if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
715 d |= PROC_PROTMAX_ACTIVE;
/*
 * aslr_ctl(): PROC_ASLR_CTL handler — force-enable, force-disable or
 * reset (NOFORCE) per-process ASLR via the mutually-exclusive
 * P2_ASLR_ENABLE/P2_ASLR_DISABLE flag pair; takes effect on next exec.
 * NOTE(review): elided listing — breaks, default case and return are
 * missing.
 */
721 aslr_ctl(struct thread *td, struct proc *p, void *data)
725 PROC_LOCK_ASSERT(p, MA_OWNED);
726 state = *(int *)data;
729 case PROC_ASLR_FORCE_ENABLE:
730 p->p_flag2 &= ~P2_ASLR_DISABLE;
731 p->p_flag2 |= P2_ASLR_ENABLE;
733 case PROC_ASLR_FORCE_DISABLE:
734 p->p_flag2 |= P2_ASLR_DISABLE;
735 p->p_flag2 &= ~P2_ASLR_ENABLE;
737 case PROC_ASLR_NOFORCE:
738 p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
/*
 * aslr_status(): PROC_ASLR_STATUS handler — map the flag pair to the
 * CTL constant, then, if the process is not exiting, take a vmspace
 * reference and OR in PROC_ASLR_ACTIVE when the current map actually
 * has MAP_ASLR set.
 * NOTE(review): elided listing — case labels, breaks, NULL-vm check,
 * vmspace_free and return are missing.
 */
747 aslr_status(struct thread *td, struct proc *p, void *data)
752 switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
754 d = PROC_ASLR_NOFORCE;
757 d = PROC_ASLR_FORCE_ENABLE;
759 case P2_ASLR_DISABLE:
760 d = PROC_ASLR_FORCE_DISABLE;
763 if ((p->p_flag & P_WEXIT) == 0) {
766 vm = vmspace_acquire_ref(p);
768 if ((vm->vm_map.flags & MAP_ASLR) != 0)
769 d |= PROC_ASLR_ACTIVE;
/*
 * stackgap_ctl(): PROC_STACKGAP_CTL handler. Validates that only the
 * four known bits are set, then handles the current-process pair
 * (ENABLE cannot undo an existing P2_STKGAP_DISABLE — the elided line
 * after the check presumably returns an error; confirm) and the
 * on-exec pair independently.
 * NOTE(review): elided listing — breaks, 0/default cases and returns
 * are missing.
 */
780 stackgap_ctl(struct thread *td, struct proc *p, void *data)
784 PROC_LOCK_ASSERT(p, MA_OWNED);
785 state = *(int *)data;
787 if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
788 PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
790 switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
791 case PROC_STACKGAP_ENABLE:
792 if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
795 case PROC_STACKGAP_DISABLE:
796 p->p_flag2 |= P2_STKGAP_DISABLE;
803 switch (state & (PROC_STACKGAP_ENABLE_EXEC |
804 PROC_STACKGAP_DISABLE_EXEC)) {
805 case PROC_STACKGAP_ENABLE_EXEC:
806 p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
808 case PROC_STACKGAP_DISABLE_EXEC:
809 p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
/*
 * stackgap_status(): PROC_STACKGAP_STATUS handler — report both the
 * current (P2_STKGAP_DISABLE) and the on-exec (P2_STKGAP_DISABLE_EXEC)
 * stack-gap states as ORed CTL constants.
 */
820 stackgap_status(struct thread *td, struct proc *p, void *data)
824 PROC_LOCK_ASSERT(p, MA_OWNED);
826 d = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE :
827 PROC_STACKGAP_ENABLE;
828 d |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
829 PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
/*
 * wxmap_ctl(): PROC_WXMAP_CTL handler. PERMIT sets P2_WXORX_DISABLE
 * and clears MAP_WXORX on the current vmspace map (taking a vmspace
 * reference; lock/unlock and vmspace_free are elided);
 * DISALLOW_EXEC arms W^X enforcement for the next exec
 * (P2_WXORX_ENABLE_EXEC). Fails if the process is exiting.
 * NOTE(review): elided listing — breaks, default case and returns are
 * missing.
 */
835 wxmap_ctl(struct thread *td, struct proc *p, void *data)
841 PROC_LOCK_ASSERT(p, MA_OWNED);
842 if ((p->p_flag & P_WEXIT) != 0)
844 state = *(int *)data;
847 case PROC_WX_MAPPINGS_PERMIT:
848 p->p_flag2 |= P2_WXORX_DISABLE;
851 vm = vmspace_acquire_ref(p);
855 map->flags &= ~MAP_WXORX;
862 case PROC_WX_MAPPINGS_DISALLOW_EXEC:
863 p->p_flag2 |= P2_WXORX_ENABLE_EXEC;
/*
 * wxmap_status(): PROC_WXMAP_STATUS handler — report PERMIT /
 * DISALLOW_EXEC flag state and, via a vmspace reference, whether the
 * current map actually enforces W^X (PROC_WXORX_ENFORCE when
 * MAP_WXORX is set). Fails if the process is exiting.
 * NOTE(review): elided listing — NULL-vm handling, vmspace_free and
 * return are missing.
 */
873 wxmap_status(struct thread *td, struct proc *p, void *data)
878 PROC_LOCK_ASSERT(p, MA_OWNED);
879 if ((p->p_flag & P_WEXIT) != 0)
883 if ((p->p_flag2 & P2_WXORX_DISABLE) != 0)
884 d |= PROC_WX_MAPPINGS_PERMIT;
885 if ((p->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
886 d |= PROC_WX_MAPPINGS_DISALLOW_EXEC;
889 vm = vmspace_acquire_ref(p);
891 if ((vm->vm_map.flags & MAP_WXORX) != 0)
892 d |= PROC_WXORX_ENFORCE;
/*
 * pdeathsig_ctl(): PROC_PDEATHSIG_CTL handler — set the signal to be
 * delivered when the parent dies. Restricted to the calling process;
 * 0 clears it, otherwise the number must be a valid signal.
 */
902 pdeathsig_ctl(struct thread *td, struct proc *p, void *data)
906 signum = *(int *)data;
907 if (p != td->td_proc || (signum != 0 && !_SIG_VALID(signum)))
909 p->p_pdeathsig = signum;
/*
 * pdeathsig_status(): PROC_PDEATHSIG_STATUS handler — report the
 * current parent-death signal; restricted to the calling process.
 */
914 pdeathsig_status(struct thread *td, struct proc *p, void *data)
916 if (p != td->td_proc)
918 *(int *)data = p->p_pdeathsig;
/*
 * Per-command dispatch descriptor for procctl(2): locking mode for
 * proctree_lock, whether the command applies to exactly one process,
 * error-translation and copy-in/out sizes, the handler, and an
 * optional stop_all_proc-blocking predicate.
 * NOTE(review): elided — lock_tree/one_proc/copyin_sz/copyout_sz member
 * declarations are not visible in this listing.
 */
928 struct procctl_cmd_info {
931 bool esrch_is_einval : 1;
932 bool copyout_on_error : 1;
933 bool no_nonnull_data : 1;
934 bool need_candebug : 1;
937 int (*exec)(struct thread *, struct proc *, void *);
938 bool (*sapblk)(struct thread *, void *);
/*
 * Command table indexed by procctl(2) command number (designated
 * initializers). Each entry selects proctree locking, scope, debug
 * permission requirement, copyin/copyout sizes, and the handler.
 * NOTE(review): elided listing — several index designators (e.g. for
 * protect_set, reap_status, reap_kill, trace_ctl, trapcap_ctl,
 * aslr_ctl, wxmap_ctl entries) are missing between entries.
 */
940 static const struct procctl_cmd_info procctl_cmds_info[] = {
942 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
943 .esrch_is_einval = false, .no_nonnull_data = false,
944 .need_candebug = false,
945 .copyin_sz = sizeof(int), .copyout_sz = 0,
946 .exec = protect_set, .copyout_on_error = false, },
947 [PROC_REAP_ACQUIRE] =
948 { .lock_tree = PCTL_XLOCKED, .one_proc = true,
949 .esrch_is_einval = false, .no_nonnull_data = true,
950 .need_candebug = false,
951 .copyin_sz = 0, .copyout_sz = 0,
952 .exec = reap_acquire, .copyout_on_error = false, },
953 [PROC_REAP_RELEASE] =
954 { .lock_tree = PCTL_XLOCKED, .one_proc = true,
955 .esrch_is_einval = false, .no_nonnull_data = true,
956 .need_candebug = false,
957 .copyin_sz = 0, .copyout_sz = 0,
958 .exec = reap_release, .copyout_on_error = false, },
960 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
961 .esrch_is_einval = false, .no_nonnull_data = false,
962 .need_candebug = false,
964 .copyout_sz = sizeof(struct procctl_reaper_status),
965 .exec = reap_status, .copyout_on_error = false, },
966 [PROC_REAP_GETPIDS] =
967 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
968 .esrch_is_einval = false, .no_nonnull_data = false,
969 .need_candebug = false,
970 .copyin_sz = sizeof(struct procctl_reaper_pids),
972 .exec = reap_getpids, .copyout_on_error = false, },
974 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
975 .esrch_is_einval = false, .no_nonnull_data = false,
976 .need_candebug = false,
977 .copyin_sz = sizeof(struct procctl_reaper_kill),
978 .copyout_sz = sizeof(struct procctl_reaper_kill),
979 .exec = reap_kill, .copyout_on_error = true,
980 .sapblk = reap_kill_sapblk, },
982 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
983 .esrch_is_einval = false, .no_nonnull_data = false,
984 .need_candebug = true,
985 .copyin_sz = sizeof(int), .copyout_sz = 0,
986 .exec = trace_ctl, .copyout_on_error = false, },
987 [PROC_TRACE_STATUS] =
988 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
989 .esrch_is_einval = false, .no_nonnull_data = false,
990 .need_candebug = false,
991 .copyin_sz = 0, .copyout_sz = sizeof(int),
992 .exec = trace_status, .copyout_on_error = false, },
994 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
995 .esrch_is_einval = false, .no_nonnull_data = false,
996 .need_candebug = true,
997 .copyin_sz = sizeof(int), .copyout_sz = 0,
998 .exec = trapcap_ctl, .copyout_on_error = false, },
999 [PROC_TRAPCAP_STATUS] =
1000 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1001 .esrch_is_einval = false, .no_nonnull_data = false,
1002 .need_candebug = false,
1003 .copyin_sz = 0, .copyout_sz = sizeof(int),
1004 .exec = trapcap_status, .copyout_on_error = false, },
1005 [PROC_PDEATHSIG_CTL] =
1006 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1007 .esrch_is_einval = true, .no_nonnull_data = false,
1008 .need_candebug = false,
1009 .copyin_sz = sizeof(int), .copyout_sz = 0,
1010 .exec = pdeathsig_ctl, .copyout_on_error = false, },
1011 [PROC_PDEATHSIG_STATUS] =
1012 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1013 .esrch_is_einval = true, .no_nonnull_data = false,
1014 .need_candebug = false,
1015 .copyin_sz = 0, .copyout_sz = sizeof(int),
1016 .exec = pdeathsig_status, .copyout_on_error = false, },
1018 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1019 .esrch_is_einval = false, .no_nonnull_data = false,
1020 .need_candebug = true,
1021 .copyin_sz = sizeof(int), .copyout_sz = 0,
1022 .exec = aslr_ctl, .copyout_on_error = false, },
1023 [PROC_ASLR_STATUS] =
1024 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1025 .esrch_is_einval = false, .no_nonnull_data = false,
1026 .need_candebug = false,
1027 .copyin_sz = 0, .copyout_sz = sizeof(int),
1028 .exec = aslr_status, .copyout_on_error = false, },
1029 [PROC_PROTMAX_CTL] =
1030 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1031 .esrch_is_einval = false, .no_nonnull_data = false,
1032 .need_candebug = true,
1033 .copyin_sz = sizeof(int), .copyout_sz = 0,
1034 .exec = protmax_ctl, .copyout_on_error = false, },
1035 [PROC_PROTMAX_STATUS] =
1036 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1037 .esrch_is_einval = false, .no_nonnull_data = false,
1038 .need_candebug = false,
1039 .copyin_sz = 0, .copyout_sz = sizeof(int),
1040 .exec = protmax_status, .copyout_on_error = false, },
1041 [PROC_STACKGAP_CTL] =
1042 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1043 .esrch_is_einval = false, .no_nonnull_data = false,
1044 .need_candebug = true,
1045 .copyin_sz = sizeof(int), .copyout_sz = 0,
1046 .exec = stackgap_ctl, .copyout_on_error = false, },
1047 [PROC_STACKGAP_STATUS] =
1048 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1049 .esrch_is_einval = false, .no_nonnull_data = false,
1050 .need_candebug = false,
1051 .copyin_sz = 0, .copyout_sz = sizeof(int),
1052 .exec = stackgap_status, .copyout_on_error = false, },
1053 [PROC_NO_NEW_PRIVS_CTL] =
1054 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
1055 .esrch_is_einval = false, .no_nonnull_data = false,
1056 .need_candebug = true,
1057 .copyin_sz = sizeof(int), .copyout_sz = 0,
1058 .exec = no_new_privs_ctl, .copyout_on_error = false, },
1059 [PROC_NO_NEW_PRIVS_STATUS] =
1060 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1061 .esrch_is_einval = false, .no_nonnull_data = false,
1062 .need_candebug = false,
1063 .copyin_sz = 0, .copyout_sz = sizeof(int),
1064 .exec = no_new_privs_status, .copyout_on_error = false, },
1066 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1067 .esrch_is_einval = false, .no_nonnull_data = false,
1068 .need_candebug = true,
1069 .copyin_sz = sizeof(int), .copyout_sz = 0,
1070 .exec = wxmap_ctl, .copyout_on_error = false, },
1071 [PROC_WXMAP_STATUS] =
1072 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1073 .esrch_is_einval = false, .no_nonnull_data = false,
1074 .need_candebug = false,
1075 .copyin_sz = 0, .copyout_sz = sizeof(int),
1076 .exec = wxmap_status, .copyout_on_error = false, },
/*
 * sys_procctl(): procctl(2) syscall entry. MD commands are forwarded
 * to cpu_procctl(); otherwise the command is validated against the
 * table, the argument is copied in (or, for no-data commands, a
 * non-NULL data pointer is rejected), kern_procctl() does the work,
 * and the result is copied out — also on error when copyout_on_error
 * is set (used by PROC_REAP_KILL to report rk_fpid).
 * NOTE(review): elided listing — the local union "x", error returns
 * and final return are missing.
 */
1080 sys_procctl(struct thread *td, struct procctl_args *uap)
1083 struct procctl_reaper_status rs;
1084 struct procctl_reaper_pids rp;
1085 struct procctl_reaper_kill rk;
1088 const struct procctl_cmd_info *cmd_info;
1091 if (uap->com >= PROC_PROCCTL_MD_MIN)
1092 return (cpu_procctl(td, uap->idtype, uap->id,
1093 uap->com, uap->data));
1094 if (uap->com == 0 || uap->com >= nitems(procctl_cmds_info))
1096 cmd_info = &procctl_cmds_info[uap->com];
1097 bzero(&x, sizeof(x));
1099 if (cmd_info->copyin_sz > 0) {
1100 error = copyin(uap->data, &x, cmd_info->copyin_sz);
1103 } else if (cmd_info->no_nonnull_data && uap->data != NULL) {
1107 error = kern_procctl(td, uap->idtype, uap->id, uap->com, &x);
1109 if (cmd_info->copyout_sz > 0 && (error == 0 ||
1110 cmd_info->copyout_on_error)) {
1111 error1 = copyout(&x, uap->data, cmd_info->copyout_sz);
/*
 * kern_procctl_single(): dispatch one command to its table handler for
 * a single, already-locked process.
 */
1119 kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
1122 PROC_LOCK_ASSERT(p, MA_OWNED);
1123 return (procctl_cmds_info[com].exec(td, p, data));
/*
 * kern_procctl(): in-kernel procctl dispatcher. Enforces one_proc for
 * non-P_PID idtypes, optionally blocks stop_all_proc (sapblk hook),
 * takes proctree_lock in the table-selected mode, then either operates
 * on one process (ESRCH translated to EINVAL when esrch_is_einval;
 * p_candebug or p_cansee checked per need_candebug) or iterates a
 * process group, ignoring unviewable/NEW/ZOMBIE members and reporting
 * success if any member succeeded (first error kept otherwise).
 * NOTE(review): elided listing — pfind/pgfind lookups, per-member
 * locking, the "no visible members" ESRCH path, and unlock/cleanup are
 * missing between the lines below.
 */
1127 kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
1131 const struct procctl_cmd_info *cmd_info;
1132 int error, first_error, ok;
1135 MPASS(com > 0 && com < nitems(procctl_cmds_info));
1136 cmd_info = &procctl_cmds_info[com];
1137 if (idtype != P_PID && cmd_info->one_proc)
1141 if (cmd_info->sapblk != NULL) {
1142 sapblk = cmd_info->sapblk(td, data);
1143 if (sapblk && !stop_all_proc_block())
1147 switch (cmd_info->lock_tree) {
1149 sx_xlock(&proctree_lock);
1152 sx_slock(&proctree_lock);
/* Single-process path. */
1167 error = cmd_info->esrch_is_einval ?
1171 error = cmd_info->need_candebug ? p_candebug(td, p) :
1175 error = kern_procctl_single(td, p, com, data);
1180 * Attempt to apply the operation to all members of the
1181 * group. Ignore processes in the group that can't be
1182 * seen. Ignore errors so long as at least one process is
1183 * able to complete the request successfully.
1193 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
1195 if (p->p_state == PRS_NEW ||
1196 p->p_state == PRS_ZOMBIE ||
1197 (cmd_info->need_candebug ? p_candebug(td, p) :
1198 p_cansee(td, p)) != 0) {
1202 error = kern_procctl_single(td, p, com, data);
1206 else if (first_error == 0)
1207 first_error = error;
1211 else if (first_error != 0)
1212 error = first_error;
1215 * Was not able to see any processes in the
/* Drop proctree_lock in the mode it was taken. */
1225 switch (cmd_info->lock_tree) {
1227 sx_xunlock(&proctree_lock);
1230 sx_sunlock(&proctree_lock);
/* Re-allow stop_all_proc if the sapblk hook blocked it. */
1236 stop_all_proc_unblock();