2 * Copyright (c) 2014 John Baldwin
3 * Copyright (c) 2014, 2016 The FreeBSD Foundation
5 * Portions of this software were developed by Konstantin Belousov
6 * under sponsorship from the FreeBSD Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 #include <sys/param.h>
32 #include <sys/_unrhdr.h>
33 #include <sys/systm.h>
34 #include <sys/capsicum.h>
37 #include <sys/mutex.h>
40 #include <sys/procctl.h>
42 #include <sys/syscallsubr.h>
43 #include <sys/sysproto.h>
44 #include <sys/taskqueue.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_extern.h>
/*
 * Apply a PROC_SPROTECT set/clear request to a single process.
 * Caller holds the process lock.  Sets or clears P_PROTECTED (and the
 * P2_INHERIT_PROTECTED propagation flag when PPROT_INHERIT is given).
 * NOTE(review): extract is decimated — return paths, the else branch,
 * and braces are not visible here.
 */
53 protect_setchild(struct thread *td, struct proc *p, int flags)
56 PROC_LOCK_ASSERT(p, MA_OWNED);
/* Skip system processes and processes the caller may not control. */
57 if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
59 if (flags & PPROT_SET) {
60 p->p_flag |= P_PROTECTED;
61 if (flags & PPROT_INHERIT)
62 p->p_flag2 |= P2_INHERIT_PROTECTED;
/* Clear path: drop both the protection and its inheritance flag. */
64 p->p_flag &= ~P_PROTECTED;
65 p->p_flag2 &= ~P2_INHERIT_PROTECTED;
/*
 * Walk the process tree rooted at 'top' and apply protect_setchild() to
 * each descendant.  The walk is an explicit iterative traversal: descend
 * into children first, then siblings, then back up (but never above top).
 * Requires proctree_lock held (shared or exclusive).
 * NOTE(review): the loop header, the ascent branch, and the return are
 * missing from this extract.
 */
71 protect_setchildren(struct thread *td, struct proc *top, int flags)
78 sx_assert(&proctree_lock, SX_LOCKED);
/* Accumulate per-process results; any success is remembered in ret. */
80 ret |= protect_setchild(td, p, flags);
83 * If this process has children, descend to them next,
84 * otherwise do any siblings, and if done with this level,
85 * follow back up the tree (but not past top).
87 if (!LIST_EMPTY(&p->p_children))
88 p = LIST_FIRST(&p->p_children);
94 if (LIST_NEXT(p, p_sibling)) {
95 p = LIST_NEXT(p, p_sibling);
/*
 * PROC_SPROTECT handler.  Validates the operation and flag bits from the
 * user-supplied int, checks PRIV_VM_MADV_PROTECT privilege, then applies
 * the protection to the single process or (with PPROT_DESCEND) to its
 * whole subtree.  NOTE(review): switch cases and error returns are not
 * visible in this extract.
 */
105 protect_set(struct thread *td, struct proc *p, void *data)
107 int error, flags, ret;
109 flags = *(int *)data;
110 switch (PPROT_OP(flags)) {
/* Reject any flag bits other than DESCEND/INHERIT. */
118 if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
121 error = priv_check(td, PRIV_VM_MADV_PROTECT);
125 if (flags & PPROT_DESCEND)
126 ret = protect_setchildren(td, p, flags);
128 ret = protect_setchild(td, p, flags);
/*
 * PROC_REAP_ACQUIRE handler: make the calling process a reaper for its
 * future orphaned descendants.  Only the current process may acquire,
 * and acquiring twice is rejected.  Requires proctree_lock exclusively.
 */
135 reap_acquire(struct thread *td, struct proc *p, void *data __unused)
138 sx_assert(&proctree_lock, SX_XLOCKED);
139 if (p != td->td_proc)
141 if ((p->p_treeflag & P_TREE_REAPER) != 0)
143 p->p_treeflag |= P_TREE_REAPER;
145 * We do not reattach existing children and the whole tree
146 * under them to us, since p->p_reaper already seen them.
/*
 * PROC_REAP_RELEASE handler: give up reaper status for the calling
 * process and hand its reap children back via reaper_abandon_children().
 * Requires proctree_lock exclusively.  NOTE(review): error returns for
 * the guard conditions are not visible in this extract.
 */
152 reap_release(struct thread *td, struct proc *p, void *data __unused)
155 sx_assert(&proctree_lock, SX_XLOCKED);
156 if (p != td->td_proc)
160 if ((p->p_treeflag & P_TREE_REAPER) == 0)
162 reaper_abandon_children(p, false);
/*
 * PROC_REAP_STATUS handler: fill in a struct procctl_reaper_status for
 * the process (or for its owning reaper if the process is not itself a
 * reaper).  Counts direct reap descendants and reports the first child.
 * Requires proctree_lock (shared or exclusive).
 */
167 reap_status(struct thread *td, struct proc *p, void *data)
169 struct proc *reap, *p2, *first_p;
170 struct procctl_reaper_status *rs;
173 sx_assert(&proctree_lock, SX_LOCKED);
174 if ((p->p_treeflag & P_TREE_REAPER) == 0) {
178 rs->rs_flags |= REAPER_STATUS_OWNED;
/* initproc (pid 1) is the system-wide fallback reaper. */
180 if (reap == initproc)
181 rs->rs_flags |= REAPER_STATUS_REALINIT;
182 rs->rs_reaper = reap->p_pid;
183 rs->rs_descendants = 0;
185 if (!LIST_EMPTY(&reap->p_reaplist)) {
186 first_p = LIST_FIRST(&reap->p_children);
188 first_p = LIST_FIRST(&reap->p_reaplist);
189 rs->rs_pid = first_p->p_pid;
/* A reap-list entry whose real parent is the reaper counts as a child. */
190 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
191 if (proc_realparent(p2) == reap)
193 rs->rs_descendants++;
/*
 * PROC_REAP_GETPIDS handler: copy out an array of procctl_reaper_pidinfo
 * entries describing every process on the reaper's reap list.  The
 * proctree_lock is dropped around the M_WAITOK allocation and around the
 * copyout, then reacquired — so the list may change between the count
 * pass and the fill pass; presumably the second pass re-bounds on n
 * (TODO confirm, the bound check is not visible in this extract).
 */
202 reap_getpids(struct thread *td, struct proc *p, void *data)
204 struct proc *reap, *p2;
205 struct procctl_reaper_pidinfo *pi, *pip;
206 struct procctl_reaper_pids *rp;
211 sx_assert(&proctree_lock, SX_LOCKED);
/* Resolve the governing reaper, as in reap_status(). */
213 reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
/* First pass: count entries while the lock is still held. */
216 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
218 sx_unlock(&proctree_lock);
219 if (rp->rp_count < n)
221 pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
222 sx_slock(&proctree_lock);
/* Second pass: fill one pidinfo record per reap-list member. */
223 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
227 bzero(pip, sizeof(*pip));
228 pip->pi_pid = p2->p_pid;
229 pip->pi_subtree = p2->p_reapsubtree;
230 pip->pi_flags = REAPER_PIDINFO_VALID;
231 if (proc_realparent(p2) == reap)
232 pip->pi_flags |= REAPER_PIDINFO_CHILD;
233 if ((p2->p_treeflag & P_TREE_REAPER) != 0)
234 pip->pi_flags |= REAPER_PIDINFO_REAPER;
235 if ((p2->p_flag & P_STOPPED) != 0)
236 pip->pi_flags |= REAPER_PIDINFO_STOPPED;
237 if (p2->p_state == PRS_ZOMBIE)
238 pip->pi_flags |= REAPER_PIDINFO_ZOMBIE;
239 else if ((p2->p_flag & P_WEXIT) != 0)
240 pip->pi_flags |= REAPER_PIDINFO_EXITING;
/* Drop the lock for the copyout of i filled records, then retake it. */
243 sx_sunlock(&proctree_lock);
244 error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
246 sx_slock(&proctree_lock);
/*
 * Work item handed to the taskqueue by reap_kill_subtree_once() so that a
 * target process can be signalled (and single-threaded) from a taskqueue
 * thread.  NOTE(review): most members (target proc, ucred, ksiginfo,
 * error pointer, task) are missing from this extract.
 */
251 struct reap_kill_proc_work {
255 struct procctl_reaper_kill *rk;
/*
 * Signal w->target with w->rk->rk_sig, single-threading it first when
 * needed so no thread of the target escapes the signal during fork.
 * Called with the target's process lock held and a hold reference.
 * On a permission failure, ESRCH records the first failing pid in
 * rk_fpid.
 */
261 reap_kill_proc_locked(struct reap_kill_proc_work *w)
266 PROC_LOCK_ASSERT(w->target, MA_OWNED);
267 PROC_ASSERT_HELD(w->target);
269 error1 = cr_cansignal(w->cr, w->target, w->rk->rk_sig);
271 if (*w->error == ESRCH) {
272 w->rk->rk_fpid = w->target->p_pid;
279 * The need_stop indicates if the target process needs to be
280 * suspended before being signalled. This is needed when we
281 * guarantee that all processes in subtree are signalled,
282 * avoiding the race with some process not yet fully linked
283 * into all structures during fork, ignored by iterator, and
284 * then escaping signalling.
286 * The thread cannot usefully stop itself anyway, and if other
287 * thread of the current process forks while the current
288 * thread signals the whole subtree, it is an application
/* Kernel/system/stopped processes are signalled without single-threading. */
291 if ((w->target->p_flag & (P_KPROC | P_SYSTEM | P_STOPPED)) == 0)
292 need_stop = thread_single(w->target, SINGLE_ALLPROC) == 0;
296 (void)pksignal(w->target, w->rk->rk_sig, w->ksi);
/* Undo the SINGLE_ALLPROC suspension once the signal is posted. */
301 thread_single_end(w->target, SINGLE_ALLPROC);
/*
 * Taskqueue callback: deliver the deferred kill prepared by
 * reap_kill_subtree_once().  Skips targets already exiting (P2_WEXIT).
 * NOTE(review): the lines between the unlock and the proctree_lock
 * acquisition (presumably clearing w->target and waking the waiter) are
 * missing from this extract — confirm against the full source.
 */
305 reap_kill_proc_work(void *arg, int pending __unused)
307 struct reap_kill_proc_work *w;
310 PROC_LOCK(w->target);
311 if ((w->target->p_flag2 & P2_WEXIT) == 0)
312 reap_kill_proc_locked(w);
313 PROC_UNLOCK(w->target);
315 sx_xlock(&proctree_lock);
318 sx_xunlock(&proctree_lock);
/*
 * Worklist node used by reap_kill_subtree_once() to track sub-reapers
 * that still need their reap lists visited (breadth-style via a TAILQ).
 * NOTE(review): the 'parent' proc member referenced later is not visible
 * in this extract.
 */
321 struct reap_kill_tracker {
323 TAILQ_ENTRY(reap_kill_tracker) link;
326 TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);
/*
 * Enqueue reaper p2 on the tracker worklist so its reap list will be
 * visited.  Processes already exiting (P2_WEXIT) are skipped.
 * The M_WAITOK allocation cannot fail.
 */
329 reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
331 struct reap_kill_tracker *t;
334 if ((p2->p_flag2 & P2_WEXIT) != 0) {
340 t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
342 TAILQ_INSERT_TAIL(tracker, t, link);
346 reap_kill_sched_free(struct reap_kill_tracker *t)
/*
 * REAPER_KILL_CHILDREN path: signal each direct child of the reaper
 * with rk->rk_sig.  Permission failures do not stop the loop; the first
 * ESRCH pid is recorded in rk_fpid so userland can report it.
 */
353 reap_kill_children(struct thread *td, struct proc *reaper,
354 struct procctl_reaper_kill *rk, ksiginfo_t *ksi, int *error)
359 LIST_FOREACH(p2, &reaper->p_children, p_sibling) {
/* Skip children that are already exiting. */
361 if ((p2->p_flag2 & P2_WEXIT) == 0) {
362 error1 = p_cansignal(td, p2, rk->rk_sig);
364 if (*error == ESRCH) {
365 rk->rk_fpid = p2->p_pid;
370 * Do not end the loop on error,
371 * signal everything we can.
374 (void)pksignal(p2, rk->rk_sig, ksi);
/*
 * One pass over the reaper's whole subtree, signalling every eligible
 * process.  Sub-reapers found along the way are queued on a local
 * tracker worklist.  Because the lock is dropped while working, the
 * caller (reap_kill_subtree) repeats this until a pass finds no new
 * work.  'pids' is a unit-number set recording already-signalled pids so
 * repeat passes do not double-signal, with pid-reuse detected via
 * P2_REAPKILLED/P2_WEXIT.  The calling process itself is handled inline
 * (a taskqueue thread cannot single-thread its submitter); all other
 * targets are handed to the taskqueue as reap_kill_proc_work.
 * NOTE(review): this extract is heavily decimated — loop variables,
 * continue/return statements, and the sleep loop body are missing.
 */
383 reap_kill_subtree_once(struct thread *td, struct proc *p, struct proc *reaper,
384 struct unrhdr *pids, struct reap_kill_proc_work *w)
386 struct reap_kill_tracker_head tracker;
387 struct reap_kill_tracker *t;
393 TAILQ_INIT(&tracker);
394 reap_kill_sched(&tracker, reaper);
395 while ((t = TAILQ_FIRST(&tracker)) != NULL) {
396 TAILQ_REMOVE(&tracker, t, link);
399 * Since reap_kill_proc() drops proctree_lock sx, it
400 * is possible that the tracked reaper is no longer.
401 * In this case the subtree is reparented to the new
402 * reaper, which should handle it.
404 if ((t->parent->p_treeflag & P_TREE_REAPER) == 0) {
405 reap_kill_sched_free(t);
410 LIST_FOREACH(p2, &t->parent->p_reaplist, p_reapsibling) {
/* With REAPER_KILL_SUBTREE, top-level entries outside the subtree are skipped. */
411 if (t->parent == reaper &&
412 (w->rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
413 p2->p_reapsubtree != w->rk->rk_subtree)
415 if ((p2->p_treeflag & P_TREE_REAPER) != 0)
416 reap_kill_sched(&tracker, p2);
419 * Handle possible pid reuse. If we recorded
420 * p2 as killed but its p_flag2 does not
421 * confirm it, that means that the process
422 * terminated and its id was reused by other
423 * process in the reaper subtree.
425 * Unlocked read of p2->p_flag2 is fine, it is
426 * our thread that set the tested flag.
428 if (alloc_unr_specific(pids, p2->p_pid) != p2->p_pid &&
429 (atomic_load_int(&p2->p_flag2) &
430 (P2_REAPKILLED | P2_WEXIT)) != 0)
/* The caller's own process: signal it directly, not via the taskqueue. */
433 if (p2 == td->td_proc) {
434 if ((p2->p_flag & P_HADTHREADS) != 0 &&
435 (p2->p_flag2 & P2_WEXIT) == 0) {
436 xlocked = sx_xlocked(&proctree_lock);
437 sx_unlock(&proctree_lock);
444 * sapblk ensures that only one thread
445 * in the system sets this flag.
447 p2->p_flag2 |= P2_REAPKILLED;
449 r = thread_single(p2, SINGLE_NO_EXIT);
450 (void)pksignal(p2, w->rk->rk_sig, w->ksi);
453 thread_single_end(p2, SINGLE_NO_EXIT);
/* Retake proctree_lock in the mode we held it before dropping. */
457 sx_xlock(&proctree_lock);
459 sx_slock(&proctree_lock);
463 if ((p2->p_flag2 & P2_WEXIT) == 0) {
465 p2->p_flag2 |= P2_REAPKILLED;
/* Defer to the taskqueue, then sleep until the work item completes. */
468 taskqueue_enqueue(taskqueue_thread,
470 while (w->target != NULL) {
472 &proctree_lock, PWAIT,
482 reap_kill_sched_free(t);
/*
 * Drive reap_kill_subtree_once() to a fixed point, then clear the
 * transient P2_REAPKILLED marks by iterating the recorded pid set.
 * Bails out early if the calling process is itself exiting.
 */
488 reap_kill_subtree(struct thread *td, struct proc *p, struct proc *reaper,
489 struct reap_kill_proc_work *w)
497 * pids records processes which were already signalled, to
498 * avoid doubling signals to them if iteration needs to be
501 init_unrhdr(&pids, 1, PID_MAX, UNR_NO_MTX);
502 PROC_LOCK(td->td_proc);
503 if ((td->td_proc->p_flag2 & P2_WEXIT) != 0) {
504 PROC_UNLOCK(td->td_proc);
507 PROC_UNLOCK(td->td_proc);
/* Repeat passes until one finds no process left to signal. */
508 while (reap_kill_subtree_once(td, p, reaper, &pids, w))
/* Cleanup: walk the recorded pids and clear the REAPKILLED marker. */
511 ihandle = create_iter_unr(&pids);
512 while ((pid = next_iter_unr(ihandle)) != -1) {
515 p2->p_flag2 &= ~P2_REAPKILLED;
519 free_iter_unr(ihandle);
527 reap_kill_sapblk(struct thread *td __unused, void *data)
529 struct procctl_reaper_kill *rk;
532 return ((rk->rk_flags & REAPER_KILL_CHILDREN) == 0);
536 reap_kill(struct thread *td, struct proc *p, void *data)
538 struct reap_kill_proc_work w;
541 struct procctl_reaper_kill *rk;
545 sx_assert(&proctree_lock, SX_LOCKED);
546 if (IN_CAPABILITY_MODE(td))
548 if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
549 (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
550 REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
551 (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
552 (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
555 reaper = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
557 ksi.ksi_signo = rk->rk_sig;
558 ksi.ksi_code = SI_USER;
559 ksi.ksi_pid = td->td_proc->p_pid;
560 ksi.ksi_uid = td->td_ucred->cr_ruid;
564 if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
565 reap_kill_children(td, reaper, rk, &ksi, &error);
567 w.cr = crhold(td->td_ucred);
571 TASK_INIT(&w.t, 0, reap_kill_proc_work, &w);
574 * Prevent swapout, since w, ksi, and possibly rk, are
575 * allocated on the stack. We sleep in
576 * reap_kill_subtree_once() waiting for task to
577 * complete single-threading.
581 reap_kill_subtree(td, p, reaper, &w);
/*
 * PROC_TRACE_CTL handler: enable/disable tracing (ptrace/ktrace) of the
 * process via the P2_NOTRACE / P2_NOTRACE_EXEC flags.  Changing state is
 * refused while the process is already being traced.  Only the process
 * itself may re-enable tracing or undo DISABLE_EXEC.
 */
590 trace_ctl(struct thread *td, struct proc *p, void *data)
594 PROC_LOCK_ASSERT(p, MA_OWNED);
595 state = *(int *)data;
598 * Ktrace changes p_traceflag from or to zero under the
599 * process lock, so the test does not need to acquire ktrace
602 if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
606 case PROC_TRACE_CTL_ENABLE:
607 if (td->td_proc != p)
609 p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
611 case PROC_TRACE_CTL_DISABLE_EXEC:
/* DISABLE_EXEC implies plain DISABLE; exec later clears both. */
612 p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
614 case PROC_TRACE_CTL_DISABLE:
615 if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
616 KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
617 ("dandling P2_NOTRACE_EXEC"));
618 if (td->td_proc != p)
620 p->p_flag2 &= ~P2_NOTRACE_EXEC;
622 p->p_flag2 |= P2_NOTRACE;
/*
 * PROC_TRACE_STATUS handler: report -1 when tracing is disabled, the
 * tracer's pid when currently traced, otherwise presumably 0 (the
 * disabled/-1 and untraced returns are not visible in this extract).
 */
632 trace_status(struct thread *td, struct proc *p, void *data)
637 if ((p->p_flag2 & P2_NOTRACE) != 0) {
638 KASSERT((p->p_flag & P_TRACED) == 0,
639 ("%d traced but tracing disabled", p->p_pid));
641 } else if ((p->p_flag & P_TRACED) != 0) {
642 *status = p->p_pptr->p_pid;
/*
 * PROC_TRAPCAP_CTL handler: toggle P2_TRAPCAP, which controls SIGTRAP
 * delivery on capability-mode violations.  Caller holds the proc lock.
 */
650 trapcap_ctl(struct thread *td, struct proc *p, void *data)
654 PROC_LOCK_ASSERT(p, MA_OWNED);
655 state = *(int *)data;
658 case PROC_TRAPCAP_CTL_ENABLE:
659 p->p_flag2 |= P2_TRAPCAP;
661 case PROC_TRAPCAP_CTL_DISABLE:
662 p->p_flag2 &= ~P2_TRAPCAP;
/*
 * PROC_TRAPCAP_STATUS handler: report the current P2_TRAPCAP setting as
 * the corresponding CTL constant.
 */
671 trapcap_status(struct thread *td, struct proc *p, void *data)
676 *status = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
677 PROC_TRAPCAP_CTL_DISABLE;
/*
 * PROC_NO_NEW_PRIVS_CTL handler: set the one-way P2_NO_NEW_PRIVS flag.
 * Only ENABLE is accepted — the flag is intentionally irrevocable.
 */
682 no_new_privs_ctl(struct thread *td, struct proc *p, void *data)
686 PROC_LOCK_ASSERT(p, MA_OWNED);
687 state = *(int *)data;
689 if (state != PROC_NO_NEW_PRIVS_ENABLE)
691 p->p_flag2 |= P2_NO_NEW_PRIVS;
/*
 * PROC_NO_NEW_PRIVS_STATUS handler: report the P2_NO_NEW_PRIVS state.
 */
696 no_new_privs_status(struct thread *td, struct proc *p, void *data)
699 *(int *)data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ?
700 PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
/*
 * PROC_PROTMAX_CTL handler: force-enable, force-disable, or reset to
 * the system default the implicit PROT_MAX behavior for the process.
 * The ENABLE and DISABLE bits are kept mutually exclusive.
 */
705 protmax_ctl(struct thread *td, struct proc *p, void *data)
709 PROC_LOCK_ASSERT(p, MA_OWNED);
710 state = *(int *)data;
713 case PROC_PROTMAX_FORCE_ENABLE:
714 p->p_flag2 &= ~P2_PROTMAX_DISABLE;
715 p->p_flag2 |= P2_PROTMAX_ENABLE;
717 case PROC_PROTMAX_FORCE_DISABLE:
718 p->p_flag2 |= P2_PROTMAX_DISABLE;
719 p->p_flag2 &= ~P2_PROTMAX_ENABLE;
721 case PROC_PROTMAX_NOFORCE:
722 p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
/*
 * PROC_PROTMAX_STATUS handler: map the flag pair back to the CTL
 * constant and OR in PROC_PROTMAX_ACTIVE when kern_mmap_maxprot() shows
 * implicit PROT_MAX is actually in effect for this process.
 */
731 protmax_status(struct thread *td, struct proc *p, void *data)
735 switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
737 d = PROC_PROTMAX_NOFORCE;
739 case P2_PROTMAX_ENABLE:
740 d = PROC_PROTMAX_FORCE_ENABLE;
742 case P2_PROTMAX_DISABLE:
743 d = PROC_PROTMAX_FORCE_DISABLE;
/* PROT_READ maps to PROT_READ alone only when PROT_MAX is active. */
746 if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
747 d |= PROC_PROTMAX_ACTIVE;
/*
 * PROC_ASLR_CTL handler: force-enable, force-disable, or reset to the
 * system default address-space randomization for the process, mirroring
 * protmax_ctl().  The ENABLE and DISABLE bits stay mutually exclusive.
 */
753 aslr_ctl(struct thread *td, struct proc *p, void *data)
757 PROC_LOCK_ASSERT(p, MA_OWNED);
758 state = *(int *)data;
761 case PROC_ASLR_FORCE_ENABLE:
762 p->p_flag2 &= ~P2_ASLR_DISABLE;
763 p->p_flag2 |= P2_ASLR_ENABLE;
765 case PROC_ASLR_FORCE_DISABLE:
766 p->p_flag2 |= P2_ASLR_DISABLE;
767 p->p_flag2 &= ~P2_ASLR_ENABLE;
769 case PROC_ASLR_NOFORCE:
770 p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
/*
 * PROC_ASLR_STATUS handler: map the flag pair to the CTL constant, and
 * when the process is not exiting, check its vmspace's MAP_ASLR flag to
 * report whether randomization is actually active.
 */
779 aslr_status(struct thread *td, struct proc *p, void *data)
784 switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
786 d = PROC_ASLR_NOFORCE;
789 d = PROC_ASLR_FORCE_ENABLE;
791 case P2_ASLR_DISABLE:
792 d = PROC_ASLR_FORCE_DISABLE;
/* vmspace may be gone for an exiting process; only look while alive. */
795 if ((p->p_flag & P_WEXIT) == 0) {
798 vm = vmspace_acquire_ref(p);
800 if ((vm->vm_map.flags & MAP_ASLR) != 0)
801 d |= PROC_ASLR_ACTIVE;
/*
 * PROC_STACKGAP_CTL handler: control the stack-gap mitigation, both for
 * the current image (P2_STKGAP_DISABLE) and across exec
 * (P2_STKGAP_DISABLE_EXEC).  Re-enabling a disabled gap is refused —
 * the disable is one-way for the current image.
 */
812 stackgap_ctl(struct thread *td, struct proc *p, void *data)
816 PROC_LOCK_ASSERT(p, MA_OWNED);
817 state = *(int *)data;
819 if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
820 PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
822 switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
823 case PROC_STACKGAP_ENABLE:
/* Cannot re-enable once disabled for this image. */
824 if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
827 case PROC_STACKGAP_DISABLE:
828 p->p_flag2 |= P2_STKGAP_DISABLE;
835 switch (state & (PROC_STACKGAP_ENABLE_EXEC |
836 PROC_STACKGAP_DISABLE_EXEC)) {
837 case PROC_STACKGAP_ENABLE_EXEC:
838 p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
840 case PROC_STACKGAP_DISABLE_EXEC:
841 p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
/*
 * PROC_STACKGAP_STATUS handler: report both the current-image and
 * across-exec stack-gap settings as a bitmask.
 */
852 stackgap_status(struct thread *td, struct proc *p, void *data)
856 PROC_LOCK_ASSERT(p, MA_OWNED);
858 d = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE :
859 PROC_STACKGAP_ENABLE;
860 d |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
861 PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
/*
 * PROC_WXMAP_CTL handler: PERMIT clears W^X enforcement for the current
 * image (sets P2_WXORX_DISABLE and drops MAP_WXORX from the vm map);
 * DISALLOW_EXEC requests enforcement after the next exec.  Refused for
 * an exiting process since its vmspace may be gone.
 */
867 wxmap_ctl(struct thread *td, struct proc *p, void *data)
873 PROC_LOCK_ASSERT(p, MA_OWNED);
874 if ((p->p_flag & P_WEXIT) != 0)
876 state = *(int *)data;
879 case PROC_WX_MAPPINGS_PERMIT:
880 p->p_flag2 |= P2_WXORX_DISABLE;
883 vm = vmspace_acquire_ref(p);
887 map->flags &= ~MAP_WXORX;
894 case PROC_WX_MAPPINGS_DISALLOW_EXEC:
895 p->p_flag2 |= P2_WXORX_ENABLE_EXEC;
/*
 * PROC_WXMAP_STATUS handler: report the W^X control flags plus
 * PROC_WXORX_ENFORCE when the vm map currently enforces MAP_WXORX.
 * Refused for an exiting process.
 */
905 wxmap_status(struct thread *td, struct proc *p, void *data)
910 PROC_LOCK_ASSERT(p, MA_OWNED);
911 if ((p->p_flag & P_WEXIT) != 0)
915 if ((p->p_flag2 & P2_WXORX_DISABLE) != 0)
916 d |= PROC_WX_MAPPINGS_PERMIT;
917 if ((p->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
918 d |= PROC_WX_MAPPINGS_DISALLOW_EXEC;
921 vm = vmspace_acquire_ref(p);
923 if ((vm->vm_map.flags & MAP_WXORX) != 0)
924 d |= PROC_WXORX_ENFORCE;
/*
 * PROC_PDEATHSIG_CTL handler: set the signal delivered to this process
 * when its parent exits.  Only the current process may set it; 0 clears,
 * otherwise the signal number must be valid.
 */
934 pdeathsig_ctl(struct thread *td, struct proc *p, void *data)
938 signum = *(int *)data;
939 if (p != td->td_proc || (signum != 0 && !_SIG_VALID(signum)))
941 p->p_pdeathsig = signum;
/*
 * PROC_PDEATHSIG_STATUS handler: report the parent-death signal.
 * Restricted to the current process, matching pdeathsig_ctl().
 */
946 pdeathsig_status(struct thread *td, struct proc *p, void *data)
948 if (p != td->td_proc)
950 *(int *)data = p->p_pdeathsig;
/*
 * Per-command dispatch descriptor for procctl(2):
 *  - esrch_is_einval:  map the not-found error to EINVAL for this cmd
 *  - copyout_on_error: copy the struct back to userland even on failure
 *  - no_nonnull_data:  command takes no data; non-NULL uap->data is an error
 *  - need_candebug:    require p_candebug() instead of p_cansee()
 *  - exec:             the handler, called with the target proc locked
 *  - sapblk:           optional hook deciding whether to block stop_all_proc
 * NOTE(review): the lock_tree/one_proc/copyin_sz/copyout_sz members are
 * missing from this extract but are referenced by the table below.
 */
960 struct procctl_cmd_info {
963 bool esrch_is_einval : 1;
964 bool copyout_on_error : 1;
965 bool no_nonnull_data : 1;
966 bool need_candebug : 1;
969 int (*exec)(struct thread *, struct proc *, void *);
970 bool (*sapblk)(struct thread *, void *);
/*
 * procctl(2) command dispatch table, indexed by command number via
 * designated initializers.  Each entry states its proctree_lock mode,
 * whether it is restricted to a single process, its copyin/copyout
 * sizes, and its handler.  NOTE(review): several index labels (e.g. the
 * first PROC_SPROTECT entry, PROC_REAP_STATUS, PROC_REAP_KILL,
 * PROC_TRACE_CTL, PROC_ASLR_CTL, PROC_WXMAP_CTL) are missing from this
 * decimated extract — each entry without a visible [INDEX] = label
 * belongs to the command named by its .exec handler.
 */
972 static const struct procctl_cmd_info procctl_cmds_info[] = {
974 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
975 .esrch_is_einval = false, .no_nonnull_data = false,
976 .need_candebug = false,
977 .copyin_sz = sizeof(int), .copyout_sz = 0,
978 .exec = protect_set, .copyout_on_error = false, },
979 [PROC_REAP_ACQUIRE] =
980 { .lock_tree = PCTL_XLOCKED, .one_proc = true,
981 .esrch_is_einval = false, .no_nonnull_data = true,
982 .need_candebug = false,
983 .copyin_sz = 0, .copyout_sz = 0,
984 .exec = reap_acquire, .copyout_on_error = false, },
985 [PROC_REAP_RELEASE] =
986 { .lock_tree = PCTL_XLOCKED, .one_proc = true,
987 .esrch_is_einval = false, .no_nonnull_data = true,
988 .need_candebug = false,
989 .copyin_sz = 0, .copyout_sz = 0,
990 .exec = reap_release, .copyout_on_error = false, },
992 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
993 .esrch_is_einval = false, .no_nonnull_data = false,
994 .need_candebug = false,
996 .copyout_sz = sizeof(struct procctl_reaper_status),
997 .exec = reap_status, .copyout_on_error = false, },
998 [PROC_REAP_GETPIDS] =
999 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
1000 .esrch_is_einval = false, .no_nonnull_data = false,
1001 .need_candebug = false,
1002 .copyin_sz = sizeof(struct procctl_reaper_pids),
1004 .exec = reap_getpids, .copyout_on_error = false, },
1006 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
1007 .esrch_is_einval = false, .no_nonnull_data = false,
1008 .need_candebug = false,
1009 .copyin_sz = sizeof(struct procctl_reaper_kill),
1010 .copyout_sz = sizeof(struct procctl_reaper_kill),
/* reap_kill copies out even on error so rk_fpid reaches userland. */
1011 .exec = reap_kill, .copyout_on_error = true,
1012 .sapblk = reap_kill_sapblk, },
1014 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
1015 .esrch_is_einval = false, .no_nonnull_data = false,
1016 .need_candebug = true,
1017 .copyin_sz = sizeof(int), .copyout_sz = 0,
1018 .exec = trace_ctl, .copyout_on_error = false, },
1019 [PROC_TRACE_STATUS] =
1020 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1021 .esrch_is_einval = false, .no_nonnull_data = false,
1022 .need_candebug = false,
1023 .copyin_sz = 0, .copyout_sz = sizeof(int),
1024 .exec = trace_status, .copyout_on_error = false, },
1025 [PROC_TRAPCAP_CTL] =
1026 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
1027 .esrch_is_einval = false, .no_nonnull_data = false,
1028 .need_candebug = true,
1029 .copyin_sz = sizeof(int), .copyout_sz = 0,
1030 .exec = trapcap_ctl, .copyout_on_error = false, },
1031 [PROC_TRAPCAP_STATUS] =
1032 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1033 .esrch_is_einval = false, .no_nonnull_data = false,
1034 .need_candebug = false,
1035 .copyin_sz = 0, .copyout_sz = sizeof(int),
1036 .exec = trapcap_status, .copyout_on_error = false, },
1037 [PROC_PDEATHSIG_CTL] =
1038 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1039 .esrch_is_einval = true, .no_nonnull_data = false,
1040 .need_candebug = false,
1041 .copyin_sz = sizeof(int), .copyout_sz = 0,
1042 .exec = pdeathsig_ctl, .copyout_on_error = false, },
1043 [PROC_PDEATHSIG_STATUS] =
1044 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1045 .esrch_is_einval = true, .no_nonnull_data = false,
1046 .need_candebug = false,
1047 .copyin_sz = 0, .copyout_sz = sizeof(int),
1048 .exec = pdeathsig_status, .copyout_on_error = false, },
1050 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1051 .esrch_is_einval = false, .no_nonnull_data = false,
1052 .need_candebug = true,
1053 .copyin_sz = sizeof(int), .copyout_sz = 0,
1054 .exec = aslr_ctl, .copyout_on_error = false, },
1055 [PROC_ASLR_STATUS] =
1056 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1057 .esrch_is_einval = false, .no_nonnull_data = false,
1058 .need_candebug = false,
1059 .copyin_sz = 0, .copyout_sz = sizeof(int),
1060 .exec = aslr_status, .copyout_on_error = false, },
1061 [PROC_PROTMAX_CTL] =
1062 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1063 .esrch_is_einval = false, .no_nonnull_data = false,
1064 .need_candebug = true,
1065 .copyin_sz = sizeof(int), .copyout_sz = 0,
1066 .exec = protmax_ctl, .copyout_on_error = false, },
1067 [PROC_PROTMAX_STATUS] =
1068 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1069 .esrch_is_einval = false, .no_nonnull_data = false,
1070 .need_candebug = false,
1071 .copyin_sz = 0, .copyout_sz = sizeof(int),
1072 .exec = protmax_status, .copyout_on_error = false, },
1073 [PROC_STACKGAP_CTL] =
1074 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1075 .esrch_is_einval = false, .no_nonnull_data = false,
1076 .need_candebug = true,
1077 .copyin_sz = sizeof(int), .copyout_sz = 0,
1078 .exec = stackgap_ctl, .copyout_on_error = false, },
1079 [PROC_STACKGAP_STATUS] =
1080 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1081 .esrch_is_einval = false, .no_nonnull_data = false,
1082 .need_candebug = false,
1083 .copyin_sz = 0, .copyout_sz = sizeof(int),
1084 .exec = stackgap_status, .copyout_on_error = false, },
1085 [PROC_NO_NEW_PRIVS_CTL] =
1086 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
1087 .esrch_is_einval = false, .no_nonnull_data = false,
1088 .need_candebug = true,
1089 .copyin_sz = sizeof(int), .copyout_sz = 0,
1090 .exec = no_new_privs_ctl, .copyout_on_error = false, },
1091 [PROC_NO_NEW_PRIVS_STATUS] =
1092 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1093 .esrch_is_einval = false, .no_nonnull_data = false,
1094 .need_candebug = false,
1095 .copyin_sz = 0, .copyout_sz = sizeof(int),
1096 .exec = no_new_privs_status, .copyout_on_error = false, },
1098 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1099 .esrch_is_einval = false, .no_nonnull_data = false,
1100 .need_candebug = true,
1101 .copyin_sz = sizeof(int), .copyout_sz = 0,
1102 .exec = wxmap_ctl, .copyout_on_error = false, },
1103 [PROC_WXMAP_STATUS] =
1104 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1105 .esrch_is_einval = false, .no_nonnull_data = false,
1106 .need_candebug = false,
1107 .copyin_sz = 0, .copyout_sz = sizeof(int),
1108 .exec = wxmap_status, .copyout_on_error = false, },
/*
 * procctl(2) syscall entry point.  Machine-dependent commands are routed
 * to cpu_procctl(); otherwise the command is validated against the
 * dispatch table, its input struct is copied in (or a non-NULL data
 * pointer rejected for no-data commands), kern_procctl() is invoked, and
 * the result struct is copied back out when the table asks for it.
 * NOTE(review): the declaration of the scratch union 'x' and several
 * error returns are missing from this extract.
 */
1112 sys_procctl(struct thread *td, struct procctl_args *uap)
1115 struct procctl_reaper_status rs;
1116 struct procctl_reaper_pids rp;
1117 struct procctl_reaper_kill rk;
1120 const struct procctl_cmd_info *cmd_info;
1123 if (uap->com >= PROC_PROCCTL_MD_MIN)
1124 return (cpu_procctl(td, uap->idtype, uap->id,
1125 uap->com, uap->data));
/* Command 0 is invalid; bound-check against the table size. */
1126 if (uap->com == 0 || uap->com >= nitems(procctl_cmds_info))
1128 cmd_info = &procctl_cmds_info[uap->com];
1129 bzero(&x, sizeof(x));
1131 if (cmd_info->copyin_sz > 0) {
1132 error = copyin(uap->data, &x, cmd_info->copyin_sz);
1135 } else if (cmd_info->no_nonnull_data && uap->data != NULL) {
1139 error = kern_procctl(td, uap->idtype, uap->id, uap->com, &x);
/* Copy out on success, or also on error for copyout_on_error commands. */
1141 if (cmd_info->copyout_sz > 0 && (error == 0 ||
1142 cmd_info->copyout_on_error)) {
1143 error1 = copyout(&x, uap->data, cmd_info->copyout_sz);
/*
 * Invoke the command handler for one process.  The target's process
 * lock is held by the caller (kern_procctl) across the call.
 */
1151 kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
1154 PROC_LOCK_ASSERT(p, MA_OWNED);
1155 return (procctl_cmds_info[com].exec(td, p, data));
/*
 * Kernel-internal procctl dispatcher.  Enforces one_proc for non-P_PID
 * id types, runs the command's optional sapblk (stop-all-processes
 * blocking) hook, takes proctree_lock in the table-specified mode, then
 * applies the command to a single pid or to every visible member of a
 * process group.  For the group case, per-process errors are ignored as
 * long as at least one member succeeds.  NOTE(review): this extract is
 * decimated and appears to end before the function's closing brace —
 * the P_PID lookup, the pgroup lookup, and the final return are not
 * visible.
 */
1159 kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
1163 const struct procctl_cmd_info *cmd_info;
1164 int error, first_error, ok;
1167 MPASS(com > 0 && com < nitems(procctl_cmds_info));
1168 cmd_info = &procctl_cmds_info[com];
/* Commands marked one_proc only accept P_PID targets. */
1169 if (idtype != P_PID && cmd_info->one_proc)
1173 if (cmd_info->sapblk != NULL) {
1174 sapblk = cmd_info->sapblk(td, data);
1175 if (sapblk && !stop_all_proc_block())
1179 switch (cmd_info->lock_tree) {
1181 sx_xlock(&proctree_lock);
1184 sx_slock(&proctree_lock);
/* Single-process path: choose the lookup-failure errno per the table. */
1199 error = cmd_info->esrch_is_einval ?
1203 error = cmd_info->need_candebug ? p_candebug(td, p) :
1207 error = kern_procctl_single(td, p, com, data);
1212 * Attempt to apply the operation to all members of the
1213 * group. Ignore processes in the group that can't be
1214 * seen. Ignore errors so long as at least one process is
1215 * able to complete the request successfully.
1225 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
1227 if (p->p_state == PRS_NEW ||
1228 p->p_state == PRS_ZOMBIE ||
1229 (cmd_info->need_candebug ? p_candebug(td, p) :
1230 p_cansee(td, p)) != 0) {
1234 error = kern_procctl_single(td, p, com, data);
/* Remember only the first failure; any success wins overall. */
1238 else if (first_error == 0)
1239 first_error = error;
1243 else if (first_error != 0)
1244 error = first_error;
1247 * Was not able to see any processes in the
/* Release proctree_lock in the same mode it was taken. */
1257 switch (cmd_info->lock_tree) {
1259 sx_xunlock(&proctree_lock);
1262 sx_sunlock(&proctree_lock);
1268 stop_all_proc_unblock();