2 * Copyright (c) 2014 John Baldwin
3 * Copyright (c) 2014, 2016 The FreeBSD Foundation
5 * Portions of this software were developed by Konstantin Belousov
6 * under sponsorship from the FreeBSD Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/capsicum.h>
38 #include <sys/mutex.h>
41 #include <sys/procctl.h>
43 #include <sys/syscallsubr.h>
44 #include <sys/sysproto.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_extern.h>
/*
 * Set or clear P_PROTECTED on a single process (with PPROT_INHERIT
 * also setting P2_INHERIT_PROTECTED so children inherit it).  System
 * processes and processes the caller may not reschedule (p_cansched()
 * failure) are skipped.  NOTE(review): this file is a sampled fragment;
 * several original lines (return statements, braces) are not visible.
 */
53 protect_setchild(struct thread *td, struct proc *p, int flags)
56 PROC_LOCK_ASSERT(p, MA_OWNED);
57 if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
59 if (flags & PPROT_SET) {
60 p->p_flag |= P_PROTECTED;
61 if (flags & PPROT_INHERIT)
62 p->p_flag2 |= P2_INHERIT_PROTECTED;
/* Clear path: drop both the protection and the inherit flag. */
64 p->p_flag &= ~P_PROTECTED;
65 p->p_flag2 &= ~P2_INHERIT_PROTECTED;
/*
 * Apply protect_setchild() to 'top' and its whole descendant subtree.
 * Walks the tree iteratively under proctree_lock: descend into
 * children first, then siblings, then back up (but never above 'top').
 * NOTE(review): fragment — the upward-traversal and return lines are
 * not visible here.
 */
71 protect_setchildren(struct thread *td, struct proc *top, int flags)
78 sx_assert(&proctree_lock, SX_LOCKED);
80 ret |= protect_setchild(td, p, flags);
83 * If this process has children, descend to them next,
84 * otherwise do any siblings, and if done with this level,
85 * follow back up the tree (but not past top).
87 if (!LIST_EMPTY(&p->p_children))
88 p = LIST_FIRST(&p->p_children);
/* No children: advance to the next sibling, if any. */
94 if (LIST_NEXT(p, p_sibling)) {
95 p = LIST_NEXT(p, p_sibling);
/*
 * PROC_SPROTECT handler: validate the user-supplied flags word,
 * require PRIV_VM_MADV_PROTECT, then apply the protection change to
 * one process or (with PPROT_DESCEND) the whole subtree.
 * NOTE(review): fragment — switch cases and error returns are on
 * lines not visible here.
 */
105 protect_set(struct thread *td, struct proc *p, void *data)
107 int error, flags, ret;
109 flags = *(int *)data;
110 switch (PPROT_OP(flags)) {
/* Reject any flag bits other than DESCEND/INHERIT. */
118 if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
121 error = priv_check(td, PRIV_VM_MADV_PROTECT);
125 if (flags & PPROT_DESCEND)
126 ret = protect_setchildren(td, p, flags);
128 ret = protect_setchild(td, p, flags);
/*
 * PROC_REAP_ACQUIRE: make the calling process a reaper for its
 * descendants.  Only the current process may acquire reaper status,
 * and doing so twice is rejected (error return on a missing line).
 */
135 reap_acquire(struct thread *td, struct proc *p, void *data __unused)
138 sx_assert(&proctree_lock, SX_XLOCKED);
139 if (p != td->td_proc)
141 if ((p->p_treeflag & P_TREE_REAPER) != 0)
143 p->p_treeflag |= P_TREE_REAPER;
145 * We do not reattach existing children and the whole tree
146 * under them to us, since p->p_reaper already seen them.
/*
 * PROC_REAP_RELEASE: give up reaper status for the calling process
 * and hand its reap children back (reaper_abandon_children()).
 * Only the current process may release, and it must currently be a
 * reaper.  NOTE(review): fragment — error returns not visible.
 */
152 reap_release(struct thread *td, struct proc *p, void *data __unused)
155 sx_assert(&proctree_lock, SX_XLOCKED);
156 if (p != td->td_proc)
160 if ((p->p_treeflag & P_TREE_REAPER) == 0)
162 reaper_abandon_children(p, false);
/*
 * PROC_REAP_STATUS: fill a struct procctl_reaper_status for the
 * caller-selected process.  If p itself is not a reaper, report on
 * p's reaper instead; REAPER_STATUS_OWNED / REAPER_STATUS_REALINIT
 * flag bits distinguish the cases.  Descendant count is taken from
 * the reaplist; direct children are counted separately via
 * proc_realparent().  NOTE(review): fragment — the branch choosing
 * p_children vs. p_reaplist for first_p has its condition on a
 * missing line.
 */
167 reap_status(struct thread *td, struct proc *p, void *data)
169 struct proc *reap, *p2, *first_p;
170 struct procctl_reaper_status *rs;
173 sx_assert(&proctree_lock, SX_LOCKED);
174 if ((p->p_treeflag & P_TREE_REAPER) == 0) {
178 rs->rs_flags |= REAPER_STATUS_OWNED;
180 if (reap == initproc)
181 rs->rs_flags |= REAPER_STATUS_REALINIT;
182 rs->rs_reaper = reap->p_pid;
183 rs->rs_descendants = 0;
185 if (!LIST_EMPTY(&reap->p_reaplist)) {
186 first_p = LIST_FIRST(&reap->p_children);
188 first_p = LIST_FIRST(&reap->p_reaplist);
189 rs->rs_pid = first_p->p_pid;
190 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
191 if (proc_realparent(p2) == reap)
193 rs->rs_descendants++;
/*
 * PROC_REAP_GETPIDS: copy out a procctl_reaper_pidinfo array
 * describing every process on the reaper's reaplist.  Each entry
 * records the pid, subtree id, and flag bits (VALID always; CHILD if
 * the reaper is the real parent; REAPER if the descendant is itself
 * a reaper).
 *
 * NOTE(review): proctree_lock is dropped across the M_WAITOK malloc
 * and reacquired afterwards (visible below), so the list — and the
 * count 'n' computed before the drop — can be stale when the second
 * walk runs; the copyout may report fewer/other processes than were
 * counted.  Confirm whether upstream later reworked this window.
 */
202 reap_getpids(struct thread *td, struct proc *p, void *data)
204 struct proc *reap, *p2;
205 struct procctl_reaper_pidinfo *pi, *pip;
206 struct procctl_reaper_pids *rp;
211 sx_assert(&proctree_lock, SX_LOCKED);
/* If p is not itself a reaper, report on p's reaper. */
213 reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
216 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
218 sx_unlock(&proctree_lock);
219 if (rp->rp_count < n)
221 pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
222 sx_slock(&proctree_lock);
223 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
227 bzero(pip, sizeof(*pip));
228 pip->pi_pid = p2->p_pid;
229 pip->pi_subtree = p2->p_reapsubtree;
230 pip->pi_flags = REAPER_PIDINFO_VALID;
231 if (proc_realparent(p2) == reap)
232 pip->pi_flags |= REAPER_PIDINFO_CHILD;
233 if ((p2->p_treeflag & P_TREE_REAPER) != 0)
234 pip->pi_flags |= REAPER_PIDINFO_REAPER;
237 sx_sunlock(&proctree_lock);
238 error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
240 sx_slock(&proctree_lock);
/*
 * Deliver rk->rk_sig to one process if p_cansignal() permits.
 * Aggregates the result into *error: a permission failure records
 * the failing pid in rk->rk_fpid when *error was still ESRCH
 * ("no process signalled yet").  NOTE(review): fragment — the lines
 * updating *error itself are not visible.
 */
246 reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi,
247 struct procctl_reaper_kill *rk, int *error)
252 error1 = p_cansignal(td, p2, rk->rk_sig);
254 pksignal(p2, rk->rk_sig, ksi);
257 } else if (*error == ESRCH) {
258 rk->rk_fpid = p2->p_pid;
/*
 * Work-list node for the iterative (non-recursive) walk of nested
 * reapers in reap_kill(); each node queues one sub-reaper whose
 * reaplist still has to be visited.
 */
264 struct reap_kill_tracker {
266 TAILQ_ENTRY(reap_kill_tracker) link;
269 TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);
/*
 * Queue reaper p2 on the tracker work list for a later visit.
 * M_WAITOK: may sleep; callers must not hold a mutex across this.
 */
272 reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
274 struct reap_kill_tracker *t;
276 t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
278 TAILQ_INSERT_TAIL(tracker, t, link);
/*
 * PROC_REAP_KILL: signal the reaper's descendants.
 *
 * Modes (mutually exclusive, enforced below):
 *   REAPER_KILL_CHILDREN - signal only direct children of the reaper;
 *   REAPER_KILL_SUBTREE  - restrict to descendants whose
 *                          p_reapsubtree matches rk->rk_subtree;
 *   neither              - signal the whole reap subtree.
 * The subtree walk is iterative via the reap_kill_tracker work list,
 * descending into nested reapers.  Errors from individual processes
 * do not stop the loop; the aggregate lands in 'error' via
 * reap_kill_proc().  Disallowed in capability mode (check below;
 * its error return is on a missing line).
 * NOTE(review): fragment — ksi initialization is only partially
 * visible; the free() of tracker nodes is on missing lines.
 */
282 reap_kill(struct thread *td, struct proc *p, void *data)
284 struct proc *reap, *p2;
286 struct reap_kill_tracker_head tracker;
287 struct reap_kill_tracker *t;
288 struct procctl_reaper_kill *rk;
292 sx_assert(&proctree_lock, SX_LOCKED);
293 if (IN_CAPABILITY_MODE(td))
/* Validate signal number and reject conflicting/unknown flags. */
295 if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
296 (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
297 REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
298 (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
299 (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
302 reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
/* Build the siginfo once; reused for every delivery. */
304 ksi.ksi_signo = rk->rk_sig;
305 ksi.ksi_code = SI_USER;
306 ksi.ksi_pid = td->td_proc->p_pid;
307 ksi.ksi_uid = td->td_ucred->cr_ruid;
311 if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
312 for (p2 = LIST_FIRST(&reap->p_children); p2 != NULL;
313 p2 = LIST_NEXT(p2, p_sibling)) {
314 reap_kill_proc(td, p2, &ksi, rk, &error);
316 * Do not end the loop on error, signal
/* Full-subtree walk: breadth-first over nested reapers. */
321 TAILQ_INIT(&tracker);
322 reap_kill_sched(&tracker, reap);
323 while ((t = TAILQ_FIRST(&tracker)) != NULL) {
324 MPASS((t->parent->p_treeflag & P_TREE_REAPER) != 0);
325 TAILQ_REMOVE(&tracker, t, link);
326 for (p2 = LIST_FIRST(&t->parent->p_reaplist); p2 != NULL;
327 p2 = LIST_NEXT(p2, p_reapsibling)) {
/* SUBTREE mode filters only at the top-level reaplist. */
328 if (t->parent == reap &&
329 (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
330 p2->p_reapsubtree != rk->rk_subtree)
332 if ((p2->p_treeflag & P_TREE_REAPER) != 0)
333 reap_kill_sched(&tracker, p2);
334 reap_kill_proc(td, p2, &ksi, rk, &error);
/*
 * PROC_TRACE_CTL handler: enable/disable debugger attachment and
 * ktrace for a process via P2_NOTRACE / P2_NOTRACE_EXEC.  Rejected
 * (error on a missing line) while the process is already being traced
 * (P_TRACED or active ktrace).  ENABLE and un-doing DISABLE_EXEC are
 * restricted to the process acting on itself (td->td_proc == p).
 */
344 trace_ctl(struct thread *td, struct proc *p, void *data)
348 PROC_LOCK_ASSERT(p, MA_OWNED);
349 state = *(int *)data;
352 * Ktrace changes p_traceflag from or to zero under the
353 * process lock, so the test does not need to acquire ktrace
356 if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
360 case PROC_TRACE_CTL_ENABLE:
361 if (td->td_proc != p)
363 p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
365 case PROC_TRACE_CTL_DISABLE_EXEC:
366 p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
368 case PROC_TRACE_CTL_DISABLE:
/* DISABLE after DISABLE_EXEC: only self may narrow the scope. */
369 if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
/* XXX(review): "dandling" in the assert message is a typo for
 * "dangling" — string kept byte-identical here. */
370 KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
371 ("dandling P2_NOTRACE_EXEC"));
372 if (td->td_proc != p)
374 p->p_flag2 &= ~P2_NOTRACE_EXEC;
376 p->p_flag2 |= P2_NOTRACE;
/*
 * PROC_TRACE_STATUS handler: report -1 when tracing is disabled
 * (P2_NOTRACE; value set on a missing line), the tracer's pid when
 * traced, else 0 (presumably — the untraced branch is not visible).
 */
386 trace_status(struct thread *td, struct proc *p, void *data)
391 if ((p->p_flag2 & P2_NOTRACE) != 0) {
392 KASSERT((p->p_flag & P_TRACED) == 0,
393 ("%d traced but tracing disabled", p->p_pid));
395 } else if ((p->p_flag & P_TRACED) != 0) {
396 *status = p->p_pptr->p_pid;
/*
 * PROC_TRAPCAP_CTL handler: toggle P2_TRAPCAP, which controls whether
 * capability-mode violations deliver a trap instead of an error.
 */
404 trapcap_ctl(struct thread *td, struct proc *p, void *data)
408 PROC_LOCK_ASSERT(p, MA_OWNED);
409 state = *(int *)data;
412 case PROC_TRAPCAP_CTL_ENABLE:
413 p->p_flag2 |= P2_TRAPCAP;
415 case PROC_TRAPCAP_CTL_DISABLE:
416 p->p_flag2 &= ~P2_TRAPCAP;
/*
 * PROC_TRAPCAP_STATUS handler: report the current P2_TRAPCAP state
 * as PROC_TRAPCAP_CTL_ENABLE / _DISABLE.
 */
425 trapcap_status(struct thread *td, struct proc *p, void *data)
430 *status = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
431 PROC_TRAPCAP_CTL_DISABLE;
/*
 * PROC_NO_NEW_PRIVS_CTL handler: one-way switch — only ENABLE is
 * accepted (anything else is rejected on the missing error line);
 * P2_NO_NEW_PRIVS is never cleared here.
 */
436 no_new_privs_ctl(struct thread *td, struct proc *p, void *data)
440 PROC_LOCK_ASSERT(p, MA_OWNED);
441 state = *(int *)data;
443 if (state != PROC_NO_NEW_PRIVS_ENABLE)
445 p->p_flag2 |= P2_NO_NEW_PRIVS;
/*
 * PROC_NO_NEW_PRIVS_STATUS handler: report the P2_NO_NEW_PRIVS state.
 */
450 no_new_privs_status(struct thread *td, struct proc *p, void *data)
453 *(int *)data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ?
454 PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
/*
 * PROC_PROTMAX_CTL handler: force-enable, force-disable, or defer to
 * the system default (NOFORCE) the implicit-PROT_MAX policy, kept as
 * the mutually-exclusive pair P2_PROTMAX_ENABLE/P2_PROTMAX_DISABLE.
 */
459 protmax_ctl(struct thread *td, struct proc *p, void *data)
463 PROC_LOCK_ASSERT(p, MA_OWNED);
464 state = *(int *)data;
467 case PROC_PROTMAX_FORCE_ENABLE:
468 p->p_flag2 &= ~P2_PROTMAX_DISABLE;
469 p->p_flag2 |= P2_PROTMAX_ENABLE;
471 case PROC_PROTMAX_FORCE_DISABLE:
472 p->p_flag2 |= P2_PROTMAX_DISABLE;
473 p->p_flag2 &= ~P2_PROTMAX_ENABLE;
475 case PROC_PROTMAX_NOFORCE:
476 p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
/*
 * PROC_PROTMAX_STATUS handler: translate the flag pair back into the
 * FORCE_ENABLE / FORCE_DISABLE / NOFORCE constants, and OR in
 * PROC_PROTMAX_ACTIVE when the effective mmap policy (queried via
 * kern_mmap_maxprot()) actually restricts max protection.
 */
485 protmax_status(struct thread *td, struct proc *p, void *data)
489 switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
491 d = PROC_PROTMAX_NOFORCE;
493 case P2_PROTMAX_ENABLE:
494 d = PROC_PROTMAX_FORCE_ENABLE;
496 case P2_PROTMAX_DISABLE:
497 d = PROC_PROTMAX_FORCE_DISABLE;
500 if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
501 d |= PROC_PROTMAX_ACTIVE;
/*
 * PROC_ASLR_CTL handler: force-enable, force-disable, or defer to the
 * system default (NOFORCE) address-space randomization for the
 * process, via the mutually-exclusive P2_ASLR_ENABLE/P2_ASLR_DISABLE
 * pair.  Takes effect at the next exec (not changed retroactively
 * here — only flag bits are touched).
 */
507 aslr_ctl(struct thread *td, struct proc *p, void *data)
511 PROC_LOCK_ASSERT(p, MA_OWNED);
512 state = *(int *)data;
515 case PROC_ASLR_FORCE_ENABLE:
516 p->p_flag2 &= ~P2_ASLR_DISABLE;
517 p->p_flag2 |= P2_ASLR_ENABLE;
519 case PROC_ASLR_FORCE_DISABLE:
520 p->p_flag2 |= P2_ASLR_DISABLE;
521 p->p_flag2 &= ~P2_ASLR_ENABLE;
523 case PROC_ASLR_NOFORCE:
524 p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
/*
 * PROC_ASLR_STATUS handler: report the forced policy plus
 * PROC_ASLR_ACTIVE when the live vmspace map has MAP_ASLR set.
 * The vmspace is only examined when the process is not exiting
 * (P_WEXIT); vmspace_acquire_ref() takes a reference so the map can
 * be read after dropping the process lock (drop/reacquire lines are
 * not visible in this fragment).
 */
533 aslr_status(struct thread *td, struct proc *p, void *data)
538 switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
540 d = PROC_ASLR_NOFORCE;
543 d = PROC_ASLR_FORCE_ENABLE;
545 case P2_ASLR_DISABLE:
546 d = PROC_ASLR_FORCE_DISABLE;
549 if ((p->p_flag & P_WEXIT) == 0) {
552 vm = vmspace_acquire_ref(p);
554 if ((vm->vm_map.flags & MAP_ASLR) != 0)
555 d |= PROC_ASLR_ACTIVE;
/*
 * PROC_STACKGAP_CTL handler: control the stack guard gap for the
 * current image (P2_STKGAP_DISABLE) and for future execs
 * (P2_STKGAP_DISABLE_EXEC) independently.  Re-enabling the gap for a
 * running image is refused once disabled (error on a missing line) —
 * the gap cannot be reinstated safely after mappings were made.
 */
566 stackgap_ctl(struct thread *td, struct proc *p, void *data)
570 PROC_LOCK_ASSERT(p, MA_OWNED);
571 state = *(int *)data;
/* Reject unknown bits up front. */
573 if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
574 PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
576 switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
577 case PROC_STACKGAP_ENABLE:
578 if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
581 case PROC_STACKGAP_DISABLE:
582 p->p_flag2 |= P2_STKGAP_DISABLE;
/* The exec-time setting may be flipped freely in either direction. */
589 switch (state & (PROC_STACKGAP_ENABLE_EXEC |
590 PROC_STACKGAP_DISABLE_EXEC)) {
591 case PROC_STACKGAP_ENABLE_EXEC:
592 p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
594 case PROC_STACKGAP_DISABLE_EXEC:
595 p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
/*
 * PROC_STACKGAP_STATUS handler: report both the current-image and
 * at-exec stack-gap settings as a combined flag word.
 */
606 stackgap_status(struct thread *td, struct proc *p, void *data)
610 PROC_LOCK_ASSERT(p, MA_OWNED);
612 d = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE :
613 PROC_STACKGAP_ENABLE;
614 d |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
615 PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
/*
 * PROC_WXMAP_CTL handler: PERMIT drops W^X enforcement for the
 * current image (clears MAP_WXORX on the live vm_map and sets
 * P2_WXORX_DISABLE); DISALLOW_EXEC arms enforcement for the next
 * exec (P2_WXORX_ENABLE_EXEC).  Refused while exiting (P_WEXIT;
 * error on a missing line).  NOTE(review): the lock drop around
 * vmspace_acquire_ref() and the map-lock lines are not visible in
 * this fragment.
 */
621 wxmap_ctl(struct thread *td, struct proc *p, void *data)
627 PROC_LOCK_ASSERT(p, MA_OWNED);
628 if ((p->p_flag & P_WEXIT) != 0)
630 state = *(int *)data;
633 case PROC_WX_MAPPINGS_PERMIT:
634 p->p_flag2 |= P2_WXORX_DISABLE;
637 vm = vmspace_acquire_ref(p);
641 map->flags &= ~MAP_WXORX;
648 case PROC_WX_MAPPINGS_DISALLOW_EXEC:
649 p->p_flag2 |= P2_WXORX_ENABLE_EXEC;
/*
 * PROC_WXMAP_STATUS handler: report the W^X flag bits plus
 * PROC_WXORX_ENFORCE when the live vm_map actually has MAP_WXORX
 * set.  Refused while exiting (P_WEXIT; error on a missing line).
 */
659 wxmap_status(struct thread *td, struct proc *p, void *data)
664 PROC_LOCK_ASSERT(p, MA_OWNED);
665 if ((p->p_flag & P_WEXIT) != 0)
669 if ((p->p_flag2 & P2_WXORX_DISABLE) != 0)
670 d |= PROC_WX_MAPPINGS_PERMIT;
671 if ((p->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
672 d |= PROC_WX_MAPPINGS_DISALLOW_EXEC;
675 vm = vmspace_acquire_ref(p);
677 if ((vm->vm_map.flags & MAP_WXORX) != 0)
678 d |= PROC_WXORX_ENFORCE;
/*
 * PROC_PDEATHSIG_CTL handler: set the signal delivered to this
 * process when its parent exits.  Self-only; 0 clears it, otherwise
 * the signal number must be valid.
 */
688 pdeathsig_ctl(struct thread *td, struct proc *p, void *data)
692 signum = *(int *)data;
693 if (p != td->td_proc || (signum != 0 && !_SIG_VALID(signum)))
695 p->p_pdeathsig = signum;
/*
 * PROC_PDEATHSIG_STATUS handler: report the parent-death signal
 * (self-only).
 */
700 pdeathsig_status(struct thread *td, struct proc *p, void *data)
702 if (p != td->td_proc)
704 *(int *)data = p->p_pdeathsig;
/*
 * Per-command dispatch descriptor for procctl(2):
 *   lock_tree         - proctree_lock mode required (field on a
 *                       missing line; values PCTL_UNLOCKED/SLOCKED/
 *                       XLOCKED used in the table below);
 *   esrch_is_einval   - map ESRCH to EINVAL for this command;
 *   copyout_on_error  - copy results back even when exec failed;
 *   no_nonnull_data   - command takes no data; non-NULL uap->data
 *                       is an error;
 *   need_candebug     - require p_candebug() instead of p_cansee();
 *   exec              - the handler implementing the command.
 */
714 struct procctl_cmd_info {
717 bool esrch_is_einval : 1;
718 bool copyout_on_error : 1;
719 bool no_nonnull_data : 1;
720 bool need_candebug : 1;
723 int (*exec)(struct thread *, struct proc *, void *);
/*
 * Dispatch table indexed by procctl(2) command number.  Each entry
 * pairs a handler above with its locking mode, copyin/copyout sizes,
 * and permission/quirk flags.  NOTE(review): fragment — a few entry
 * designators (e.g. for the first entry, protect_set, and several
 * others) and some .copyin_sz/.copyout_sz lines fall on lines not
 * visible here.
 */
725 static const struct procctl_cmd_info procctl_cmds_info[] = {
727 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
728 .esrch_is_einval = false, .no_nonnull_data = false,
729 .need_candebug = false,
730 .copyin_sz = sizeof(int), .copyout_sz = 0,
731 .exec = protect_set, .copyout_on_error = false, },
732 [PROC_REAP_ACQUIRE] =
733 { .lock_tree = PCTL_XLOCKED, .one_proc = true,
734 .esrch_is_einval = false, .no_nonnull_data = true,
735 .need_candebug = false,
736 .copyin_sz = 0, .copyout_sz = 0,
737 .exec = reap_acquire, .copyout_on_error = false, },
738 [PROC_REAP_RELEASE] =
739 { .lock_tree = PCTL_XLOCKED, .one_proc = true,
740 .esrch_is_einval = false, .no_nonnull_data = true,
741 .need_candebug = false,
742 .copyin_sz = 0, .copyout_sz = 0,
743 .exec = reap_release, .copyout_on_error = false, },
745 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
746 .esrch_is_einval = false, .no_nonnull_data = false,
747 .need_candebug = false,
749 .copyout_sz = sizeof(struct procctl_reaper_status),
750 .exec = reap_status, .copyout_on_error = false, },
751 [PROC_REAP_GETPIDS] =
752 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
753 .esrch_is_einval = false, .no_nonnull_data = false,
754 .need_candebug = false,
755 .copyin_sz = sizeof(struct procctl_reaper_pids),
757 .exec = reap_getpids, .copyout_on_error = false, },
759 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
760 .esrch_is_einval = false, .no_nonnull_data = false,
761 .need_candebug = false,
762 .copyin_sz = sizeof(struct procctl_reaper_kill),
763 .copyout_sz = sizeof(struct procctl_reaper_kill),
764 .exec = reap_kill, .copyout_on_error = true, },
766 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
767 .esrch_is_einval = false, .no_nonnull_data = false,
768 .need_candebug = true,
769 .copyin_sz = sizeof(int), .copyout_sz = 0,
770 .exec = trace_ctl, .copyout_on_error = false, },
771 [PROC_TRACE_STATUS] =
772 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
773 .esrch_is_einval = false, .no_nonnull_data = false,
774 .need_candebug = false,
775 .copyin_sz = 0, .copyout_sz = sizeof(int),
776 .exec = trace_status, .copyout_on_error = false, },
778 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
779 .esrch_is_einval = false, .no_nonnull_data = false,
780 .need_candebug = true,
781 .copyin_sz = sizeof(int), .copyout_sz = 0,
782 .exec = trapcap_ctl, .copyout_on_error = false, },
783 [PROC_TRAPCAP_STATUS] =
784 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
785 .esrch_is_einval = false, .no_nonnull_data = false,
786 .need_candebug = false,
787 .copyin_sz = 0, .copyout_sz = sizeof(int),
788 .exec = trapcap_status, .copyout_on_error = false, },
789 [PROC_PDEATHSIG_CTL] =
790 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
791 .esrch_is_einval = true, .no_nonnull_data = false,
792 .need_candebug = false,
793 .copyin_sz = sizeof(int), .copyout_sz = 0,
794 .exec = pdeathsig_ctl, .copyout_on_error = false, },
795 [PROC_PDEATHSIG_STATUS] =
796 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
797 .esrch_is_einval = true, .no_nonnull_data = false,
798 .need_candebug = false,
799 .copyin_sz = 0, .copyout_sz = sizeof(int),
800 .exec = pdeathsig_status, .copyout_on_error = false, },
802 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
803 .esrch_is_einval = false, .no_nonnull_data = false,
804 .need_candebug = true,
805 .copyin_sz = sizeof(int), .copyout_sz = 0,
806 .exec = aslr_ctl, .copyout_on_error = false, },
808 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
809 .esrch_is_einval = false, .no_nonnull_data = false,
810 .need_candebug = false,
811 .copyin_sz = 0, .copyout_sz = sizeof(int),
812 .exec = aslr_status, .copyout_on_error = false, },
814 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
815 .esrch_is_einval = false, .no_nonnull_data = false,
816 .need_candebug = true,
817 .copyin_sz = sizeof(int), .copyout_sz = 0,
818 .exec = protmax_ctl, .copyout_on_error = false, },
819 [PROC_PROTMAX_STATUS] =
820 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
821 .esrch_is_einval = false, .no_nonnull_data = false,
822 .need_candebug = false,
823 .copyin_sz = 0, .copyout_sz = sizeof(int),
824 .exec = protmax_status, .copyout_on_error = false, },
825 [PROC_STACKGAP_CTL] =
826 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
827 .esrch_is_einval = false, .no_nonnull_data = false,
828 .need_candebug = true,
829 .copyin_sz = sizeof(int), .copyout_sz = 0,
830 .exec = stackgap_ctl, .copyout_on_error = false, },
831 [PROC_STACKGAP_STATUS] =
832 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
833 .esrch_is_einval = false, .no_nonnull_data = false,
834 .need_candebug = false,
835 .copyin_sz = 0, .copyout_sz = sizeof(int),
836 .exec = stackgap_status, .copyout_on_error = false, },
837 [PROC_NO_NEW_PRIVS_CTL] =
838 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
839 .esrch_is_einval = false, .no_nonnull_data = false,
840 .need_candebug = true,
841 .copyin_sz = sizeof(int), .copyout_sz = 0,
842 .exec = no_new_privs_ctl, .copyout_on_error = false, },
843 [PROC_NO_NEW_PRIVS_STATUS] =
844 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
845 .esrch_is_einval = false, .no_nonnull_data = false,
846 .need_candebug = false,
847 .copyin_sz = 0, .copyout_sz = sizeof(int),
848 .exec = no_new_privs_status, .copyout_on_error = false, },
850 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
851 .esrch_is_einval = false, .no_nonnull_data = false,
852 .need_candebug = true,
853 .copyin_sz = sizeof(int), .copyout_sz = 0,
854 .exec = wxmap_ctl, .copyout_on_error = false, },
855 [PROC_WXMAP_STATUS] =
856 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
857 .esrch_is_einval = false, .no_nonnull_data = false,
858 .need_candebug = false,
859 .copyin_sz = 0, .copyout_sz = sizeof(int),
860 .exec = wxmap_status, .copyout_on_error = false, },
/*
 * procctl(2) syscall entry point.  Machine-dependent commands
 * (>= PROC_PROCCTL_MD_MIN) are forwarded to cpu_procctl(); everything
 * else is validated against procctl_cmds_info[], the data argument is
 * copied in per the table's copyin_sz, dispatched through
 * kern_procctl(), and results copied back per copyout_sz (also on
 * error when copyout_on_error is set).  NOTE(review): fragment —
 * the union/local 'x' declaration and some error returns are on
 * lines not visible here.
 */
864 sys_procctl(struct thread *td, struct procctl_args *uap)
867 struct procctl_reaper_status rs;
868 struct procctl_reaper_pids rp;
869 struct procctl_reaper_kill rk;
872 const struct procctl_cmd_info *cmd_info;
875 if (uap->com >= PROC_PROCCTL_MD_MIN)
876 return (cpu_procctl(td, uap->idtype, uap->id,
877 uap->com, uap->data));
878 if (uap->com == 0 || uap->com >= nitems(procctl_cmds_info))
880 cmd_info = &procctl_cmds_info[uap->com];
881 bzero(&x, sizeof(x));
883 if (cmd_info->copyin_sz > 0) {
884 error = copyin(uap->data, &x, cmd_info->copyin_sz);
/* Commands that take no data must be passed a NULL pointer. */
887 } else if (cmd_info->no_nonnull_data && uap->data != NULL) {
891 error = kern_procctl(td, uap->idtype, uap->id, uap->com, &x);
893 if (cmd_info->copyout_sz > 0 && (error == 0 ||
894 cmd_info->copyout_on_error)) {
895 error1 = copyout(&x, uap->data, cmd_info->copyout_sz);
/*
 * Dispatch one already-locked process to the command's handler.
 * Caller holds the process lock and has performed permission checks.
 */
903 kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
906 PROC_LOCK_ASSERT(p, MA_OWNED);
907 return (procctl_cmds_info[com].exec(td, p, data));
911 kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
915 const struct procctl_cmd_info *cmd_info;
916 int error, first_error, ok;
918 MPASS(com > 0 && com < nitems(procctl_cmds_info));
919 cmd_info = &procctl_cmds_info[com];
920 if (idtype != P_PID && cmd_info->one_proc)
923 switch (cmd_info->lock_tree) {
925 sx_xlock(&proctree_lock);
928 sx_slock(&proctree_lock);
943 error = cmd_info->esrch_is_einval ?
947 error = cmd_info->need_candebug ? p_candebug(td, p) :
951 error = kern_procctl_single(td, p, com, data);
956 * Attempt to apply the operation to all members of the
957 * group. Ignore processes in the group that can't be
958 * seen. Ignore errors so long as at least one process is
959 * able to complete the request successfully.
969 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
971 if (p->p_state == PRS_NEW ||
972 p->p_state == PRS_ZOMBIE ||
973 (cmd_info->need_candebug ? p_candebug(td, p) :
974 p_cansee(td, p)) != 0) {
978 error = kern_procctl_single(td, p, com, data);
982 else if (first_error == 0)
987 else if (first_error != 0)
991 * Was not able to see any processes in the
1001 switch (cmd_info->lock_tree) {
1003 sx_xunlock(&proctree_lock);
1006 sx_sunlock(&proctree_lock);