2 * Copyright (c) 2014 John Baldwin
3 * Copyright (c) 2014, 2016 The FreeBSD Foundation
5 * Portions of this software were developed by Konstantin Belousov
6 * under sponsorship from the FreeBSD Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include "opt_ktrace.h"
32 #include <sys/param.h>
33 #include <sys/_unrhdr.h>
34 #include <sys/systm.h>
35 #include <sys/capsicum.h>
38 #include <sys/mutex.h>
41 #include <sys/procctl.h>
43 #include <sys/syscallsubr.h>
44 #include <sys/sysproto.h>
45 #include <sys/taskqueue.h>
50 #include <vm/vm_map.h>
51 #include <vm/vm_extern.h>
/*
 * Apply PPROT_SET / clear to a single process: set or drop P_PROTECTED
 * and, when PPROT_INHERIT is given, P2_INHERIT_PROTECTED.  Skips system
 * processes and processes the caller may not reschedule (p_cansched).
 * NOTE(review): sampled extract — return statements/braces are missing
 * from this view; comments describe only the visible lines.
 */
54 protect_setchild(struct thread *td, struct proc *p, int flags)
57 PROC_LOCK_ASSERT(p, MA_OWNED);
58 if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
60 if (flags & PPROT_SET) {
61 p->p_flag |= P_PROTECTED;
62 if (flags & PPROT_INHERIT)
63 p->p_flag2 |= P2_INHERIT_PROTECTED;
65 p->p_flag &= ~P_PROTECTED;
66 p->p_flag2 &= ~P2_INHERIT_PROTECTED;
/*
 * Walk the process tree rooted at 'top' applying protect_setchild() to
 * each descendant, using an explicit child/sibling/parent traversal so
 * no recursion is needed.  proctree_lock must be held by the caller.
 */
72 protect_setchildren(struct thread *td, struct proc *top, int flags)
79 sx_assert(&proctree_lock, SX_LOCKED);
81 ret |= protect_setchild(td, p, flags);
84 * If this process has children, descend to them next,
85 * otherwise do any siblings, and if done with this level,
86 * follow back up the tree (but not past top).
88 if (!LIST_EMPTY(&p->p_children))
89 p = LIST_FIRST(&p->p_children);
95 if (LIST_NEXT(p, p_sibling)) {
96 p = LIST_NEXT(p, p_sibling);
/*
 * PROC_SPROTECT handler: validate the flags word (only PPROT_DESCEND
 * and PPROT_INHERIT modifiers allowed), require PRIV_VM_MADV_PROTECT
 * privilege, then protect either the whole subtree or one process.
 */
106 protect_set(struct thread *td, struct proc *p, void *data)
108 int error, flags, ret;
110 flags = *(int *)data;
111 switch (PPROT_OP(flags)) {
119 if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
122 error = priv_check(td, PRIV_VM_MADV_PROTECT);
126 if (flags & PPROT_DESCEND)
127 ret = protect_setchildren(td, p, flags);
129 ret = protect_setchild(td, p, flags);
/*
 * PROC_REAP_ACQUIRE handler: make the calling process a reaper for its
 * future descendants by setting P_TREE_REAPER.  Only the current
 * process may acquire, and only if it is not already a reaper.
 * Requires proctree_lock exclusively.
 */
136 reap_acquire(struct thread *td, struct proc *p, void *data __unused)
139 sx_assert(&proctree_lock, SX_XLOCKED);
140 if (p != td->td_proc)
142 if ((p->p_treeflag & P_TREE_REAPER) != 0)
144 p->p_treeflag |= P_TREE_REAPER;
146 * We do not reattach existing children and the whole tree
147 * under them to us, since p->p_reaper already seen them.
/*
 * PROC_REAP_RELEASE handler: stop acting as a reaper; the currently
 * reaped children are handed back via reaper_abandon_children().
 * Only the current process may release its own reaper status.
 */
153 reap_release(struct thread *td, struct proc *p, void *data __unused)
156 sx_assert(&proctree_lock, SX_XLOCKED);
157 if (p != td->td_proc)
161 if ((p->p_treeflag & P_TREE_REAPER) == 0)
163 reaper_abandon_children(p, false);
/*
 * PROC_REAP_STATUS handler: fill struct procctl_reaper_status for the
 * caller.  Reports whether p itself owns reaper status, whether the
 * effective reaper is init, the reaper pid, the pid of a first
 * child/descendant, and the count of reaped descendants.
 */
168 reap_status(struct thread *td, struct proc *p, void *data)
170 struct proc *reap, *p2, *first_p;
171 struct procctl_reaper_status *rs;
174 sx_assert(&proctree_lock, SX_LOCKED);
175 if ((p->p_treeflag & P_TREE_REAPER) == 0) {
179 rs->rs_flags |= REAPER_STATUS_OWNED;
181 if (reap == initproc)
182 rs->rs_flags |= REAPER_STATUS_REALINIT;
183 rs->rs_reaper = reap->p_pid;
184 rs->rs_descendants = 0;
186 if (!LIST_EMPTY(&reap->p_reaplist)) {
187 first_p = LIST_FIRST(&reap->p_children);
189 first_p = LIST_FIRST(&reap->p_reaplist);
190 rs->rs_pid = first_p->p_pid;
191 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
192 if (proc_realparent(p2) == reap)
194 rs->rs_descendants++;
/*
 * PROC_REAP_GETPIDS handler: copy out an array of
 * struct procctl_reaper_pidinfo describing each process on the
 * reaper's reap list.  The count pass is done under proctree_lock,
 * which is dropped around the malloc and the copyout; the list is
 * re-walked after relocking, so entries reflect the second pass.
 */
203 reap_getpids(struct thread *td, struct proc *p, void *data)
205 struct proc *reap, *p2;
206 struct procctl_reaper_pidinfo *pi, *pip;
207 struct procctl_reaper_pids *rp;
212 sx_assert(&proctree_lock, SX_LOCKED);
/* Use p's reaper unless p itself is a reaper. */
214 reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
217 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
219 sx_unlock(&proctree_lock);
220 if (rp->rp_count < n)
222 pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK)_
223 sx_slock(&proctree_lock);
224 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
228 bzero(pip, sizeof(*pip));
229 pip->pi_pid = p2->p_pid;
230 pip->pi_subtree = p2->p_reapsubtree;
231 pip->pi_flags = REAPER_PIDINFO_VALID;
232 if (proc_realparent(p2) == reap)
233 pip->pi_flags |= REAPER_PIDINFO_CHILD;
234 if ((p2->p_treeflag & P_TREE_REAPER) != 0)
235 pip->pi_flags |= REAPER_PIDINFO_REAPER;
236 if ((p2->p_flag & P_STOPPED) != 0)
237 pip->pi_flags |= REAPER_PIDINFO_STOPPED;
238 if (p2->p_state == PRS_ZOMBIE)
239 pip->pi_flags |= REAPER_PIDINFO_ZOMBIE;
240 else if ((p2->p_flag & P_WEXIT) != 0)
241 pip->pi_flags |= REAPER_PIDINFO_EXITING;
244 sx_sunlock(&proctree_lock);
245 error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
247 sx_slock(&proctree_lock);
/*
 * Work item handed to taskqueue_thread by reap_kill_subtree_once();
 * bundles the kill request so the signalling/single-threading of the
 * target can run outside the caller's context.
 */
252 struct reap_kill_proc_work {
256 struct procctl_reaper_kill *rk;
/*
 * Signal one target process for PROC_REAP_KILL.  Checks credential
 * permission (cr_cansignal); records the first failing pid in
 * rk_fpid when the accumulated error is still ESRCH.  Non-kernel,
 * non-system, non-stopped targets are single-threaded first so that
 * a forking thread cannot escape the subtree-wide signalling.
 * Called with the target's proc lock held and the process held.
 */
262 reap_kill_proc_locked(struct reap_kill_proc_work *w)
267 PROC_LOCK_ASSERT(w->target, MA_OWNED);
268 PROC_ASSERT_HELD(w->target);
270 error1 = cr_cansignal(w->cr, w->target, w->rk->rk_sig);
272 if (*w->error == ESRCH) {
273 w->rk->rk_fpid = w->target->p_pid;
280 * The need_stop indicates if the target process needs to be
281 * suspended before being signalled. This is needed when we
282 * guarantee that all processes in subtree are signalled,
283 * avoiding the race with some process not yet fully linked
284 * into all structures during fork, ignored by iterator, and
285 * then escaping signalling.
287 * The thread cannot usefully stop itself anyway, and if other
288 * thread of the current process forks while the current
289 * thread signals the whole subtree, it is an application
292 if ((w->target->p_flag & (P_KPROC | P_SYSTEM | P_STOPPED)) == 0)
293 need_stop = thread_single(w->target, SINGLE_ALLPROC) == 0;
297 (void)pksignal(w->target, w->rk->rk_sig, w->ksi);
302 thread_single_end(w->target, SINGLE_ALLPROC);
/*
 * Taskqueue callback: lock the target, signal it via
 * reap_kill_proc_locked() unless it is already exiting (P2_WEXIT),
 * then notify the submitter under proctree_lock.
 */
306 reap_kill_proc_work(void *arg, int pending __unused)
308 struct reap_kill_proc_work *w;
311 PROC_LOCK(w->target);
312 if ((w->target->p_flag2 & P2_WEXIT) == 0)
313 reap_kill_proc_locked(w);
314 PROC_UNLOCK(w->target);
316 sx_xlock(&proctree_lock);
319 sx_xunlock(&proctree_lock);
/*
 * Work-list node used for the iterative (non-recursive) walk of the
 * reaper subtree in reap_kill_subtree_once(); one entry per nested
 * reaper still to be visited.
 */
322 struct reap_kill_tracker {
324 TAILQ_ENTRY(reap_kill_tracker) link;
327 TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);
/*
 * Queue a (sub-)reaper for traversal.  Exiting processes (P2_WEXIT)
 * are skipped; otherwise a tracker node is allocated (M_WAITOK) and
 * appended to the work list.
 */
330 reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
332 struct reap_kill_tracker *t;
335 if ((p2->p_flag2 & P2_WEXIT) != 0) {
341 t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
343 TAILQ_INSERT_TAIL(tracker, t, link);
/* Release one tracker node queued by reap_kill_sched(). */
347 reap_kill_sched_free(struct reap_kill_tracker *t)
/*
 * REAPER_KILL_CHILDREN mode: signal only the reaper's direct children.
 * Permission is checked per child with p_cansignal(); the first
 * failing pid is recorded while *error is still ESRCH, and the loop
 * deliberately continues on error so every eligible child is signalled.
 */
354 reap_kill_children(struct thread *td, struct proc *reaper,
355 struct procctl_reaper_kill *rk, ksiginfo_t *ksi, int *error)
360 LIST_FOREACH(p2, &reaper->p_children, p_sibling) {
362 if ((p2->p_flag2 & P2_WEXIT) == 0) {
363 error1 = p_cansignal(td, p2, rk->rk_sig);
365 if (*error == ESRCH) {
366 rk->rk_fpid = p2->p_pid;
371 * Do not end the loop on error,
372 * signal everything we can.
375 (void)pksignal(p2, rk->rk_sig, ksi);
/*
 * One pass over the reaper subtree for PROC_REAP_KILL.  Walks nested
 * reapers iteratively via the tracker list, signals each member once
 * (the 'pids' unr set dedupes pids across passes and detects pid
 * reuse), single-threads the current process specially, and defers
 * other multithreaded targets to a taskqueue work item.  Because
 * proctree_lock is dropped along the way, a queued reaper may have
 * lost P_TREE_REAPER; such entries are skipped, since reparenting
 * moved their subtree to another reaper.  Returns (per callers in
 * reap_kill_subtree) whether another pass is needed.
 */
384 reap_kill_subtree_once(struct thread *td, struct proc *p, struct proc *reaper,
385 struct unrhdr *pids, struct reap_kill_proc_work *w)
387 struct reap_kill_tracker_head tracker;
388 struct reap_kill_tracker *t;
394 TAILQ_INIT(&tracker);
395 reap_kill_sched(&tracker, reaper);
396 while ((t = TAILQ_FIRST(&tracker)) != NULL) {
397 TAILQ_REMOVE(&tracker, t, link);
400 * Since reap_kill_proc() drops proctree_lock sx, it
401 * is possible that the tracked reaper is no longer.
402 * In this case the subtree is reparented to the new
403 * reaper, which should handle it.
405 if ((t->parent->p_treeflag & P_TREE_REAPER) == 0) {
406 reap_kill_sched_free(t);
411 LIST_FOREACH(p2, &t->parent->p_reaplist, p_reapsibling) {
/* REAPER_KILL_SUBTREE restricts the top-level reaper's list to one subtree id. */
412 if (t->parent == reaper &&
413 (w->rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
414 p2->p_reapsubtree != w->rk->rk_subtree)
416 if ((p2->p_treeflag & P_TREE_REAPER) != 0)
417 reap_kill_sched(&tracker, p2);
420 * Handle possible pid reuse. If we recorded
421 * p2 as killed but its p_flag2 does not
422 * confirm it, that means that the process
423 * terminated and its id was reused by other
424 * process in the reaper subtree.
426 * Unlocked read of p2->p_flag2 is fine, it is
427 * our thread that set the tested flag.
429 if (alloc_unr_specific(pids, p2->p_pid) != p2->p_pid &&
430 (atomic_load_int(&p2->p_flag2) &
431 (P2_REAPKILLED | P2_WEXIT)) != 0)
/* Current process: single-thread in place rather than via taskqueue. */
434 if (p2 == td->td_proc) {
435 if ((p2->p_flag & P_HADTHREADS) != 0 &&
436 (p2->p_flag2 & P2_WEXIT) == 0) {
437 xlocked = sx_xlocked(&proctree_lock);
438 sx_unlock(&proctree_lock);
445 * sapblk ensures that only one thread
446 * in the system sets this flag.
448 p2->p_flag2 |= P2_REAPKILLED;
450 r = thread_single(p2, SINGLE_NO_EXIT);
451 (void)pksignal(p2, w->rk->rk_sig, w->ksi);
454 thread_single_end(p2, SINGLE_NO_EXIT);
/* Reacquire proctree_lock in the mode we held before dropping it. */
458 sx_xlock(&proctree_lock);
460 sx_slock(&proctree_lock);
464 if ((p2->p_flag2 & P2_WEXIT) == 0) {
466 p2->p_flag2 |= P2_REAPKILLED;
/* Other processes: hand off to taskqueue and sleep until done. */
469 taskqueue_enqueue(taskqueue_thread,
471 while (w->target != NULL) {
473 &proctree_lock, PWAIT,
483 reap_kill_sched_free(t);
/*
 * Drive reap_kill_subtree_once() to a fixed point.  The unr set
 * 'pids' remembers already-signalled pids across passes so repeated
 * iterations (needed because locks are dropped mid-walk) do not
 * double-signal.  Afterwards, P2_REAPKILLED is cleared on every
 * recorded pid that still names the same process.  Bails out early
 * if the calling process is itself exiting.
 */
489 reap_kill_subtree(struct thread *td, struct proc *p, struct proc *reaper,
490 struct reap_kill_proc_work *w)
498 * pids records processes which were already signalled, to
499 * avoid doubling signals to them if iteration needs to be
502 init_unrhdr(&pids, 1, PID_MAX, UNR_NO_MTX);
503 PROC_LOCK(td->td_proc);
504 if ((td->td_proc->p_flag2 & P2_WEXIT) != 0) {
505 PROC_UNLOCK(td->td_proc);
508 PROC_UNLOCK(td->td_proc);
509 while (reap_kill_subtree_once(td, p, reaper, &pids, w))
512 ihandle = create_iter_unr(&pids);
513 while ((pid = next_iter_unr(ihandle)) != -1) {
516 p2->p_flag2 &= ~P2_REAPKILLED;
520 free_iter_unr(ihandle);
/*
 * sapblk hook for PROC_REAP_KILL: subtree-wide kills (anything other
 * than REAPER_KILL_CHILDREN) must block stop_all_proc so only one
 * thread system-wide performs the subtree walk.
 */
528 reap_kill_sapblk(struct thread *td __unused, void *data)
530 struct procctl_reaper_kill *rk;
533 return ((rk->rk_flags & REAPER_KILL_CHILDREN) == 0);
/*
 * PROC_REAP_KILL handler: validate the signal number and flag
 * combination (CHILDREN and SUBTREE are mutually exclusive), refuse in
 * capability mode (with ktrace capfail note), build the ksiginfo, and
 * dispatch to either direct-children or full-subtree signalling.  The
 * work item, ksi, and rk live on this stack frame, hence the
 * anti-swapout note below.
 */
537 reap_kill(struct thread *td, struct proc *p, void *data)
539 struct reap_kill_proc_work w;
542 struct procctl_reaper_kill *rk;
546 sx_assert(&proctree_lock, SX_LOCKED);
548 ktrcapfail(CAPFAIL_SIGNAL, &rk->rk_sig);
549 if (IN_CAPABILITY_MODE(td))
551 if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
552 (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
553 REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
554 (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
555 (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
558 reaper = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
560 ksi.ksi_signo = rk->rk_sig;
561 ksi.ksi_code = SI_USER;
562 ksi.ksi_pid = td->td_proc->p_pid;
563 ksi.ksi_uid = td->td_ucred->cr_ruid;
567 if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
568 reap_kill_children(td, reaper, rk, &ksi, &error);
570 w.cr = crhold(td->td_ucred);
574 TASK_INIT(&w.t, 0, reap_kill_proc_work, &w);
577 * Prevent swapout, since w, ksi, and possibly rk, are
578 * allocated on the stack. We sleep in
579 * reap_kill_subtree_once() waiting for task to
580 * complete single-threading.
584 reap_kill_subtree(td, p, reaper, &w);
/*
 * PROC_TRACE_CTL handler: enable or disable debugger/ktrace tracing of
 * a process via the P2_NOTRACE / P2_NOTRACE_EXEC flags.  The request
 * is refused while the process is already being traced (P_TRACED or a
 * nonzero p_traceflag).  Enabling, and clearing the exec-scoped
 * disable, is restricted to the process itself.
 *
 * Fix: corrected the KASSERT panic-message typo "dandling" ->
 * "dangling" (P2_NOTRACE_EXEC must never be set without P2_NOTRACE).
 */
593 trace_ctl(struct thread *td, struct proc *p, void *data)
597 PROC_LOCK_ASSERT(p, MA_OWNED);
598 state = *(int *)data;
601 * Ktrace changes p_traceflag from or to zero under the
602 * process lock, so the test does not need to acquire ktrace
605 if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
609 case PROC_TRACE_CTL_ENABLE:
610 if (td->td_proc != p)
612 p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
614 case PROC_TRACE_CTL_DISABLE_EXEC:
615 p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
617 case PROC_TRACE_CTL_DISABLE:
618 if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
619 KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
620 ("dangling P2_NOTRACE_EXEC"));
621 if (td->td_proc != p)
623 p->p_flag2 &= ~P2_NOTRACE_EXEC;
625 p->p_flag2 |= P2_NOTRACE;
/*
 * PROC_TRACE_STATUS handler: report tracing state.  A process with
 * P2_NOTRACE set must not be traced (asserted); otherwise, when
 * P_TRACED, the status is the tracing parent's pid.
 */
640 trace_status(struct thread *td, struct proc *p, void *data)
645 if ((p->p_flag2 & P2_NOTRACE) != 0) {
641 KASSERT((p->p_flag & P_TRACED) == 0,
642 ("%d traced but tracing disabled", p->p_pid));
644 } else if ((p->p_flag & P_TRACED) != 0) {
645 *status = p->p_pptr->p_pid;
/*
 * PROC_TRAPCAP_CTL handler: toggle P2_TRAPCAP, which makes
 * ENOTCAPABLE/ECAPMODE syscall errors deliver SIGTRAP in capability
 * mode.  Called with the process lock held.
 */
653 trapcap_ctl(struct thread *td, struct proc *p, void *data)
657 PROC_LOCK_ASSERT(p, MA_OWNED);
658 state = *(int *)data;
661 case PROC_TRAPCAP_CTL_ENABLE:
662 p->p_flag2 |= P2_TRAPCAP;
664 case PROC_TRAPCAP_CTL_DISABLE:
665 p->p_flag2 &= ~P2_TRAPCAP;
/* PROC_TRAPCAP_STATUS handler: report P2_TRAPCAP as ENABLE/DISABLE. */
674 trapcap_status(struct thread *td, struct proc *p, void *data)
679 *status = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
680 PROC_TRAPCAP_CTL_DISABLE;
/*
 * PROC_NO_NEW_PRIVS_CTL handler: one-way switch — only ENABLE is
 * accepted, setting P2_NO_NEW_PRIVS (suppresses setuid/setgid
 * privilege elevation on exec).  There is deliberately no disable.
 */
685 no_new_privs_ctl(struct thread *td, struct proc *p, void *data)
689 PROC_LOCK_ASSERT(p, MA_OWNED);
690 state = *(int *)data;
692 if (state != PROC_NO_NEW_PRIVS_ENABLE)
694 p->p_flag2 |= P2_NO_NEW_PRIVS;
/* PROC_NO_NEW_PRIVS_STATUS handler: report P2_NO_NEW_PRIVS state. */
699 no_new_privs_status(struct thread *td, struct proc *p, void *data)
702 *(int *)data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ?
703 PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
/*
 * PROC_PROTMAX_CTL handler: select the PROT_MAX implicit-maximum
 * policy for mmap(2) — force-enable, force-disable, or revert to the
 * system default (NOFORCE clears both override flags).
 */
708 protmax_ctl(struct thread *td, struct proc *p, void *data)
712 PROC_LOCK_ASSERT(p, MA_OWNED);
713 state = *(int *)data;
716 case PROC_PROTMAX_FORCE_ENABLE:
717 p->p_flag2 &= ~P2_PROTMAX_DISABLE;
718 p->p_flag2 |= P2_PROTMAX_ENABLE;
720 case PROC_PROTMAX_FORCE_DISABLE:
721 p->p_flag2 |= P2_PROTMAX_DISABLE;
722 p->p_flag2 &= ~P2_PROTMAX_ENABLE;
724 case PROC_PROTMAX_NOFORCE:
725 p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
/*
 * PROC_PROTMAX_STATUS handler: translate the override flags back to
 * the ctl constants and OR in PROC_PROTMAX_ACTIVE when the effective
 * mmap policy (kern_mmap_maxprot) currently restricts PROT_READ.
 */
734 protmax_status(struct thread *td, struct proc *p, void *data)
738 switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
740 d = PROC_PROTMAX_NOFORCE;
742 case P2_PROTMAX_ENABLE:
743 d = PROC_PROTMAX_FORCE_ENABLE;
745 case P2_PROTMAX_DISABLE:
746 d = PROC_PROTMAX_FORCE_DISABLE;
749 if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
750 d |= PROC_PROTMAX_ACTIVE;
/*
 * PROC_ASLR_CTL handler: set the per-process ASLR override —
 * force-enable, force-disable, or NOFORCE (follow the global sysctl
 * default).  Takes effect at the next exec; see aslr_status.
 */
756 aslr_ctl(struct thread *td, struct proc *p, void *data)
760 PROC_LOCK_ASSERT(p, MA_OWNED);
761 state = *(int *)data;
764 case PROC_ASLR_FORCE_ENABLE:
765 p->p_flag2 &= ~P2_ASLR_DISABLE;
766 p->p_flag2 |= P2_ASLR_ENABLE;
768 case PROC_ASLR_FORCE_DISABLE:
769 p->p_flag2 |= P2_ASLR_DISABLE;
770 p->p_flag2 &= ~P2_ASLR_ENABLE;
772 case PROC_ASLR_NOFORCE:
773 p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
/*
 * PROC_ASLR_STATUS handler: report the override flags, plus
 * PROC_ASLR_ACTIVE when the live vmspace map actually has MAP_ASLR
 * set (checked via a vmspace reference, only for non-exiting procs).
 */
782 aslr_status(struct thread *td, struct proc *p, void *data)
787 switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
789 d = PROC_ASLR_NOFORCE;
792 d = PROC_ASLR_FORCE_ENABLE;
794 case P2_ASLR_DISABLE:
795 d = PROC_ASLR_FORCE_DISABLE;
798 if ((p->p_flag & P_WEXIT) == 0) {
801 vm = vmspace_acquire_ref(p);
803 if ((vm->vm_map.flags & MAP_ASLR) != 0)
804 d |= PROC_ASLR_ACTIVE;
/*
 * PROC_STACKGAP_CTL handler: control the stack-gap mitigation now
 * and across exec.  Validates the flags word, then handles the
 * current-process pair (ENABLE is rejected once disabled — the gap
 * cannot be re-established for a live address space; DISABLE sets
 * P2_STKGAP_DISABLE) and the exec-scoped pair independently.
 */
815 stackgap_ctl(struct thread *td, struct proc *p, void *data)
819 PROC_LOCK_ASSERT(p, MA_OWNED);
820 state = *(int *)data;
822 if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
823 PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
825 switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
826 case PROC_STACKGAP_ENABLE:
827 if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
830 case PROC_STACKGAP_DISABLE:
831 p->p_flag2 |= P2_STKGAP_DISABLE;
838 switch (state & (PROC_STACKGAP_ENABLE_EXEC |
839 PROC_STACKGAP_DISABLE_EXEC)) {
840 case PROC_STACKGAP_ENABLE_EXEC:
841 p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
843 case PROC_STACKGAP_DISABLE_EXEC:
844 p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
/*
 * PROC_STACKGAP_STATUS handler: report both the current and the
 * on-exec stack-gap settings derived from the two P2_STKGAP_* flags.
 */
855 stackgap_status(struct thread *td, struct proc *p, void *data)
859 PROC_LOCK_ASSERT(p, MA_OWNED);
861 d = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE :
862 PROC_STACKGAP_ENABLE;
863 d |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
864 PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
/*
 * PROC_WXMAP_CTL handler: control W^X enforcement.  PERMIT sets
 * P2_WXORX_DISABLE and clears MAP_WXORX on the live vm_map (via a
 * vmspace reference); DISALLOW_EXEC sets P2_WXORX_ENABLE_EXEC so
 * enforcement starts at the next exec.  Refused for exiting procs.
 */
870 wxmap_ctl(struct thread *td, struct proc *p, void *data)
876 PROC_LOCK_ASSERT(p, MA_OWNED);
877 if ((p->p_flag & P_WEXIT) != 0)
879 state = *(int *)data;
882 case PROC_WX_MAPPINGS_PERMIT:
883 p->p_flag2 |= P2_WXORX_DISABLE;
886 vm = vmspace_acquire_ref(p);
890 map->flags &= ~MAP_WXORX;
897 case PROC_WX_MAPPINGS_DISALLOW_EXEC:
898 p->p_flag2 |= P2_WXORX_ENABLE_EXEC;
/*
 * PROC_WXMAP_STATUS handler: report the W^X control flags and, from
 * the live vm_map, whether enforcement (MAP_WXORX) is currently
 * active.  Refused for exiting processes.
 */
908 wxmap_status(struct thread *td, struct proc *p, void *data)
913 PROC_LOCK_ASSERT(p, MA_OWNED);
914 if ((p->p_flag & P_WEXIT) != 0)
918 if ((p->p_flag2 & P2_WXORX_DISABLE) != 0)
919 d |= PROC_WX_MAPPINGS_PERMIT;
920 if ((p->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
921 d |= PROC_WX_MAPPINGS_DISALLOW_EXEC;
924 vm = vmspace_acquire_ref(p);
926 if ((vm->vm_map.flags & MAP_WXORX) != 0)
927 d |= PROC_WXORX_ENFORCE;
/*
 * PROC_PDEATHSIG_CTL handler: set the signal delivered to this
 * process when its parent exits.  Only allowed on the calling
 * process; 0 clears, otherwise the signal number must be valid.
 */
937 pdeathsig_ctl(struct thread *td, struct proc *p, void *data)
941 signum = *(int *)data;
942 if (p != td->td_proc || (signum != 0 && !_SIG_VALID(signum)))
944 p->p_pdeathsig = signum;
/* PROC_PDEATHSIG_STATUS handler: report p_pdeathsig (self only). */
949 pdeathsig_status(struct thread *td, struct proc *p, void *data)
951 if (p != td->td_proc)
953 *(int *)data = p->p_pdeathsig;
/*
 * Per-command dispatch descriptor for procctl(2): locking mode,
 * single-process vs. group applicability, error-mapping and copy
 * in/out sizes, the handler, and an optional stop_all_proc-block
 * predicate (sapblk).
 */
963 struct procctl_cmd_info {
966 bool esrch_is_einval : 1;
967 bool copyout_on_error : 1;
968 bool no_nonnull_data : 1;
969 bool need_candebug : 1;
972 int (*exec)(struct thread *, struct proc *, void *);
973 bool (*sapblk)(struct thread *, void *);
/*
 * Dispatch table indexed by procctl(2) command number.  Each entry
 * selects the proctree_lock mode, whether the command is restricted
 * to a single process, the copyin/copyout sizes, the required
 * visibility check (p_candebug vs p_cansee), and the handler.
 */
975 static const struct procctl_cmd_info procctl_cmds_info[] = {
977 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
978 .esrch_is_einval = false, .no_nonnull_data = false,
979 .need_candebug = false,
980 .copyin_sz = sizeof(int), .copyout_sz = 0,
981 .exec = protect_set, .copyout_on_error = false, },
982 [PROC_REAP_ACQUIRE] =
983 { .lock_tree = PCTL_XLOCKED, .one_proc = true,
984 .esrch_is_einval = false, .no_nonnull_data = true,
985 .need_candebug = false,
986 .copyin_sz = 0, .copyout_sz = 0,
987 .exec = reap_acquire, .copyout_on_error = false, },
988 [PROC_REAP_RELEASE] =
989 { .lock_tree = PCTL_XLOCKED, .one_proc = true,
990 .esrch_is_einval = false, .no_nonnull_data = true,
991 .need_candebug = false,
992 .copyin_sz = 0, .copyout_sz = 0,
993 .exec = reap_release, .copyout_on_error = false, },
995 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
996 .esrch_is_einval = false, .no_nonnull_data = false,
997 .need_candebug = false,
999 .copyout_sz = sizeof(struct procctl_reaper_status),
1000 .exec = reap_status, .copyout_on_error = false, },
1001 [PROC_REAP_GETPIDS] =
1002 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
1003 .esrch_is_einval = false, .no_nonnull_data = false,
1004 .need_candebug = false,
1005 .copyin_sz = sizeof(struct procctl_reaper_pids),
1007 .exec = reap_getpids, .copyout_on_error = false, },
1009 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
1010 .esrch_is_einval = false, .no_nonnull_data = false,
1011 .need_candebug = false,
1012 .copyin_sz = sizeof(struct procctl_reaper_kill),
1013 .copyout_sz = sizeof(struct procctl_reaper_kill),
1014 .exec = reap_kill, .copyout_on_error = true,
1015 .sapblk = reap_kill_sapblk, },
1017 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
1018 .esrch_is_einval = false, .no_nonnull_data = false,
1019 .need_candebug = true,
1020 .copyin_sz = sizeof(int), .copyout_sz = 0,
1021 .exec = trace_ctl, .copyout_on_error = false, },
1022 [PROC_TRACE_STATUS] =
1023 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1024 .esrch_is_einval = false, .no_nonnull_data = false,
1025 .need_candebug = false,
1026 .copyin_sz = 0, .copyout_sz = sizeof(int),
1027 .exec = trace_status, .copyout_on_error = false, },
1028 [PROC_TRAPCAP_CTL] =
1029 { .lock_tree = PCTL_SLOCKED, .one_proc = false,
1030 .esrch_is_einval = false, .no_nonnull_data = false,
1031 .need_candebug = true,
1032 .copyin_sz = sizeof(int), .copyout_sz = 0,
1033 .exec = trapcap_ctl, .copyout_on_error = false, },
1034 [PROC_TRAPCAP_STATUS] =
1035 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1036 .esrch_is_einval = false, .no_nonnull_data = false,
1037 .need_candebug = false,
1038 .copyin_sz = 0, .copyout_sz = sizeof(int),
1039 .exec = trapcap_status, .copyout_on_error = false, },
1040 [PROC_PDEATHSIG_CTL] =
1041 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1042 .esrch_is_einval = true, .no_nonnull_data = false,
1043 .need_candebug = false,
1044 .copyin_sz = sizeof(int), .copyout_sz = 0,
1045 .exec = pdeathsig_ctl, .copyout_on_error = false, },
1046 [PROC_PDEATHSIG_STATUS] =
1047 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1048 .esrch_is_einval = true, .no_nonnull_data = false,
1049 .need_candebug = false,
1050 .copyin_sz = 0, .copyout_sz = sizeof(int),
1051 .exec = pdeathsig_status, .copyout_on_error = false, },
1053 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1054 .esrch_is_einval = false, .no_nonnull_data = false,
1055 .need_candebug = true,
1056 .copyin_sz = sizeof(int), .copyout_sz = 0,
1057 .exec = aslr_ctl, .copyout_on_error = false, },
1058 [PROC_ASLR_STATUS] =
1059 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1060 .esrch_is_einval = false, .no_nonnull_data = false,
1061 .need_candebug = false,
1062 .copyin_sz = 0, .copyout_sz = sizeof(int),
1063 .exec = aslr_status, .copyout_on_error = false, },
1064 [PROC_PROTMAX_CTL] =
1065 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1066 .esrch_is_einval = false, .no_nonnull_data = false,
1067 .need_candebug = true,
1068 .copyin_sz = sizeof(int), .copyout_sz = 0,
1069 .exec = protmax_ctl, .copyout_on_error = false, },
1070 [PROC_PROTMAX_STATUS] =
1071 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1072 .esrch_is_einval = false, .no_nonnull_data = false,
1073 .need_candebug = false,
1074 .copyin_sz = 0, .copyout_sz = sizeof(int),
1075 .exec = protmax_status, .copyout_on_error = false, },
1076 [PROC_STACKGAP_CTL] =
1077 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1078 .esrch_is_einval = false, .no_nonnull_data = false,
1079 .need_candebug = true,
1080 .copyin_sz = sizeof(int), .copyout_sz = 0,
1081 .exec = stackgap_ctl, .copyout_on_error = false, },
1082 [PROC_STACKGAP_STATUS] =
1083 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1084 .esrch_is_einval = false, .no_nonnull_data = false,
1085 .need_candebug = false,
1086 .copyin_sz = 0, .copyout_sz = sizeof(int),
1087 .exec = stackgap_status, .copyout_on_error = false, },
1088 [PROC_NO_NEW_PRIVS_CTL] =
1089 { .lock_tree = PCTL_SLOCKED, .one_proc = true,
1090 .esrch_is_einval = false, .no_nonnull_data = false,
1091 .need_candebug = true,
1092 .copyin_sz = sizeof(int), .copyout_sz = 0,
1093 .exec = no_new_privs_ctl, .copyout_on_error = false, },
1094 [PROC_NO_NEW_PRIVS_STATUS] =
1095 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1096 .esrch_is_einval = false, .no_nonnull_data = false,
1097 .need_candebug = false,
1098 .copyin_sz = 0, .copyout_sz = sizeof(int),
1099 .exec = no_new_privs_status, .copyout_on_error = false, },
1101 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1102 .esrch_is_einval = false, .no_nonnull_data = false,
1103 .need_candebug = true,
1104 .copyin_sz = sizeof(int), .copyout_sz = 0,
1105 .exec = wxmap_ctl, .copyout_on_error = false, },
1106 [PROC_WXMAP_STATUS] =
1107 { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
1108 .esrch_is_einval = false, .no_nonnull_data = false,
1109 .need_candebug = false,
1110 .copyin_sz = 0, .copyout_sz = sizeof(int),
1111 .exec = wxmap_status, .copyout_on_error = false, },
/*
 * procctl(2) syscall entry.  MD commands (>= PROC_PROCCTL_MD_MIN) are
 * forwarded to cpu_procctl(); otherwise the command is validated
 * against the dispatch table, the argument is copied in per
 * copyin_sz (a zero size with non-NULL data is rejected when
 * no_nonnull_data is set), kern_procctl() runs the handler, and the
 * result is copied back out per copyout_sz / copyout_on_error.
 */
1115 sys_procctl(struct thread *td, struct procctl_args *uap)
1118 struct procctl_reaper_status rs;
1119 struct procctl_reaper_pids rp;
1120 struct procctl_reaper_kill rk;
1123 const struct procctl_cmd_info *cmd_info;
1126 if (uap->com >= PROC_PROCCTL_MD_MIN)
1127 return (cpu_procctl(td, uap->idtype, uap->id,
1128 uap->com, uap->data));
1129 if (uap->com <= 0 || uap->com >= nitems(procctl_cmds_info))
1131 cmd_info = &procctl_cmds_info[uap->com];
1132 bzero(&x, sizeof(x));
1134 if (cmd_info->copyin_sz > 0) {
1135 error = copyin(uap->data, &x, cmd_info->copyin_sz);
1138 } else if (cmd_info->no_nonnull_data && uap->data != NULL) {
1142 error = kern_procctl(td, uap->idtype, uap->id, uap->com, &x);
1144 if (cmd_info->copyout_sz > 0 && (error == 0 ||
1145 cmd_info->copyout_on_error)) {
1146 error1 = copyout(&x, uap->data, cmd_info->copyout_sz);
/*
 * Invoke the table handler for one already-locked process.
 */
1154 kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
1157 PROC_LOCK_ASSERT(p, MA_OWNED);
1158 return (procctl_cmds_info[com].exec(td, p, data));
/*
 * In-kernel procctl dispatcher.  Enforces one_proc commands take
 * P_PID only, runs the sapblk hook (blocking stop_all_proc when it
 * asks), takes proctree_lock in the mode the table prescribes, then
 * applies the handler to either a single pid (mapping lookup failure
 * to EINVAL for esrch_is_einval commands, and gating on p_candebug or
 * p_cansee per need_candebug) or to every visible member of a process
 * group — ignoring per-member failures as long as at least one member
 * succeeds.  Releases the tree lock and stop_all_proc block on exit.
 */
1162 kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
1166 const struct procctl_cmd_info *cmd_info;
1167 int error, first_error, ok;
1170 MPASS(com > 0 && com < nitems(procctl_cmds_info));
1171 cmd_info = &procctl_cmds_info[com];
1172 if (idtype != P_PID && cmd_info->one_proc)
1176 if (cmd_info->sapblk != NULL) {
1177 sapblk = cmd_info->sapblk(td, data);
1178 if (sapblk && !stop_all_proc_block())
1182 switch (cmd_info->lock_tree) {
1184 sx_xlock(&proctree_lock);
1187 sx_slock(&proctree_lock);
1202 error = cmd_info->esrch_is_einval ?
1206 error = cmd_info->need_candebug ? p_candebug(td, p) :
1210 error = kern_procctl_single(td, p, com, data);
1215 * Attempt to apply the operation to all members of the
1216 * group. Ignore processes in the group that can't be
1217 * seen. Ignore errors so long as at least one process is
1218 * able to complete the request successfully.
1228 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
1230 if (p->p_state == PRS_NEW ||
1231 p->p_state == PRS_ZOMBIE ||
1232 (cmd_info->need_candebug ? p_candebug(td, p) :
1233 p_cansee(td, p)) != 0) {
1237 error = kern_procctl_single(td, p, com, data);
1241 else if (first_error == 0)
1242 first_error = error;
1246 else if (first_error != 0)
1247 error = first_error;
1250 * Was not able to see any processes in the
1260 switch (cmd_info->lock_tree) {
1262 sx_xunlock(&proctree_lock);
1265 sx_sunlock(&proctree_lock);
1271 stop_all_proc_unblock();