/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_ktrace.h"
#include "opt_kstack_pages.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sbuf.h>
#include <sys/sysent.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/vnode.h>
#include <sys/eventhandler.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/uma.h>

SDT_PROVIDER_DEFINE(proc);
SDT_PROBE_DEFINE(proc, kernel, ctor, entry);
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 3, "int");
SDT_PROBE_DEFINE(proc, kernel, ctor, return);
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 3, "int");
SDT_PROBE_DEFINE(proc, kernel, dtor, entry);
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 2, "void *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 3, "struct thread *");
SDT_PROBE_DEFINE(proc, kernel, dtor, return);
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 2, "void *");
SDT_PROBE_DEFINE(proc, kernel, init, entry);
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 2, "int");
SDT_PROBE_DEFINE(proc, kernel, init, return);
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 1, "int");
SDT_PROBE_ARGTYPE(proc, kernel, init, return, 2, "int");

MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

static void doenterpgrp(struct proc *, struct pgrp *);
static void orphanpg(struct pgrp *pg);
static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp);
static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp,
    int preferthread);
static void pgadjustjobc(struct pgrp *pgrp, int entering);
static void pgdelete(struct pgrp *);
static int proc_ctor(void *mem, int size, void *arg, int flags);
static void proc_dtor(void *mem, int size, void *arg);
static int proc_init(void *mem, int size, int flags);
static void proc_fini(void *mem, int size);
static void pargs_free(struct pargs *pa);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
struct sx allproc_lock;
struct sx proctree_lock;
struct mtx ppeers_lock;
uma_zone_t proc_zone;
uma_zone_t ithread_zone;

int kstack_pages = KSTACK_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");

CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);

/*
 * Initialize global process hashing structures.
 */
void
procinit()
{

	sx_init(&allproc_lock, "allproc");
	sx_init(&proctree_lock, "proctree");
	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
	    proc_ctor, proc_dtor, proc_init, proc_fini,
	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

/*
 * Prepare a proc for use.
 */
static int
proc_ctor(void *mem, int size, void *arg, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	SDT_PROBE(proc, kernel, ctor, entry, p, size, arg, flags, 0);
	EVENTHANDLER_INVOKE(process_ctor, p);
	SDT_PROBE(proc, kernel, ctor, return, p, size, arg, flags, 0);
	return (0);
}

/*
 * Reclaim a proc after use.
 */
static void
proc_dtor(void *mem, int size, void *arg)
{
	struct proc *p;
	struct thread *td;

	/* INVARIANTS checks go here */
	p = (struct proc *)mem;
	td = FIRST_THREAD_IN_PROC(p);
	SDT_PROBE(proc, kernel, dtor, entry, p, size, arg, td, 0);
	if (td != NULL) {
#ifdef INVARIANTS
		KASSERT((p->p_numthreads == 1),
		    ("bad number of threads in exiting process"));
		KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
#endif
		/*
		 * Dispose of an alternate kstack, if it exists.
		 * XXX What if there is more than one thread in the proc?
		 *     The first thread in the proc is special and not
		 *     freed, so this must be done here.
		 */
		if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
			vm_thread_dispose_altkstack(td);
	}
	EVENTHANDLER_INVOKE(process_dtor, p);
	if (p->p_ksi != NULL)
		KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
	SDT_PROBE(proc, kernel, dtor, return, p, size, arg, 0, 0);
}

/*
 * Initialize type-stable parts of a proc (when newly created).
 */
static int
proc_init(void *mem, int size, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	SDT_PROBE(proc, kernel, init, entry, p, size, flags, 0, 0);
	p->p_sched = (struct p_sched *)&p[1];
	bzero(&p->p_mtx, sizeof(struct mtx));
	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	EVENTHANDLER_INVOKE(process_init, p);
	p->p_stats = pstats_alloc();
	SDT_PROBE(proc, kernel, init, return, p, size, flags, 0, 0);
	return (0);
}

/*
 * UMA should ensure that this function is never called.
 * Freeing a proc structure would violate type stability.
 */
static void
proc_fini(void *mem, int size)
{
#ifdef notnow
	struct proc *p;

	p = (struct proc *)mem;
	EVENTHANDLER_INVOKE(process_fini, p);
	pstats_free(p->p_stats);
	thread_free(FIRST_THREAD_IN_PROC(p));
	mtx_destroy(&p->p_mtx);
	if (p->p_ksi != NULL)
		ksiginfo_free(p->p_ksi);
#else
	panic("proc reclaimed");
#endif
}

/*
 * Is p an inferior of the current process?
 */
int
inferior(p)
	register struct proc *p;
{

	sx_assert(&proctree_lock, SX_LOCKED);
	for (; p != curproc; p = p->p_pptr)
		if (p->p_pid == 0)
			return (0);
	return (1);
}

/*
 * Locate a process by number; return only "live" processes -- i.e., neither
 * zombies nor newly born but incompletely initialized processes.  By not
 * returning processes in the PRS_NEW state, we allow callers to avoid
 * testing for that condition to avoid dereferencing p_ucred, et al.
 */
struct proc *
pfind(pid)
	register pid_t pid;
{
	register struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, PIDHASH(pid), p_hash)
		if (p->p_pid == pid) {
			if (p->p_state == PRS_NEW) {
				p = NULL;
				break;
			}
			PROC_LOCK(p);
			break;
		}
	sx_sunlock(&allproc_lock);
	return (p);
}

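/*
 * Illustrative sketch (not part of the original file): pfind() returns
 * the process locked, so a typical caller looks like:
 *
 *	struct proc *p;
 *
 *	if ((p = pfind(pid)) == NULL)
 *		return (ESRCH);
 *	error = p_cansee(curthread, p);
 *	(use p under its lock)
 *	PROC_UNLOCK(p);
 */
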
/*
 * Locate a process group by number.
 * The caller must hold proctree_lock.
 */
struct pgrp *
pgfind(pgid)
	register pid_t pgid;
{
	register struct pgrp *pgrp;

	sx_assert(&proctree_lock, SX_LOCKED);

	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
		if (pgrp->pg_id == pgid) {
			PGRP_LOCK(pgrp);
			return (pgrp);
		}
	}
	return (NULL);
}

/*
 * Create a new process group.
 * pgid must be equal to the pid of p.
 * Begin a new session if required.
 */
int
enterpgrp(p, pgid, pgrp, sess)
	register struct proc *p;
	pid_t pgid;
	struct pgrp *pgrp;
	struct session *sess;
{
	struct pgrp *pgrp2;

	sx_assert(&proctree_lock, SX_XLOCKED);

	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
	KASSERT(p->p_pid == pgid,
	    ("enterpgrp: new pgrp and pid != pgid"));
	pgrp2 = pgfind(pgid);
	KASSERT(pgrp2 == NULL,
	    ("enterpgrp: pgrp with pgid exists"));
	KASSERT(!SESS_LEADER(p),
	    ("enterpgrp: session leader attempted setpgrp"));

	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);

	if (sess != NULL) {
		/*
		 * new session
		 */
		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
		mtx_lock(&Giant);	/* XXX TTY */
		PROC_LOCK(p);
		p->p_flag &= ~P_CONTROLT;
		PROC_UNLOCK(p);
		PGRP_LOCK(pgrp);
		sess->s_leader = p;
		sess->s_sid = p->p_pid;
		sess->s_count = 1;
		sess->s_ttyvp = NULL;
		sess->s_ttyp = NULL;
		bcopy(p->p_session->s_login, sess->s_login,
		    sizeof(sess->s_login));
		pgrp->pg_session = sess;
		KASSERT(p == curproc,
		    ("enterpgrp: mksession and p != curproc"));
	} else {
		mtx_lock(&Giant);	/* XXX TTY */
		pgrp->pg_session = p->p_session;
		SESS_LOCK(pgrp->pg_session);
		pgrp->pg_session->s_count++;
		SESS_UNLOCK(pgrp->pg_session);
		PGRP_LOCK(pgrp);
	}
	pgrp->pg_id = pgid;
	LIST_INIT(&pgrp->pg_members);

	/*
	 * As we have an exclusive lock of proctree_lock,
	 * this should not deadlock.
	 */
	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
	pgrp->pg_jobc = 0;
	SLIST_INIT(&pgrp->pg_sigiolst);
	PGRP_UNLOCK(pgrp);
	mtx_unlock(&Giant);	/* XXX TTY */

	doenterpgrp(p, pgrp);

	return (0);
}

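/*
 * Illustrative note (not part of the original file): enterpgrp() backs the
 * setsid(2) and setpgid(2) paths that create a new process group; those
 * callers look roughly like:
 *
 *	(void)enterpgrp(p, p->p_pid, newpgrp, newsess);	   setsid(2)
 *	error = enterpgrp(targp, pgid, newpgrp, NULL);	   setpgid(2)
 *
 * with newpgrp/newsess allocated before proctree_lock is taken
 * exclusively, so no allocation sleeps while the lock is held.
 */
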
/*
 * Move p to an existing process group
 */
int
enterthispgrp(p, pgrp)
	register struct proc *p;
	struct pgrp *pgrp;
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
	KASSERT(pgrp->pg_session == p->p_session,
	    ("%s: pgrp's session %p, p->p_session %p.\n",
	    __func__,
	    (void *)pgrp->pg_session,
	    (void *)p->p_session));
	KASSERT(pgrp != p->p_pgrp,
	    ("%s: p belongs to pgrp.", __func__));

	doenterpgrp(p, pgrp);

	return (0);
}

/*
 * Move p to a process group
 */
static void
doenterpgrp(p, pgrp)
	struct proc *p;
	struct pgrp *pgrp;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);

	savepgrp = p->p_pgrp;

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);

	mtx_lock(&Giant);	/* XXX TTY */
	PGRP_LOCK(pgrp);
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = pgrp;
	PROC_UNLOCK(p);
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
	PGRP_UNLOCK(savepgrp);
	PGRP_UNLOCK(pgrp);
	mtx_unlock(&Giant);	/* XXX TTY */
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
}

/*
 * remove process from process group
 */
int
leavepgrp(p)
	register struct proc *p;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	savepgrp = p->p_pgrp;
	mtx_lock(&Giant);	/* XXX TTY */
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = NULL;
	PROC_UNLOCK(p);
	PGRP_UNLOCK(savepgrp);
	mtx_unlock(&Giant);	/* XXX TTY */
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
	return (0);
}

/*
 * delete a process group
 */
static void
pgdelete(pgrp)
	register struct pgrp *pgrp;
{
	struct session *savesess;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	mtx_lock(&Giant);	/* XXX TTY */
	PGRP_LOCK(pgrp);
	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	LIST_REMOVE(pgrp, pg_hash);
	savesess = pgrp->pg_session;
	PGRP_UNLOCK(pgrp);

	mtx_destroy(&pgrp->pg_mtx);
	FREE(pgrp, M_PGRP);
	mtx_unlock(&Giant);	/* XXX TTY */
	sessrele(savesess);
}

static void
pgadjustjobc(pgrp, entering)
	struct pgrp *pgrp;
	int entering;
{

	PGRP_LOCK(pgrp);
	if (entering)
		pgrp->pg_jobc++;
	else {
		--pgrp->pg_jobc;
		if (pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}
	PGRP_UNLOCK(pgrp);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
void
fixjobc(p, pgrp, entering)
	register struct proc *p;
	register struct pgrp *pgrp;
	int entering;
{
	register struct pgrp *hispgrp;
	register struct session *mysession;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession)
		pgadjustjobc(pgrp, entering);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(p, &p->p_children, p_sibling) {
		hispgrp = p->p_pgrp;
		if (hispgrp == pgrp ||
		    hispgrp->pg_session != mysession)
			continue;
		PROC_LOCK(p);
		if (p->p_state == PRS_ZOMBIE) {
			PROC_UNLOCK(p);
			continue;
		}
		PROC_UNLOCK(p);
		pgadjustjobc(hispgrp, entering);
	}
}

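/*
 * Illustrative example (not part of the original file): in a login
 * session, a shell in pgrp A spawning a pipeline in pgrp B gives B one
 * unit of pg_jobc for each member whose parent sits in a different pgrp
 * (A) of the same session.  When the shell exits, fixjobc() with
 * entering == 0 decrements B's count; once pg_jobc reaches zero, B is
 * orphaned and orphanpg() below sends SIGHUP/SIGCONT to its members if
 * any of them are stopped.
 */
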
/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 */
static void
orphanpg(pg)
	struct pgrp *pg;
{
	register struct proc *p;

	PGRP_LOCK_ASSERT(pg, MA_OWNED);

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		PROC_LOCK(p);
		if (P_SHOULDSTOP(p)) {
			PROC_UNLOCK(p);
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				PROC_LOCK(p);
				psignal(p, SIGHUP);
				psignal(p, SIGCONT);
				PROC_UNLOCK(p);
			}
			return;
		}
		PROC_UNLOCK(p);
	}
}

void
sessrele(struct session *s)
{
	int i;

	SESS_LOCK(s);
	i = --s->s_count;
	SESS_UNLOCK(s);
	if (i == 0) {
		if (s->s_ttyp != NULL)
			ttyrel(s->s_ttyp);
		mtx_destroy(&s->s_mtx);
		FREE(s, M_SESSION);
	}
}

#ifdef DDB
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	register struct pgrp *pgrp;
	register struct proc *p;
	register int i;

	for (i = 0; i <= pgrphash; i++) {
		if (!LIST_EMPTY(&pgrphashtbl[i])) {
			printf("\tindx %d\n", i);
			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
				printf(
			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
				    (void *)pgrp, (long)pgrp->pg_id,
				    (void *)pgrp->pg_session,
				    pgrp->pg_session->s_count,
				    (void *)LIST_FIRST(&pgrp->pg_members));
				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
					printf("\t\tpid %ld addr %p pgrp %p\n",
					    (long)p->p_pid, (void *)p,
					    (void *)p->p_pgrp);
				}
			}
		}
	}
}
#endif /* DDB */

/*
 * Clear kinfo_proc and fill in any information that is common
 * to all threads in the process.
 * Must be called with the target process locked.
 */
static void
fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
{
	struct thread *td0;
	struct tty *tp;
	struct session *sp;
	struct ucred *cred;
	struct sigacts *ps;

	bzero(kp, sizeof(*kp));

	kp->ki_structsize = sizeof(*kp);
	kp->ki_paddr = p;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
	kp->ki_args = p->p_args;
	kp->ki_textvp = p->p_textvp;
#ifdef KTRACE
	kp->ki_tracep = p->p_tracevp;
	mtx_lock(&ktrace_mtx);
	kp->ki_traceflag = p->p_traceflag;
	mtx_unlock(&ktrace_mtx);
#endif
	kp->ki_fd = p->p_fd;
	kp->ki_vmspace = p->p_vmspace;
	kp->ki_flag = p->p_flag;
	cred = p->p_ucred;
	if (cred) {
		kp->ki_uid = cred->cr_uid;
		kp->ki_ruid = cred->cr_ruid;
		kp->ki_svuid = cred->cr_svuid;
		/* XXX bde doesn't like KI_NGROUPS */
		kp->ki_ngroups = min(cred->cr_ngroups, KI_NGROUPS);
		bcopy(cred->cr_groups, kp->ki_groups,
		    kp->ki_ngroups * sizeof(gid_t));
		kp->ki_rgid = cred->cr_rgid;
		kp->ki_svgid = cred->cr_svgid;
		/* If jailed(cred), emulate the old P_JAILED flag. */
		if (jailed(cred)) {
			kp->ki_flag |= P_JAILED;
			/* If inside a jail, use 0 as a jail ID. */
			if (!jailed(curthread->td_ucred))
				kp->ki_jid = cred->cr_prison->pr_id;
		}
	}
	ps = p->p_sigacts;
	if (ps) {
		mtx_lock(&ps->ps_mtx);
		kp->ki_sigignore = ps->ps_sigignore;
		kp->ki_sigcatch = ps->ps_sigcatch;
		mtx_unlock(&ps->ps_mtx);
	}
	PROC_SLOCK(p);
	if (p->p_state != PRS_NEW &&
	    p->p_state != PRS_ZOMBIE &&
	    p->p_vmspace != NULL) {
		struct vmspace *vm = p->p_vmspace;

		kp->ki_size = vm->vm_map.size;
		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
		FOREACH_THREAD_IN_PROC(p, td0) {
			if (!TD_IS_SWAPPED(td0))
				kp->ki_rssize += td0->td_kstack_pages;
			if (td0->td_altkstack_obj != NULL)
				kp->ki_rssize += td0->td_altkstack_pages;
		}
		kp->ki_swrss = vm->vm_swrss;
		kp->ki_tsize = vm->vm_tsize;
		kp->ki_dsize = vm->vm_dsize;
		kp->ki_ssize = vm->vm_ssize;
	} else if (p->p_state == PRS_ZOMBIE)
		kp->ki_stat = SZOMB;
	if (kp->ki_flag & P_INMEM)
		kp->ki_sflag = PS_INMEM;

	/* Calculate legacy swtime as seconds since 'swtick'. */
	kp->ki_swtime = (ticks - p->p_swtick) / hz;
	kp->ki_pid = p->p_pid;
	kp->ki_nice = p->p_nice;
	rufetch(p, &kp->ki_rusage);
	kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
	PROC_SUNLOCK(p);
	if ((p->p_flag & P_INMEM) && p->p_stats != NULL) {
		kp->ki_start = p->p_stats->p_start;
		timevaladd(&kp->ki_start, &boottime);
		PROC_SLOCK(p);
		calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
		PROC_SUNLOCK(p);
		calccru(p, &kp->ki_childutime, &kp->ki_childstime);

		/* Some callers want child-times in a single value */
		kp->ki_childtime = kp->ki_childstime;
		timevaladd(&kp->ki_childtime, &kp->ki_childutime);
	}
	tp = NULL;
	if (p->p_pgrp) {
		kp->ki_pgid = p->p_pgrp->pg_id;
		kp->ki_jobc = p->p_pgrp->pg_jobc;
		sp = p->p_pgrp->pg_session;

		if (sp != NULL) {
			kp->ki_sid = sp->s_sid;
			SESS_LOCK(sp);
			strlcpy(kp->ki_login, sp->s_login,
			    sizeof(kp->ki_login));
			if (sp->s_ttyvp)
				kp->ki_kiflag |= KI_CTTY;
			if (SESS_LEADER(p))
				kp->ki_kiflag |= KI_SLEADER;
			tp = sp->s_ttyp;
			SESS_UNLOCK(sp);
		}
	}
	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
		kp->ki_tdev = dev2udev(tp->t_dev);
		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		if (tp->t_session)
			kp->ki_tsid = tp->t_session->s_sid;
	} else
		kp->ki_tdev = NODEV;
	if (p->p_comm[0] != '\0')
		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
	if (p->p_sysent && p->p_sysent->sv_name != NULL &&
	    p->p_sysent->sv_name[0] != '\0')
		strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
	kp->ki_siglist = p->p_siglist;
	kp->ki_xstat = p->p_xstat;
	kp->ki_acflag = p->p_acflag;
	kp->ki_lock = p->p_lock;
	if (p->p_pptr)
		kp->ki_ppid = p->p_pptr->p_pid;
}

/*
 * Fill in information that is thread specific.  Must be called with p_slock
 * locked.  If 'preferthread' is set, overwrite certain process-related
 * fields that are maintained for both threads and processes.
 */
static void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);

	thread_lock(td);
	if (td->td_wmesg != NULL)
		strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
	else
		bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg));
	if (td->td_name[0] != '\0')
		strlcpy(kp->ki_ocomm, td->td_name, sizeof(kp->ki_ocomm));
	if (TD_ON_LOCK(td)) {
		kp->ki_kiflag |= KI_LOCKBLOCK;
		strlcpy(kp->ki_lockname, td->td_lockname,
		    sizeof(kp->ki_lockname));
	} else {
		kp->ki_kiflag &= ~KI_LOCKBLOCK;
		bzero(kp->ki_lockname, sizeof(kp->ki_lockname));
	}

	if (p->p_state == PRS_NORMAL) { /* XXXKSE very approximate */
		if (TD_ON_RUNQ(td) ||
		    TD_CAN_RUN(td) ||
		    TD_IS_RUNNING(td)) {
			kp->ki_stat = SRUN;
		} else if (P_SHOULDSTOP(p)) {
			kp->ki_stat = SSTOP;
		} else if (TD_IS_SLEEPING(td)) {
			kp->ki_stat = SSLEEP;
		} else if (TD_ON_LOCK(td)) {
			kp->ki_stat = SLOCK;
		} else {
			kp->ki_stat = SWAIT;
		}
	} else if (p->p_state == PRS_ZOMBIE) {
		kp->ki_stat = SZOMB;
	} else {
		kp->ki_stat = SIDL;
	}

	/* Things in the thread */
	kp->ki_wchan = td->td_wchan;
	kp->ki_pri.pri_level = td->td_priority;
	kp->ki_pri.pri_native = td->td_base_pri;
	kp->ki_lastcpu = td->td_lastcpu;
	kp->ki_oncpu = td->td_oncpu;
	kp->ki_tdflags = td->td_flags;
	kp->ki_tid = td->td_tid;
	kp->ki_numthreads = p->p_numthreads;
	kp->ki_pcb = td->td_pcb;
	kp->ki_kstack = (void *)td->td_kstack;
	kp->ki_pctcpu = sched_pctcpu(td);
	kp->ki_estcpu = td->td_estcpu;
	kp->ki_slptime = (ticks - td->td_slptick) / hz;
	kp->ki_pri.pri_class = td->td_pri_class;
	kp->ki_pri.pri_user = td->td_user_pri;

	if (preferthread)
		kp->ki_runtime = cputick2usec(td->td_runtime);

	/* We can't get this anymore but ps etc never used it anyway. */
	kp->ki_rqindex = 0;

	SIGSETOR(kp->ki_siglist, td->td_siglist);
	kp->ki_sigmask = td->td_sigmask;
	thread_unlock(td);
}

/*
 * Fill in a kinfo_proc structure for the specified process.
 * Must be called with the target process locked.
 */
void
fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
{

	fill_kinfo_proc_only(p, kp);
	PROC_SLOCK(p);
	if (FIRST_THREAD_IN_PROC(p) != NULL)
		fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0);
	PROC_SUNLOCK(p);
}

struct pstats *
pstats_alloc(void)
{

	return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK));
}

/*
 * Copy parts of p_stats; zero the rest of p_stats (statistics).
 */
void
pstats_fork(struct pstats *src, struct pstats *dst)
{

	bzero(&dst->pstat_startzero,
	    __rangeof(struct pstats, pstat_startzero, pstat_endzero));
	bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
	    __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));
}

void
pstats_free(struct pstats *ps)
{

	free(ps, M_SUBPROC);
}

/*
 * Locate a zombie process by number
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &zombproc, p_list)
		if (p->p_pid == pid) {
			PROC_LOCK(p);
			break;
		}
	sx_sunlock(&allproc_lock);
	return (p);
}

#define KERN_PROC_ZOMBMASK	0x3
#define KERN_PROC_NOTHREADS	0x4

/*
 * Must be called with the process locked and will return with it unlocked.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct thread *td;
	struct kinfo_proc kinfo_proc;
	int error = 0;
	struct proc *np;
	pid_t pid = p->p_pid;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	fill_kinfo_proc_only(p, &kinfo_proc);
	if (flags & KERN_PROC_NOTHREADS) {
		PROC_SLOCK(p);
		if (FIRST_THREAD_IN_PROC(p) != NULL)
			fill_kinfo_thread(FIRST_THREAD_IN_PROC(p),
			    &kinfo_proc, 0);
		PROC_SUNLOCK(p);
		error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
		    sizeof(kinfo_proc));
	} else {
		PROC_SLOCK(p);
		if (FIRST_THREAD_IN_PROC(p) != NULL)
			FOREACH_THREAD_IN_PROC(p, td) {
				fill_kinfo_thread(td, &kinfo_proc, 1);
				error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
				    sizeof(kinfo_proc));
				if (error)
					break;
			}
		else
			error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
			    sizeof(kinfo_proc));
		PROC_SUNLOCK(p);
	}
	PROC_UNLOCK(p);
	if (error)
		return (error);
	if (flags & KERN_PROC_ZOMBMASK)
		np = zpfind(pid);
	else {
		if (pid == 0)
			return (0);
		np = pfind(pid);
	}
	if (np == NULL)
		return (ESRCH);
	if (np != p) {
		PROC_UNLOCK(np);
		return (ESRCH);
	}
	PROC_UNLOCK(np);
	return (0);
}

static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct proc *p;
	int flags, doingzomb, oid_number;
	int error = 0;

	oid_number = oidp->oid_number;
	if (oid_number != KERN_PROC_ALL &&
	    (oid_number & KERN_PROC_INC_THREAD) == 0)
		flags = KERN_PROC_NOTHREADS;
	else {
		flags = 0;
		oid_number &= ~KERN_PROC_INC_THREAD;
	}
	if (oid_number == KERN_PROC_PID) {
		if (namelen != 1)
			return (EINVAL);
		error = sysctl_wire_old_buffer(req, 0);
		if (error)
			return (error);
		p = pfind((pid_t)name[0]);
		if (!p)
			return (ESRCH);
		if ((error = p_cansee(curthread, p))) {
			PROC_UNLOCK(p);
			return (error);
		}
		error = sysctl_out_proc(p, req, flags);
		return (error);
	}

	switch (oid_number) {
	case KERN_PROC_ALL:
		if (namelen != 0)
			return (EINVAL);
		break;
	case KERN_PROC_PROC:
		if (namelen != 0 && namelen != 1)
			return (EINVAL);
		break;
	default:
		if (namelen != 1)
			return (EINVAL);
		break;
	}

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			return (error);
	}
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sx_slock(&allproc_lock);
	for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) {
		if (!doingzomb)
			p = LIST_FIRST(&allproc);
		else
			p = LIST_FIRST(&zombproc);
		for (; p != 0; p = LIST_NEXT(p, p_list)) {
			/*
			 * Skip embryonic processes.
			 */
			PROC_SLOCK(p);
			if (p->p_state == PRS_NEW) {
				PROC_SUNLOCK(p);
				continue;
			}
			PROC_SUNLOCK(p);
			PROC_LOCK(p);
			KASSERT(p->p_ucred != NULL,
			    ("process credential is NULL for non-NEW proc"));
			/*
			 * Show a user only appropriate processes.
			 */
			if (p_cansee(curthread, p)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid_number) {

			case KERN_PROC_GID:
				if (p->p_ucred->cr_gid != (gid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_RGID:
				if (p->p_ucred->cr_rgid != (gid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_SESSION:
				if (p->p_session == NULL ||
				    p->p_session->s_sid != (pid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_TTY:
				if ((p->p_flag & P_CONTROLT) == 0 ||
				    p->p_session == NULL) {
					PROC_UNLOCK(p);
					continue;
				}
				SESS_LOCK(p->p_session);
				if (p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
				    (dev_t)name[0]) {
					SESS_UNLOCK(p->p_session);
					PROC_UNLOCK(p);
					continue;
				}
				SESS_UNLOCK(p->p_session);
				break;

			case KERN_PROC_UID:
				if (p->p_ucred->cr_uid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred->cr_ruid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_PROC:
				break;

			default:
				break;

			}

			error = sysctl_out_proc(p, req, flags | doingzomb);
			if (error) {
				sx_sunlock(&allproc_lock);
				return (error);
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (0);
}

static struct pargs *
pargs_alloc(int len)
{
	struct pargs *pa;

	MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
	    M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = len;
	return (pa);
}

static void
pargs_free(struct pargs *pa)
{

	FREE(pa, M_PARGS);
}

void
pargs_hold(struct pargs *pa)
{

	if (pa == NULL)
		return;
	refcount_acquire(&pa->ar_ref);
}

void
pargs_drop(struct pargs *pa)
{

	if (pa == NULL)
		return;
	if (refcount_release(&pa->ar_ref))
		pargs_free(pa);
}

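/*
 * Illustrative sketch (not part of the original file): the hold/drop pair
 * lets a caller keep a stable reference to p_args after the process lock
 * is released, which is exactly the pattern sysctl_kern_proc_args() uses
 * below:
 *
 *	PROC_LOCK(p);
 *	pa = p->p_args;
 *	pargs_hold(pa);
 *	PROC_UNLOCK(p);
 *	(read pa->ar_args / pa->ar_length)
 *	pargs_drop(pa);
 */
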
/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct pargs *newpa, *pa;
	struct proc *p;
	int error = 0;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (!p)
		return (ESRCH);

	if ((error = p_cansee(curthread, p)) != 0) {
		PROC_UNLOCK(p);
		return (error);
	}

	if (req->newptr && curproc != p) {
		PROC_UNLOCK(p);
		return (EPERM);
	}

	pa = p->p_args;
	pargs_hold(pa);
	PROC_UNLOCK(p);
	if (req->oldptr != NULL && pa != NULL)
		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
	pargs_drop(pa);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
		return (ENOMEM);
	newpa = pargs_alloc(req->newlen);
	error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
	if (error != 0) {
		pargs_free(newpa);
		return (error);
	}
	PROC_LOCK(p);
	pa = p->p_args;
	p->p_args = newpa;
	PROC_UNLOCK(p);
	pargs_drop(pa);
	return (0);
}

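/*
 * Illustrative userland sketch (not part of the original file): reading
 * another process's argument strings through this handler:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, pid };
 *	char buf[4096];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 4, buf, &len, NULL, 0) == 0) {
 *		(buf now holds len bytes of NUL-separated arguments)
 *	}
 */
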
/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	struct vnode *vp;
	char *retbuf, *freebuf;
	int error, vfslocked;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = req->td->td_proc;
	} else {
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
		if ((error = p_cansee(curthread, p)) != 0) {
			PROC_UNLOCK(p);
			return (error);
		}
	}

	vp = p->p_textvp;
	if (vp == NULL) {
		if (*pidp != -1)
			PROC_UNLOCK(p);
		return (0);
	}
	vref(vp);
	if (*pidp != -1)
		PROC_UNLOCK(p);
	error = vn_fullpath(req->td, vp, &retbuf, &freebuf);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	if (error)
		return (error);
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	free(freebuf, M_TEMP);
	return (error);
}

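/*
 * Illustrative userland sketch (not part of the original file): a process
 * can fetch its own executable path by passing -1 as the pid:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
 *	char path[PATH_MAX];
 *	size_t len = sizeof(path);
 *
 *	if (sysctl(mib, 4, path, &len, NULL, 0) == 0)
 *		printf("%s\n", path);
 */
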
static int
sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	char *sv_name;
	int *name;
	int namelen;
	int error;

	namelen = arg2;
	if (namelen != 1)
		return (EINVAL);

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	sv_name = p->p_sysent->sv_name;
	PROC_UNLOCK(p);
	return (sysctl_handle_string(oidp, sv_name, 0, req));
}

#ifdef KINFO_OVMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_ovmentry) == KINFO_OVMENTRY_SIZE);
#endif

/* Compatibility with early 7-stable */
static int
sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
{
	vm_map_entry_t entry, tmp_entry;
	unsigned int last_timestamp;
	char *fullpath, *freepath;
	struct kinfo_ovmentry *kve;
	struct vattr va;
	struct ucred *cred;
	int error, *name;
	struct vnode *vp;
	struct proc *p;
	vm_map_t map;
	struct vmspace *vm;

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	_PHOLD(p);
	PROC_UNLOCK(p);
	vm = vmspace_acquire_ref(p);
	if (vm == NULL) {
		PRELE(p);
		return (ESRCH);
	}
	kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);

	map = &p->p_vmspace->vm_map;	/* XXXRW: More locking required? */
	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj, tobj, lobj;
		vm_offset_t addr;
		int vfslocked;

		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;

		bzero(kve, sizeof(*kve));
		kve->kve_structsize = sizeof(*kve);

		kve->kve_private_resident = 0;
		obj = entry->object.vm_object;
		if (obj != NULL) {
			VM_OBJECT_LOCK(obj);
			if (obj->shadow_count == 1)
				kve->kve_private_resident =
				    obj->resident_page_count;
		}
		kve->kve_resident = 0;
		addr = entry->start;
		while (addr < entry->end) {
			if (pmap_extract(map->pmap, addr))
				kve->kve_resident++;
			addr += PAGE_SIZE;
		}

		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
		}

		kve->kve_start = (void*)entry->start;
		kve->kve_end = (void*)entry->end;
		kve->kve_offset = (off_t)entry->offset;

		if (entry->protection & VM_PROT_READ)
			kve->kve_protection |= KVME_PROT_READ;
		if (entry->protection & VM_PROT_WRITE)
			kve->kve_protection |= KVME_PROT_WRITE;
		if (entry->protection & VM_PROT_EXECUTE)
			kve->kve_protection |= KVME_PROT_EXEC;

		if (entry->eflags & MAP_ENTRY_COW)
			kve->kve_flags |= KVME_FLAG_COW;
		if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
			kve->kve_flags |= KVME_FLAG_NEEDS_COPY;

		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);

		kve->kve_fileid = 0;
		kve->kve_fsid = 0;
		freepath = NULL;
		fullpath = "";
		if (lobj) {
			vp = NULL;
			switch (lobj->type) {
			case OBJT_DEFAULT:
				kve->kve_type = KVME_TYPE_DEFAULT;
				break;
			case OBJT_VNODE:
				kve->kve_type = KVME_TYPE_VNODE;
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				kve->kve_type = KVME_TYPE_SWAP;
				break;
			case OBJT_DEVICE:
				kve->kve_type = KVME_TYPE_DEVICE;
				break;
			case OBJT_PHYS:
				kve->kve_type = KVME_TYPE_PHYS;
				break;
			case OBJT_DEAD:
				kve->kve_type = KVME_TYPE_DEAD;
				break;
			default:
				kve->kve_type = KVME_TYPE_UNKNOWN;
				break;
			}
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);

			kve->kve_ref_count = obj->ref_count;
			kve->kve_shadow_count = obj->shadow_count;
			VM_OBJECT_UNLOCK(obj);
			if (vp != NULL) {
				vn_fullpath(curthread, vp, &fullpath,
				    &freepath);
				cred = curthread->td_ucred;
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				vn_lock(vp, LK_SHARED | LK_RETRY, curthread);
				if (VOP_GETATTR(vp, &va, cred, curthread) == 0) {
					kve->kve_fileid = va.va_fileid;
					kve->kve_fsid = va.va_fsid;
				}
				vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
			}
		} else {
			kve->kve_type = KVME_TYPE_NONE;
			kve->kve_ref_count = 0;
			kve->kve_shadow_count = 0;
		}

		strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
		if (freepath != NULL)
			free(freepath, M_TEMP);

		error = SYSCTL_OUT(req, kve, sizeof(*kve));
		vm_map_lock_read(map);
		if (error)
			break;
		if (last_timestamp != map->timestamp) {
			vm_map_lookup_entry(map, addr - 1, &tmp_entry);
			entry = tmp_entry;
		}
	}
	vm_map_unlock_read(map);
	vmspace_free(vm);
	PRELE(p);
	free(kve, M_TEMP);
	return (error);
}

#ifdef KINFO_VMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
#endif

static int
sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
{
	vm_map_entry_t entry, tmp_entry;
	unsigned int last_timestamp;
	char *fullpath, *freepath;
	struct kinfo_vmentry *kve;
	struct vattr va;
	struct ucred *cred;
	int error, *name;
	struct vnode *vp;
	struct proc *p;
	vm_map_t map;
	struct vmspace *vm;

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	if (p->p_flag & P_WEXIT) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	_PHOLD(p);
	PROC_UNLOCK(p);
	vm = vmspace_acquire_ref(p);
	if (vm == NULL) {
		PRELE(p);
		return (ESRCH);
	}
	kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);

	map = &vm->vm_map;	/* XXXRW: More locking required? */
	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj, tobj, lobj;
		vm_offset_t addr;
		int vfslocked;

		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;

		bzero(kve, sizeof(*kve));

		kve->kve_private_resident = 0;
		obj = entry->object.vm_object;
		if (obj != NULL) {
			VM_OBJECT_LOCK(obj);
			if (obj->shadow_count == 1)
				kve->kve_private_resident =
				    obj->resident_page_count;
		}
		kve->kve_resident = 0;
		addr = entry->start;
		while (addr < entry->end) {
			if (pmap_extract(map->pmap, addr))
				kve->kve_resident++;
			addr += PAGE_SIZE;
		}

		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
		}

		kve->kve_start = entry->start;
		kve->kve_end = entry->end;
		kve->kve_offset = entry->offset;

		if (entry->protection & VM_PROT_READ)
			kve->kve_protection |= KVME_PROT_READ;
		if (entry->protection & VM_PROT_WRITE)
			kve->kve_protection |= KVME_PROT_WRITE;
		if (entry->protection & VM_PROT_EXECUTE)
			kve->kve_protection |= KVME_PROT_EXEC;

		if (entry->eflags & MAP_ENTRY_COW)
			kve->kve_flags |= KVME_FLAG_COW;
		if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
			kve->kve_flags |= KVME_FLAG_NEEDS_COPY;

		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);

		kve->kve_fileid = 0;
		kve->kve_fsid = 0;
		freepath = NULL;
		fullpath = "";
		if (lobj) {
			vp = NULL;
			switch (lobj->type) {
			case OBJT_DEFAULT:
				kve->kve_type = KVME_TYPE_DEFAULT;
				break;
			case OBJT_VNODE:
				kve->kve_type = KVME_TYPE_VNODE;
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				kve->kve_type = KVME_TYPE_SWAP;
				break;
			case OBJT_DEVICE:
				kve->kve_type = KVME_TYPE_DEVICE;
				break;
			case OBJT_PHYS:
				kve->kve_type = KVME_TYPE_PHYS;
				break;
			case OBJT_DEAD:
				kve->kve_type = KVME_TYPE_DEAD;
				break;
			default:
				kve->kve_type = KVME_TYPE_UNKNOWN;
				break;
			}
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);

			kve->kve_ref_count = obj->ref_count;
			kve->kve_shadow_count = obj->shadow_count;
			VM_OBJECT_UNLOCK(obj);
			if (vp != NULL) {
				vn_fullpath(curthread, vp, &fullpath,
				    &freepath);
				cred = curthread->td_ucred;
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				vn_lock(vp, LK_SHARED | LK_RETRY, curthread);
				if (VOP_GETATTR(vp, &va, cred, curthread) == 0) {
					kve->kve_fileid = va.va_fileid;
					kve->kve_fsid = va.va_fsid;
				}
				vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
			}
		} else {
			kve->kve_type = KVME_TYPE_NONE;
			kve->kve_ref_count = 0;
			kve->kve_shadow_count = 0;
		}

		strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
		if (freepath != NULL)
			free(freepath, M_TEMP);

		/* Pack record size down */
		kve->kve_structsize = offsetof(struct kinfo_vmentry, kve_path) +
		    strlen(kve->kve_path) + 1;
		kve->kve_structsize = roundup(kve->kve_structsize,
		    sizeof(uint64_t));
		error = SYSCTL_OUT(req, kve, kve->kve_structsize);
		vm_map_lock_read(map);
		if (error)
			break;
		if (last_timestamp != map->timestamp) {
			vm_map_lookup_entry(map, addr - 1, &tmp_entry);
			entry = tmp_entry;
		}
	}
	vm_map_unlock_read(map);
	vmspace_free(vm);
	PRELE(p);
	free(kve, M_TEMP);
	return (error);
}

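/*
 * Illustrative userland sketch (not part of the original file): because
 * each record is packed down to kve_structsize bytes, a consumer must
 * step by that field rather than by sizeof(struct kinfo_vmentry):
 *
 *	char *bp;
 *
 *	for (bp = buf; bp < buf + len;
 *	    bp += ((struct kinfo_vmentry *)bp)->kve_structsize) {
 *		struct kinfo_vmentry *kve = (struct kinfo_vmentry *)bp;
 *		(inspect kve->kve_start, kve->kve_path, ...)
 *	}
 */
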
#if defined(STACK) || defined(DDB)
static int
sysctl_kern_proc_kstack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_kstack *kkstp;
	int error, i, *name, numthreads;
	lwpid_t *lwpidarray;
	struct thread *td;
	struct stack *st;
	struct sbuf sb;
	struct proc *p;

	name = (int *)arg1;
	if ((p = pfind((pid_t)name[0])) == NULL)
		return (ESRCH);
	/* XXXRW: Not clear ESRCH is the right error during proc execve(). */
	if (p->p_flag & P_WEXIT || p->p_flag & P_INEXEC) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((error = p_candebug(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}
	_PHOLD(p);
	PROC_UNLOCK(p);

	kkstp = malloc(sizeof(*kkstp), M_TEMP, M_WAITOK);
	st = stack_create();

	lwpidarray = NULL;
	numthreads = 0;
	PROC_LOCK(p);
repeat:
	if (numthreads < p->p_numthreads) {
		if (lwpidarray != NULL) {
			free(lwpidarray, M_TEMP);
			lwpidarray = NULL;
		}
		numthreads = p->p_numthreads;
		PROC_UNLOCK(p);
		lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP,
		    M_WAITOK | M_ZERO);
		PROC_LOCK(p);
		goto repeat;
	}
	i = 0;

	/*
	 * XXXRW: During the below loop, execve(2) and countless other sorts
	 * of changes could have taken place.  Should we check to see if the
	 * vmspace has been replaced, or the like, in order to prevent
	 * giving a snapshot that spans, say, execve(2), with some threads
	 * before and some after?  Among other things, the credentials could
	 * have changed, in which case the right to extract debug info might
	 * no longer be assured.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(i < numthreads,
		    ("sysctl_kern_proc_kstack: numthreads"));
		lwpidarray[i] = td->td_tid;
		i++;
	}
	numthreads = i;
	for (i = 0; i < numthreads; i++) {
		td = thread_find(p, lwpidarray[i]);
		if (td == NULL)
			continue;
		bzero(kkstp, sizeof(*kkstp));
		(void)sbuf_new(&sb, kkstp->kkst_trace,
		    sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN);
		thread_lock(td);
		kkstp->kkst_tid = td->td_tid;
		if (TD_IS_SWAPPED(td))
			kkstp->kkst_state = KKST_STATE_SWAPPED;
		else if (TD_IS_RUNNING(td))
			kkstp->kkst_state = KKST_STATE_RUNNING;
		else {
			kkstp->kkst_state = KKST_STATE_STACKOK;
			stack_save_td(st, td);
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
		stack_sbuf_print(&sb, st);
		sbuf_finish(&sb);
		sbuf_delete(&sb);
		error = SYSCTL_OUT(req, kkstp, sizeof(*kkstp));
		PROC_LOCK(p);
		if (error)
			break;
	}
	_PRELE(p);
	PROC_UNLOCK(p);
	if (lwpidarray != NULL)
		free(lwpidarray, M_TEMP);
	stack_destroy(st);
	free(kkstp, M_TEMP);
	return (error);
}
#endif

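/*
 * Illustrative userland sketch (not part of the original file): kstack
 * records are fixed size, so the usual two-call sysctl(3) pattern works;
 * this is roughly what procstat(1) builds its kernel-stack output on:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_KSTACK, pid };
 *	size_t len;
 *
 *	if (sysctl(mib, 4, NULL, &len, NULL, 0) == 0) {
 *		struct kinfo_kstack *kkstp = malloc(len);
 *		if (sysctl(mib, 4, kkstp, &len, NULL, 0) == 0)
 *			(len / sizeof(*kkstp) stack records available)
 *	}
 */
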
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD,  0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT|
	CTLFLAG_MPSAFE, 0, 0, sysctl_kern_proc, "S,proc",
	"Return entire process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Return process table, no threads");

static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
	CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE,
	sysctl_kern_proc_args, "Process argument list");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_pathname, "Process executable path");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_sv_name,
	"Process syscall vector name (ABI type)");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD),
	sid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc,
	"Return process table, no threads");

static SYSCTL_NODE(_kern_proc, KERN_PROC_OVMMAP, ovmmap, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_ovmmap, "Old Process vm map entries");

static SYSCTL_NODE(_kern_proc, KERN_PROC_VMMAP, vmmap, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_vmmap, "Process vm map entries");

#if defined(STACK) || defined(DDB)
static SYSCTL_NODE(_kern_proc, KERN_PROC_KSTACK, kstack, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_kstack, "Process kernel stacks");
#endif