/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */
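
/*
 * Example (illustrative, not part of the original file): hashinit()
 * always allocates a power-of-two number of buckets and stores the
 * bucket count minus one in uihash, so UIHASH() can index by masking.
 * With 128 buckets, uihash is 127 and UIHASH(1001) selects bucket
 * 1001 & 127 == 105.
 */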
static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_locked(struct rusage_ext *rux, struct thread *td);
static __inline int	lim_shared(struct plimit *limp);
/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}
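
/*
 * Illustrative userland usage (a sketch, not part of this file): because
 * the nice value returned through td_retval can legitimately be -1, a
 * caller of getpriority(2) must clear errno to tell an error apart from
 * a real result of -1:
 *
 *	errno = 0;
 *	int nice = getpriority(PRIO_PROCESS, 0);
 *	if (nice == -1 && errno != 0)
 *		err(1, "getpriority");
 */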
#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == uap->who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}
/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}
static int unprivileged_idprio;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
    &unprivileged_idprio, 0, "Allow non-root users to set an idle priority");
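
/*
 * Example (a sketch, not part of the original file): an administrator
 * would enable the knob above with
 *
 *	sysctl security.bsd.unprivileged_idprio=1
 *
 * after which unprivileged users may place threads in the idle class,
 * subject to the deadlock caveat discussed in sys_rtprio_thread() below.
 */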
/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		/* Only look up thread in current process */
		td1 = tdfind(uap->lwpid, curproc->p_pid);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */

		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idleprio processes, there
		 * is a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need (and
		 * the idleprio process can't run due to a CPU-bound normal
		 * process).  Fix me!  XXX
		 *
		 * This problem is not only related to idleprio processes.
		 * A user-level program can obtain a file lock and hold it
		 * indefinitely.  Additionally, without idleprio processes it
		 * is still conceivable that a program with low priority will
		 * never get to run.  In short, allowing this feature might
		 * make it easier to lock a resource indefinitely, but it is
		 * not the only thing that makes it possible.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
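
/*
 * Illustrative userland usage (a sketch, not part of this file): move the
 * calling thread into the idle class at the weakest priority:
 *
 *	struct rtprio rtp = { .type = RTP_PRIO_IDLE, .prio = RTP_PRIO_MAX };
 *	if (rtprio_thread(RTP_SET, 0, &rtp) != 0)
 *		err(1, "rtprio_thread");
 *
 * Without security.bsd.unprivileged_idprio this requires privilege.
 */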
/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    !unprivileged_idprio)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		/*
		 * If we are setting our own priority, set just our
		 * thread but if we are doing another process,
		 * do all the threads on that process.  If we
		 * specify our own pid we do the latter.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri, oldclass, oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	oldclass = td->td_pri_class;
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
	    td->td_pri_class != RTP_PRIO_NORMAL))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}
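
/*
 * Worked example (illustrative, not part of the original file): the
 * rtprio classes occupy disjoint bands of the global priority space, so
 * RTP_SET with type RTP_PRIO_REALTIME and prio 0 maps to kernel priority
 * PRI_MIN_REALTIME + 0, while prio 0 in the idle class maps to
 * PRI_MIN_IDLE + 0.  A smaller resulting value always means a stronger
 * priority, which is why every realtime thread outranks every
 * timesharing thread, which in turn outranks every idle thread.
 */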
void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}
#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */
#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}
static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_STATLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_STATUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
		    lim_cb, p, C_PREL(1));
}
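
/*
 * Worked example (illustrative): with RLIMIT_CPU soft=10 and hard=60,
 * once accumulated runtime passes 10 CPU-seconds the process receives
 * SIGXCPU and the enforced soft limit is bumped by 5 seconds, so the
 * signal repeats roughly every five CPU-seconds of further use until
 * the 60-second hard limit kills the process.
 */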
int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}
int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	newlim = NULL;
	PROC_LOCK(p);
	if (lim_shared(p->p_limit)) {
		PROC_UNLOCK(p);
		newlim = lim_alloc();
		PROC_LOCK(p);
	}
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			if (newlim != NULL)
				lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	if (newlim != NULL) {
		lim_copy(newlim, oldlim);
		alimp = &newlim->pl_rlimit[which];
	}

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
			    lim_cb, p, C_PREL(1));
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	if (newlim != NULL)
		p->p_limit = newlim;
	PROC_UNLOCK(p);
	if (newlim != NULL)
		lim_free(oldlim);

	if (which == RLIMIT_STACK &&
	    /*
	     * Skip calls from exec_new_vmspace(), done when stack is
	     * not mapped yet.
	     */
	    (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}
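
/*
 * Illustrative userland usage (a sketch, not part of this file):
 * shrinking the stack limit takes effect immediately because of the
 * vm_map_protect() call above:
 *
 *	struct rlimit rl = { .rlim_cur = 2UL << 20, .rlim_max = 8UL << 20 };
 *	if (setrlimit(RLIMIT_STACK, &rl) != 0)
 *		err(1, "setrlimit");
 *
 * The pages between the old and new soft limit become VM_PROT_NONE, so
 * touching them faults instead of silently working.
 */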
#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_getrlimit(struct thread *td, struct __getrlimit_args *uap)
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}
/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}
/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}
/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}
static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
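
/*
 * Worked example (illustrative): with 600 user ticks, 300 system ticks,
 * 100 interrupt ticks, and tu = 2,000,000 usec of runtime, the split is
 * uu = (2000000 * 600) / 1000 = 1,200,000 usec of user time and
 * su = (2000000 * 300) / 1000 = 600,000 usec of system time; interrupt
 * time accounts for the remaining 200,000 usec.
 */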
#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}
int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_STATLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_STATUNLOCK(p);
		break;

	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}
/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_locked(struct rusage_ext *rux, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);
	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(&p->p_rux, td);
	ruxagg_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
	thread_unlock(td);
}
/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_STATLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}
/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume the calcru is executed only once
 * rufetch is completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_STATLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_STATUNLOCK(p);
}
/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}
struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

static __inline int
lim_shared(struct plimit *limp)
{

	return (limp->pl_refcnt > 1);
}
void
lim_fork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK_ASSERT(p1, MA_OWNED);
	PROC_LOCK_ASSERT(p2, MA_OWNED);

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
		    lim_cb, p2, C_PREL(1));
}
void
lim_free(struct plimit *limp)
{

	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(!lim_shared(dst), ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}
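
/*
 * Illustrative lifecycle (a simplified sketch of how the pieces above
 * combine, locking omitted): fork shares the parent's plimit by bumping
 * its refcount, and the first setrlimit() in either process breaks the
 * share:
 *
 *	child->p_limit = lim_hold(parent->p_limit);  // refcnt 2, shared
 *	...
 *	if (lim_shared(p->p_limit)) {                // in setrlimit path
 *		newlim = lim_alloc();                // private copy
 *		lim_copy(newlim, p->p_limit);
 *		p->p_limit = newlim;
 *		lim_free(oldlim);                    // drop the shared ref
 *	}
 */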
/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}
/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}
void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}
/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 * Increase refcount on uidinfo struct returned.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid) {
			uihold(uip);
			break;
		}

	return (uip);
}
/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Returns with uidinfo struct referenced.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *new_uip, *uip;

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	rw_runlock(&uihashtbl_lock);
	if (uip != NULL)
		return (uip);

	new_uip = malloc(sizeof(*new_uip), M_UIDINFO, M_WAITOK | M_ZERO);
	racct_create(&new_uip->ui_racct);
	refcount_init(&new_uip->ui_ref, 1);
	new_uip->ui_uid = uid;
	mtx_init(&new_uip->ui_vmsize_mtx, "ui_vmsize", NULL, MTX_DEF);

	rw_wlock(&uihashtbl_lock);
	/*
	 * There's a chance someone created our uidinfo while we
	 * were in malloc and not holding the lock, so we have to
	 * make sure we don't insert a duplicate uidinfo.
	 */
	if ((uip = uilookup(uid)) == NULL) {
		LIST_INSERT_HEAD(UIHASH(uid), new_uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		uip = new_uip;
	} else {
		rw_wunlock(&uihashtbl_lock);
		racct_destroy(&new_uip->ui_racct);
		mtx_destroy(&new_uip->ui_vmsize_mtx);
		free(new_uip, M_UIDINFO);
	}
	return (uip);
}
/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}
/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(struct uidinfo *uip)
{
	int old;

	/* Prepare for optimal case. */
	old = uip->ui_ref;
	if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
		return;

	/* Prepare for suboptimal case. */
	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref) == 0) {
		rw_wunlock(&uihashtbl_lock);
		return;
	}

	racct_destroy(&uip->ui_racct);
	LIST_REMOVE(uip, ui_hash);
	rw_wunlock(&uihashtbl_lock);

	if (uip->ui_sbsize != 0)
		printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);
	if (uip->ui_vmsize != 0)
		printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
		    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
	mtx_destroy(&uip->ui_vmsize_mtx);
	free(uip, M_UIDINFO);
}
void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	rw_runlock(&uihashtbl_lock);
}
/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_proccnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_proccnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_proccnt, (long)diff);
		if (uip->ui_proccnt < 0)
			printf("negative proccnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}
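
/*
 * Illustrative usage (a sketch, simplified from the fork path): the
 * caller charges the user's count before committing and uncharges on
 * failure or at exit:
 *
 *	if (!chgproccnt(uip, 1, lim_cur(p, RLIMIT_NPROC))) {
 *		// over the per-uid process limit; fail with EAGAIN
 *	}
 *	...
 *	chgproccnt(uip, -1, 0);	// undo on error or at exit
 */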
/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff;

	diff = to - *hiwat;
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_sbsize, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_sbsize, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_sbsize, (long)diff);
		if (uip->ui_sbsize < 0)
			printf("negative sbsize for uid = %d\n", uip->ui_uid);
	}
	*hiwat = to;
	return (1);
}
/*
 * Change the count associated with number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_ptscnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_ptscnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_ptscnt, (long)diff);
		if (uip->ui_ptscnt < 0)
			printf("negative ptscnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}
int
chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_kqcnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_kqcnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_kqcnt, (long)diff);
		if (uip->ui_kqcnt < 0)
			printf("negative kqcnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}