/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct mtx uihashtbl_mtx;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */
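
/*
 * Note on UIHASH(): hashinit() sizes the table to a power of two and
 * stores that size minus one in uihash, so the AND above is a cheap
 * modulus; e.g. with 64 buckets, uid 1001 maps to bucket 1001 & 63 == 41.
 */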

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);

/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
getpriority(struct thread *td, struct getpriority_args *uap)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (!p_cansee(td, p) &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}
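
/*
 * Note on the sentinel above: nice values span PRIO_MIN..PRIO_MAX, so -1
 * is a legitimate result and getpriority(2) callers cannot use it alone
 * to detect failure.  A typical userland caller (illustrative, not part
 * of this file):
 *
 *	errno = 0;
 *	prio = getpriority(PRIO_USER, uid);
 *	if (prio == -1 && errno != 0)
 *		err(1, "getpriority");
 */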

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
setpriority(struct thread *td, struct setpriority_args *uap)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_ucred->cr_uid == uap->who &&
			    !p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && suser(td) != 0)
		return (EACCES);
	mtx_lock_spin(&sched_lock);
	sched_nice(p, n);
	mtx_unlock_spin(&sched_lock);
	return (0);
}
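
/*
 * Illustrative userland counterpart (not part of this file): all three
 * "which" flavors of setpriority(2) funnel into donice(), and lowering
 * nice below the current value trips the suser() check above:
 *
 *	if (setpriority(PRIO_PROCESS, getpid(), -5) == -1)
 *		warn("setpriority");	-- EACCES unless superuser
 */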

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	/*
	 * Though lwpid is unique, only current process is supported
	 * since there is no efficient way to look up a LWP yet.
	 */
	p = td->td_proc;
	PROC_LOCK(p);

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		mtx_lock_spin(&sched_lock);
		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
			td1 = td;
		else
			td1 = thread_find(p, uap->lwpid);
		if (td1 != NULL)
#ifdef KSE
			pri_to_rtp(td1->td_ksegrp, &rtp);
#else
			pri_to_rtp(td1, &rtp);
#endif
		else
			error = ESRCH;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		if (suser(td) != 0) {
			/* can't set realtime priority */
			/*
			 * Realtime priority has to be restricted for reasons
			 * which should be obvious.  However, for idle priority,
			 * there is a potential for system deadlock if an
			 * idleprio process gains a lock on a resource that
			 * other processes need (and the idleprio process can't
			 * run due to a CPU-bound normal process).  Fix me!  XXX
			 */
#if 0
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
			if (rtp.type != RTP_PRIO_NORMAL) {
#endif
				error = EPERM;
				break;
			}
		}

		mtx_lock_spin(&sched_lock);
		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
			td1 = td;
		else
			td1 = thread_find(p, uap->lwpid);
		if (td1 != NULL)
#ifdef KSE
			error = rtp_to_pri(&rtp, td1->td_ksegrp);
#else
			error = rtp_to_pri(&rtp, td1);
#endif
		else
			error = ESRCH;
		mtx_unlock_spin(&sched_lock);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
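
/*
 * Illustrative use from userland (an assumption: the rtprio_thread(2)
 * wrapper, which takes the same function codes as rtprio(2)):
 *
 *	struct rtprio rtp;
 *
 *	rtp.type = RTP_PRIO_IDLE;
 *	rtp.prio = RTP_PRIO_MAX;
 *	if (rtprio_thread(RTP_SET, 0, &rtp) == -1)	-- 0 == this thread
 *		err(1, "rtprio_thread");
 */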

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *curp;
	struct proc *p;
#ifdef KSE
	struct ksegrp *kg;
#else
	struct thread *tdp;
#endif
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	curp = td->td_proc;
	if (uap->pid == 0) {
		p = curp;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		mtx_lock_spin(&sched_lock);
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * XXXKSE: maybe need a new interface to report
		 * priorities of multiple system scope threads.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
#ifdef KSE
			pri_to_rtp(td->td_ksegrp, &rtp);
#else
			pri_to_rtp(td, &rtp);
#endif
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
#ifdef KSE
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				pri_to_rtp(kg, &rtp2);
#else
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
#endif
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		if (suser(td) != 0) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/* can't set realtime priority */
			/*
			 * Realtime priority has to be restricted for reasons
			 * which should be obvious.  However, for idle priority,
			 * there is a potential for system deadlock if an
			 * idleprio process gains a lock on a resource that
			 * other processes need (and the idleprio process can't
			 * run due to a CPU-bound normal process).  Fix me!  XXX
			 */
#if 0
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
			if (rtp.type != RTP_PRIO_NORMAL) {
#endif
				error = EPERM;
				break;
			}
		}

#ifdef KSE
		/*
		 * If we are setting our own priority, set just our
		 * KSEGRP but if we are doing another process,
		 * do all the groups on that process.  If we
		 * specify our own pid we do the latter.
		 */
#else
		/*
		 * If we are setting our own priority, set just our
		 * thread but if we are doing another process,
		 * do all the threads on that process.  If we
		 * specify our own pid we do the latter.
		 */
#endif
		mtx_lock_spin(&sched_lock);
		if (uap->pid == 0) {
#ifdef KSE
			error = rtp_to_pri(&rtp, td->td_ksegrp);
#else
			error = rtp_to_pri(&rtp, td);
#endif
		} else {
#ifdef KSE
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if ((error = rtp_to_pri(&rtp, kg)) != 0) {
					break;
				}
#else
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
#endif
			}
		}
		mtx_unlock_spin(&sched_lock);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
#ifdef KSE
rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
#else
rtp_to_pri(struct rtprio *rtp, struct thread *td)
#endif
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (rtp->prio > RTP_PRIO_MAX)
		return (EINVAL);
	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
#ifdef KSE
		kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
#else
		td->td_user_pri = PRI_MIN_REALTIME + rtp->prio;
#endif
		break;
	case RTP_PRIO_NORMAL:
#ifdef KSE
		kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
#else
		td->td_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
#endif
		break;
	case RTP_PRIO_IDLE:
#ifdef KSE
		kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
#else
		td->td_user_pri = PRI_MIN_IDLE + rtp->prio;
#endif
		break;
	default:
		return (EINVAL);
	}
#ifdef KSE
	sched_class(kg, rtp->type);
	if (curthread->td_ksegrp == kg) {
		sched_prio(curthread, kg->kg_user_pri); /* XXX dubious */
	}
#else
	sched_class(td, rtp->type);	/* XXX fix */
	if (curthread == td)
		sched_prio(curthread, td->td_user_pri); /* XXX dubious */
#endif
	return (0);
}
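
/*
 * The mapping above is purely additive: a struct rtprio is a (class,
 * level) pair and the kernel priority is the class base plus the level,
 * e.g. { RTP_PRIO_REALTIME, 5 } yields PRI_MIN_REALTIME + 5.  The
 * RTP_PRIO_MAX bounds check keeps the result inside the class's band.
 */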

void
#ifdef KSE
pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
#else
pri_to_rtp(struct thread *td, struct rtprio *rtp)
#endif
{

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef KSE
	switch (PRI_BASE(kg->kg_pri_class)) {
#else
	switch (PRI_BASE(td->td_pri_class)) {
#endif
	case PRI_REALTIME:
#ifdef KSE
		rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
#else
		rtp->prio = td->td_user_pri - PRI_MIN_REALTIME;
#endif
		break;
	case PRI_TIMESHARE:
#ifdef KSE
		rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
#else
		rtp->prio = td->td_user_pri - PRI_MIN_TIMESHARE;
#endif
		break;
	case PRI_IDLE:
#ifdef KSE
		rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
#else
		rtp->prio = td->td_user_pri - PRI_MIN_IDLE;
#endif
		break;
	default:
		break;
	}
#ifdef KSE
	rtp->type = kg->kg_pri_class;
#else
	rtp->type = td->td_pri_class;
#endif
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
setrlimit(struct thread *td, struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct proc *p;
	struct rlimit *alimp;
	rlim_t oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz = 0;
	p = td->td_proc;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser_cred(td->td_ucred, SUSER_ALLOWJAIL))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		mtx_lock_spin(&sched_lock);
		p->p_cpulimit = limp->rlim_cur;
		mtx_unlock_spin(&sched_lock);
		break;

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = alimp->rlim_cur;
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack - oldssiz;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	/*
	 * The data size limit may need to be changed to a value
	 * that makes sense for the 32 bit binary.
	 */
	if (p->p_sysent->sv_fixlimits != NULL)
		p->p_sysent->sv_fixlimits(p);
	return (0);
}
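
/*
 * Worked example for the RLIMIT_STACK case above: growing the soft limit
 * from 8MB to 16MB re-enables access to the 8MB strip just below the old
 * limit:
 *
 *	addr = trunc_page(sv_usrstack - 16MB);
 *	size = round_page(16MB - 8MB);
 *	vm_map_protect(map, addr, addr + size, sv_stackprot, FALSE);
 *
 * Shrinking takes the symmetric branch and marks the strip VM_PROT_NONE
 * instead.
 */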

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
getrlimit(struct thread *td, struct __getrlimit_args *uap)
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct rusage_ext rux;
	struct thread *td;
	uint64_t u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);

	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (curthread->td_proc == p) {
		td = curthread;
		u = cpu_ticks();
		p->p_rux.rux_runtime += u - PCPU_GET(switchtime);
		PCPU_SET(switchtime, u);
		p->p_rux.rux_uticks += td->td_uticks;
		td->td_uticks = 0;
		p->p_rux.rux_iticks += td->td_iticks;
		td->td_iticks = 0;
		p->p_rux.rux_sticks += td->td_sticks;
		td->td_sticks = 0;
	}
	/* Work on a copy of p_rux so we can let go of sched_lock */
	rux = p->p_rux;
	mtx_unlock_spin(&sched_lock);
	calcru1(p, &rux, up, sp);
	/* Update the result from the p_rux copy */
	p->p_rux.rux_uu = rux.rux_uu;
	p->p_rux.rux_su = rux.rux_su;
	p->p_rux.rux_tu = rux.rux_tu;
}

static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	u_int64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
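
/*
 * Example of the bucketing arithmetic above: with ut = 700 user ticks,
 * st = 300 system ticks and tu = 1000000 usec of measured runtime,
 * tt = 1000, so uu = (1000000 * 700) / 1000 = 700000 usec and
 * su = 300000 usec; the ticks only apportion the tick-accurate total.
 */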

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {

	case RUSAGE_SELF:
		*rup = p->p_stats->p_ru;
		calcru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	default:
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);
	return (0);
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{
	long *ip, *ip2;
	int i;

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
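
/*
 * The pointer walk above relies on struct rusage laying out all the
 * fields from ru_first through ru_last as a contiguous run of longs
 * (see <sys/resource.h>), so everything after ru_maxrss is summed with
 * one loop.
 */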

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc()
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

void
lim_free(struct plimit *limp)
{

	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit()
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF);
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_mtx must be locked.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	mtx_assert(&uihashtbl_mtx, MA_OWNED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *old_uip, *uip;

	mtx_lock(&uihashtbl_mtx);
	uip = uilookup(uid);
	if (uip == NULL) {
		mtx_unlock(&uihashtbl_mtx);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		mtx_lock(&uihashtbl_mtx);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep);
			uip->ui_uid = uid;
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	mtx_unlock(&uihashtbl_mtx);
	return (uip);
}
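
/*
 * The unlock/malloc/relock dance above is the usual optimistic
 * allocation pattern: the hash lock is dropped so an M_WAITOK malloc()
 * cannot sleep while holding it, then the lookup is repeated in case
 * another thread installed the same uid in the meantime.
 */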

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	UIDINFO_LOCK(uip);
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(struct uidinfo *uip)
{

	/* Prepare for optimal case. */
	UIDINFO_LOCK(uip);

	if (--uip->ui_ref != 0) {
		UIDINFO_UNLOCK(uip);
		return;
	}

	/* Prepare for suboptimal case. */
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
	mtx_lock(&uihashtbl_mtx);
	UIDINFO_LOCK(uip);

	/*
	 * We must subtract one from the count again because we backed out
	 * our initial subtraction before dropping the lock.
	 * Since another thread may have added a reference after we dropped the
	 * initial lock we have to test for zero again.
	 */
	if (--uip->ui_ref == 0) {
		LIST_REMOVE(uip, ui_hash);
		mtx_unlock(&uihashtbl_mtx);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %jd\n",
			    uip->ui_uid, (intmax_t)uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		UIDINFO_UNLOCK(uip);
		FREE(uip, M_UIDINFO);
		return;
	}

	mtx_unlock(&uihashtbl_mtx);
	UIDINFO_UNLOCK(uip);
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{

	UIDINFO_LOCK(uip);
	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_proccnt += diff;
	if (uip->ui_proccnt < 0)
		printf("negative proccnt for uid = %d\n", uip->ui_uid);
	UIDINFO_UNLOCK(uip);
	return (1);
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	rlim_t new;

	UIDINFO_LOCK(uip);
	new = uip->ui_sbsize + to - *hiwat;
	/* Don't allow them to exceed max, but allow subtraction. */
	if (to > *hiwat && new > max) {
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_sbsize = new;
	UIDINFO_UNLOCK(uip);
	*hiwat = to;
	if (new < 0)
		printf("negative sbsize for uid = %d\n", uip->ui_uid);
	return (1);
}
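
/*
 * Example of the accounting above: if a socket's high-water mark grows
 * from 16384 to 65536 bytes, "to - *hiwat" adds 49152 to ui_sbsize, and
 * the call fails (returns 0) only when that would push the uid past
 * "max"; shrinking always succeeds since new <= ui_sbsize.
 */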