/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");

#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */
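
/*
 * Illustrative note: uihash holds a power-of-two table size minus one,
 * so UIHASH(uid) simply masks the uid into a bucket.  For example, with
 * uihash == 127, uid 1001 selects &uihashtbl[1001 & 127], i.e. bucket 105.
 */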

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);

/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif /* _SYS_SYSPROTO_H_ */
int
getpriority(td, uap)
	struct thread *td;
	register struct getpriority_args *uap;
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			/* Do not bother to check PRS_NEW processes. */
			if (p->p_state == PRS_NEW)
				continue;
			PROC_LOCK(p);
			if (p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
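
	/*
	 * Illustrative note: if the switch above never found a matching
	 * process, "low" still holds the PRIO_MAX + 1 sentinel it was
	 * initialized with; report ESRCH instead of a bogus priority.
	 */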
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif /* _SYS_SYSPROTO_H_ */
int
setpriority(td, uap)
	struct thread *td;
	struct setpriority_args *uap;
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_ucred->cr_uid == uap->who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}
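
/*
 * Illustrative note: donice() clamps n to [PRIO_MIN, PRIO_MAX], and an
 * unprivileged caller may only make a process "nicer" (a numerically
 * larger n); lowering the nice value again, say from 5 back to 0,
 * requires PRIV_SCHED_SETPRIORITY.
 */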

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	/*
	 * Though lwpid is unique, only the current process is supported
	 * since there is no efficient way to look up an LWP yet.
	 */
	p = td->td_proc;
	PROC_LOCK(p);

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
			td1 = td;
		else
			td1 = thread_find(p, uap->lwpid);
		if (td1 != NULL)
			pri_to_rtp(td1, &rtp);
		else
			error = ESRCH;
		PROC_UNLOCK(p);
		if (error)
			return (error);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idle priority, there is
		 * a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need
		 * (and the idleprio process can't run due to a CPU-bound
		 * normal process).  Fix me!  XXX
		 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}
		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
			td1 = td;
		else
			td1 = thread_find(p, uap->lwpid);
		if (td1 != NULL)
			error = rtp_to_pri(&rtp, td1);
		else
			error = ESRCH;
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
rtprio(td, uap)
	struct thread *td;		/* curthread */
	register struct rtprio_args *uap;
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid is specified; if one is,
		 * report the highest priority in the process.  There isn't
		 * much more you can do, as there is only room to return a
		 * single priority.
		 * Note: specifying our own pid is not the same as leaving
		 * it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
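
		/*
		 * Illustrative note: numerically smaller type and prio
		 * values denote stronger priorities, so the scan above
		 * starts from the weakest possible setting (RTP_PRIO_IDLE,
		 * RTP_PRIO_MAX) and keeps the minimum it finds.
		 */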
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idle priority, there is
		 * a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need
		 * (and the idleprio process can't run due to a CPU-bound
		 * normal process).  Fix me!  XXX
		 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}
		/*
		 * If we are setting our own priority, set just our thread,
		 * but if we are doing another process, do all the threads
		 * of that process.  If we specify our own pid, we do the
		 * latter.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri;
	u_char	oldpri;

	thread_lock(td);
	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX) {
			thread_unlock(td);
			return (EINVAL);
		}
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE)) {
			thread_unlock(td);
			return (EINVAL);
		}
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		thread_unlock(td);
		return (EINVAL);
	}
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (curthread == td)
		sched_prio(curthread, td->td_user_pri); /* XXX dubious */
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
	} else
		thread_unlock(td);
	return (0);
}
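
/*
 * Illustrative note: rtp_to_pri() maps a (class, prio) pair onto the
 * kernel's global priority scale by offsetting from the class base,
 * e.g. RTP_PRIO_REALTIME with rtp->prio == 5 yields
 * PRI_MIN_REALTIME + 5.  If the thread sits on a priority-inheriting
 * umtx lock, umtx_pi_adjust() is called so any priority lent through
 * the lock is recomputed against the new base.
 */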

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(td, uap)
	struct thread *td;
	register struct osetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(td, uap)
	struct thread *td;
	register struct ogetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX it would be more correct to convert only RLIM_INFINITY to
	 * the old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including some not
	 * unimportant ones of uids, are even more broken than what we do
	 * here (they blindly truncate).  We don't do this correctly here
	 * since we have little experience with EOVERFLOW yet.  Elsewhere,
	 * getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
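
/*
 * Illustrative note: the clamp above means a 64-bit limit such as
 * RLIM_INFINITY is reported to old binaries as 0x7fffffff (the largest
 * positive 32-bit value) rather than failing with EOVERFLOW.
 */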
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
setrlimit(td, uap)
	struct thread *td;
	register struct __setrlimit_args *uap;
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its CPU resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		ruxagg(&p->p_rux, td);
		thread_unlock(td);
	}
	PROC_SUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset(&p->p_limco, hz, lim_cb, p);
}
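
/*
 * Illustrative note: lim_cb() re-arms itself once per second (hz ticks),
 * so the CPU limit is enforced by periodic polling: each pass aggregates
 * per-thread run time into p_rux and compares it against the limit
 * converted to CPU ticks via cpu_tickrate().
 */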

int
kern_setrlimit(td, which, limp)
	struct thread *td;
	u_int which;
	struct rlimit *limp;
{
	struct plimit *newlim, *oldlim;
	struct proc *p;
	register struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	p = td->td_proc;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset(&p->p_limco, hz, lim_cb, p);
		p->p_cpulimit = limp->rlim_cur;
		break;

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (td->td_proc->p_sysent->sv_fixlimit != NULL)
			td->td_proc->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (td->td_proc->p_sysent->sv_fixlimit != NULL)
		td->td_proc->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If the stack limit is going
		 * up, make more of the stack accessible; if it is going
		 * down, make it inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}
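
/*
 * Illustrative note: the stack lives just below sv_usrstack, so when
 * RLIMIT_STACK grows, the newly legal range
 * [sv_usrstack - rlim_cur_new, sv_usrstack - rlim_cur_old) receives the
 * ABI's stack protection; when it shrinks, the now-excess range is
 * remapped with VM_PROT_NONE.
 */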

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
getrlimit(td, uap)
	struct thread *td;
	register struct __getrlimit_args *uap;
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(p, up, sp)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		p->p_rux.rux_runtime += u - PCPU_GET(switchtime);
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		thread_lock(td);
		ruxagg(&p->p_rux, td);
		thread_unlock(td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	u_int64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero. */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert. /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}
	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What likely happened here is that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
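
/*
 * Illustrative note: the bucketed split prorates the total microseconds
 * across tick counts, e.g. uu = (tu * ut) / tt.  With tu = 2,500,000
 * usec, ut = 3 user ticks and tt = 5 total ticks, the user component is
 * 1,500,000 usec, which the code above returns as tv_sec = 1,
 * tv_usec = 500000.
 */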

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
getrusage(td, uap)
	register struct thread *td;
	register struct getrusage_args *uap;
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(td, who, rup)
	struct thread *td;
	int who;
	struct rusage *rup;
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
void
ruxagg(struct rusage_ext *rux, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_SLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			ruxagg(&p->p_rux, td);
			thread_unlock(td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume that calcru() is executed only after
 * rufetch() has completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_SLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_SUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc()
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(limp)
	struct plimit *limp;
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset(&p2->p_limco, hz, lim_cb, p2);
}
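
/*
 * Illustrative note: fork does not copy the resource limits; the child
 * just takes a reference on the parent's plimit (shared copy-on-write,
 * see lim_copy() below), and the per-second CPU-limit callout is armed
 * only if the parent actually had a finite RLIMIT_CPU.
 */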

void
lim_free(limp)
	struct plimit *limp;
{

	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(dst, src)
	struct plimit *dst, *src;
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}
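
/*
 * Illustrative caller pattern (sketch): limits must be read with the
 * process lock held, per the assertion above, e.g.
 *
 *	rlim_t nofile;
 *
 *	PROC_LOCK(p);
 *	nofile = lim_cur(p, RLIMIT_NOFILE);
 *	PROC_UNLOCK(p);
 */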

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit()
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}
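
/*
 * Illustrative note: the table is sized to roughly one bucket per 16
 * processes allowed by maxproc, and the rwlock lets concurrent lookups
 * in uifind() below run shared (rw_rlock) while insertions and removals
 * take it exclusive (rw_wlock).
 */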

/*
 * Look up a uidinfo struct for the given uid.
 * uihashtbl_lock must be locked.
 */
static struct uidinfo *
uilookup(uid)
	uid_t uid;
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase the refcount on the uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid)
	uid_t uid;
{
	struct uidinfo *old_uip, *uip;

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		rw_runlock(&uihashtbl_lock);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		rw_wlock(&uihashtbl_lock);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			refcount_init(&uip->ui_ref, 0);
			uip->ui_uid = uid;
			mtx_init(&uip->ui_vmsize_mtx, "ui_vmsize", NULL,
			    MTX_DEF);
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	rw_unlock(&uihashtbl_lock);
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(uip)
	struct uidinfo *uip;
{

	refcount_acquire(&uip->ui_ref);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in a need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(uip)
	struct uidinfo *uip;
{
	int old;

	/* Prepare for optimal case. */
	old = uip->ui_ref;
	if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
		return;

	/* Prepare for suboptimal case. */
	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref)) {
		LIST_REMOVE(uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		if (uip->ui_vmsize != 0)
			printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
			    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
		mtx_destroy(&uip->ui_vmsize_mtx);
		free(uip, M_UIDINFO);
		return;
	}
	/*
	 * Someone added a reference between atomic_cmpset_int() and
	 * rw_wlock(&uihashtbl_lock), so we don't need to free the
	 * struct just yet.
	 */
	rw_wunlock(&uihashtbl_lock);
}
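
/*
 * Illustrative note on the fast path: atomic_cmpset_int() only succeeds
 * when the count still equals "old" and old > 1, i.e. when this release
 * cannot be the last one.  Only a potentially-final release falls back
 * to taking uihashtbl_lock exclusively, so the common case never touches
 * the hash lock at all.
 */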

/*
 * Change the count of processes a given user is using.  When 'max'
 * is 0, don't enforce a limit.
 */
int
chgproccnt(uip, diff, max)
	struct	uidinfo	*uip;
	int	diff;
	rlim_t	max;
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_proccnt,
		    (long)diff) + diff > max) {
			atomic_subtract_long(&uip->ui_proccnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_proccnt, (long)diff);
		if (uip->ui_proccnt < 0)
			printf("negative proccnt for uid = %d\n",
			    uip->ui_uid);
	}
	return (1);
}
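
/*
 * Illustrative note: atomic_fetchadd_long() returns the value the
 * counter held before the addition, so "old + diff > max" detects an
 * overshoot without taking a lock; e.g. with max = 100, proccnt = 99
 * and diff = 2, the check sees 99 + 2 > 100 and backs the addition out.
 */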

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(uip, hiwat, to, max)
	struct	uidinfo	*uip;
	u_int	*hiwat;
	u_int	to;
	rlim_t	max;
{
	int diff;

	diff = to - *hiwat;
	if (diff > 0) {
		if (atomic_fetchadd_long(&uip->ui_sbsize,
		    (long)diff) + diff > max) {
			atomic_subtract_long(&uip->ui_sbsize, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_sbsize, (long)diff);
		if (uip->ui_sbsize < 0)
			printf("negative sbsize for uid = %d\n",
			    uip->ui_uid);
	}
	*hiwat = to;
	return (1);
}

/*
 * Change the count of pseudo-terminals a given user is using.  When
 * 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(uip, diff, max)
	struct	uidinfo	*uip;
	int	diff;
	rlim_t	max;
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_ptscnt,
		    (long)diff) + diff > max) {
			atomic_subtract_long(&uip->ui_ptscnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_ptscnt, (long)diff);
		if (uip->ui_ptscnt < 0)
			printf("negative ptscnt for uid = %d\n",
			    uip->ui_uid);
	}
	return (1);
}