/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct mtx uihashtbl_mtx;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */
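
/*
 * Illustrative sketch (hypothetical names, not part of this file): UIHASH()
 * relies on the table size being a power of two, so masking with uihash
 * (the size minus one) acts as a cheap modulo when picking a bucket.
 */
#if 0
#define	EX_HASHSIZE	128			/* must be a power of two */
u_long	ex_hash = EX_HASHSIZE - 1;		/* i.e., 0x7f */
#define	EX_BUCKET(uid)	((uid) & ex_hash)	/* uid 130 -> bucket 2 */
#endif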

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);

/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
getpriority(td, uap)
	struct thread *td;
	register struct getpriority_args *uap;
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (!p_cansee(td, p) &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}
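
/*
 * Example (illustrative, userland): because getpriority(2) can legitimately
 * return -1, callers must clear errno before the call, mirroring the
 * PRIO_MAX + 1 sentinel used above to detect "no process found".
 */
#if 0
	int low;

	errno = 0;
	low = getpriority(PRIO_USER, getuid());
	if (low == -1 && errno != 0)
		err(1, "getpriority");
#endif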

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
setpriority(td, uap)
	struct thread *td;
	struct setpriority_args *uap;
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			/* Do not bother to check PRS_NEW processes. */
			if (p->p_state == PRS_NEW)
				continue;
			PROC_LOCK(p);
			if (p->p_ucred->cr_uid == uap->who &&
			    !p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && suser(td) != 0)
		return (EACCES);
	mtx_lock_spin(&sched_lock);
	sched_nice(p, n);
	mtx_unlock_spin(&sched_lock);
	return (0);
}
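
/*
 * Example (illustrative, userland): per the suser() check above, an
 * unprivileged process may raise but not lower a nice value.
 */
#if 0
	if (setpriority(PRIO_PROCESS, 0, 10) == -1)	/* raise: allowed */
		warn("setpriority");
	if (setpriority(PRIO_PROCESS, 0, 0) == -1)	/* lower: EACCES */
		warn("setpriority");			/* unless superuser */
#endif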

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
rtprio(td, uap)
	struct thread *td;		/* curthread */
	register struct rtprio_args *uap;
{
	struct proc *curp, *p;
	struct ksegrp *kg;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	curp = td->td_proc;
	if (uap->pid == 0) {
		p = curp;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		mtx_lock_spin(&sched_lock);
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * XXXKSE: maybe need a new interface to report
		 * priorities of multiple system scope threads.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td->td_ksegrp, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				pri_to_rtp(kg, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		if (suser(td) != 0) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
#if 0
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
			if (rtp.type != RTP_PRIO_NORMAL) {
#endif
				error = EPERM;
				break;
			}
		}

		/*
		 * If we are setting our own priority, set just our
		 * KSEGRP but if we are doing another process,
		 * do all the groups on that process.  If we
		 * specify our own pid we do the latter.
		 */
		mtx_lock_spin(&sched_lock);
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td->td_ksegrp);
		} else {
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if ((error = rtp_to_pri(&rtp, kg)) != 0)
					break;
			}
		}
		mtx_unlock_spin(&sched_lock);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
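
/*
 * Example (illustrative, userland): reading the current realtime class and
 * priority with rtprio(2), which exercises the RTP_LOOKUP path above.
 */
#if 0
	struct rtprio rtp;

	if (rtprio(RTP_LOOKUP, 0, &rtp) == -1)
		err(1, "rtprio");
	printf("type %d prio %d\n", rtp.type, rtp.prio);
#endif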

int
rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (rtp->prio > RTP_PRIO_MAX)
		return (EINVAL);
	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}
	sched_class(kg, rtp->type);
	if (curthread->td_ksegrp == kg) {
		sched_prio(curthread, kg->kg_user_pri); /* XXX dubious */
	}
	return (0);
}

void
pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
{

	mtx_assert(&sched_lock, MA_OWNED);
	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = kg->kg_pri_class;
}
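
/*
 * Worked example (illustrative): the two conversions above are inverses
 * within a class.  Assuming a hypothetical PRI_MIN_REALTIME of 128,
 * rtp_to_pri() maps {RTP_PRIO_REALTIME, 5} to kg_user_pri = 133, and
 * pri_to_rtp() maps 133 back to prio 5.
 */
#if 0
	kg->kg_user_pri = 128 + 5;		/* rtp_to_pri(): 133 */
	rtp->prio = kg->kg_user_pri - 128;	/* pri_to_rtp(): 5 again */
#endif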

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(td, uap)
	struct thread *td;
	register struct osetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(td, uap)
	struct thread *td;
	register struct ogetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids, are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */
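
/*
 * Worked example (illustrative): the clamp above reports any 64-bit limit
 * larger than 2^31 - 1 (including RLIM_INFINITY) to old binaries as
 * 0x7fffffff instead of failing with EOVERFLOW.
 */
#if 0
	rl.rlim_cur = RLIM_INFINITY;	/* 64-bit "no limit" */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	/* olim.rlim_cur == 0x7fffffff, the old 32-bit "infinity" */
#endif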

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
setrlimit(td, uap)
	struct thread *td;
	register struct __setrlimit_args *uap;
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

int
kern_setrlimit(td, which, limp)
	struct thread *td;
	u_int which;
	struct rlimit *limp;
{
	struct plimit *newlim, *oldlim;
	struct proc *p;
	register struct rlimit *alimp;
	rlim_t oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz = 0;
	p = td->td_proc;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser_cred(td->td_ucred, SUSER_ALLOWJAIL))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		mtx_lock_spin(&sched_lock);
		p->p_cpulimit = limp->rlim_cur;
		mtx_unlock_spin(&sched_lock);
		break;

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = alimp->rlim_cur;
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (td->td_proc->p_sysent->sv_fixlimit != NULL)
		td->td_proc->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack - oldssiz;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}
	return (0);
}
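
/*
 * Worked example (illustrative, hypothetical numbers): growing RLIMIT_STACK
 * from 8MB to 16MB with sv_usrstack at 0xc0000000 re-enables access to the
 * 8MB immediately below the old stack bottom.
 */
#if 0
	prot = p->p_sysent->sv_stackprot;	/* e.g. read/write */
	size = (16 << 20) - (8 << 20);		/* 8MB newly accessible */
	addr = 0xc0000000 - (16 << 20);		/* new stack bottom */
	/* vm_map_protect() then grants 'prot' on [addr, addr + size). */
#endif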

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
getrlimit(td, uap)
	struct thread *td;
	register struct __getrlimit_args *uap;
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
void
calcru(p, up, sp)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
{
	struct rusage_ext rux;
	struct thread *td;
	struct bintime bt;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);
	rux = p->p_rux;
	FOREACH_THREAD_IN_PROC(p, td) {
		if (TD_IS_RUNNING(td)) {
			/*
			 * Adjust for the current time slice.  This is
			 * actually fairly important since the error here is
			 * on the order of a time quantum, which is much
			 * greater than the precision of binuptime().
			 */
			KASSERT(td->td_oncpu != NOCPU,
			    ("%s: running thread has no CPU, pid %d, tid %d",
			    __func__, p->p_pid, td->td_tid));
			binuptime(&bt);
			bintime_add(&rux.rux_runtime, &bt);
			bintime_sub(&rux.rux_runtime,
			    &pcpu_find(td->td_oncpu)->pc_switchtime);
		}
	}
	mtx_unlock_spin(&sched_lock);
	calcru1(p, &rux, up, sp);
	p->p_rux.rux_uu = rux.rux_uu;
	p->p_rux.rux_su = rux.rux_su;
	p->p_rux.rux_iu = rux.rux_iu;
}

void
calccru(p, up, sp)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

static void
calcru1(p, ruxp, up, sp)
	struct proc *p;
	struct rusage_ext *ruxp;
	struct timeval *up;
	struct timeval *sp;
{
	struct timeval tv;
	/* {user, system, interrupt, total} {ticks, usec}; previous tu: */
	u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		st = 1;
		tt = 1;
	}
	bintime2timeval(&ruxp->rux_runtime, &tv);
	tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
	ptu = ruxp->rux_uu + ruxp->rux_su + ruxp->rux_iu;
	if (tu < ptu) {
		printf(
"calcru: runtime went backwards from %ju usec to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ptu, (uintmax_t)tu, p->p_pid, p->p_comm);
		tu = ptu;
	}
	if ((int64_t)tu < 0) {
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ptu;
	}

	/* Subdivide tu in proportion to the tick counts. */
	uu = (tu * ut) / tt;
	su = (tu * st) / tt;
	iu = tu - uu - su;

	/* Enforce monotonicity. */
	if (uu < ruxp->rux_uu || su < ruxp->rux_su || iu < ruxp->rux_iu) {
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		else if (uu + ruxp->rux_su + ruxp->rux_iu > tu)
			uu = tu - ruxp->rux_su - ruxp->rux_iu;
		if (st == 0)
			su = ruxp->rux_su;
		else {
			su = ((tu - uu) * st) / (st + it);
			if (su < ruxp->rux_su)
				su = ruxp->rux_su;
			else if (uu + su + ruxp->rux_iu > tu)
				su = tu - uu - ruxp->rux_iu;
		}
		KASSERT(uu + su + ruxp->rux_iu <= tu,
		    ("calcru: monotonisation botch 1"));
		iu = tu - uu - su;
		KASSERT(iu >= ruxp->rux_iu,
		    ("calcru: monotonisation botch 2"));
	}
	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_iu = iu;
	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
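
/*
 * Worked example (illustrative): calcru1() splits the total runtime tu in
 * proportion to the statclock tick counts.  With ut = 300 user ticks,
 * st = 100 system ticks, it = 0 and tu = 4000000 usec:
 */
#if 0
	uu = (4000000 * 300) / 400;	/* 3000000 usec of user time */
	su = (4000000 * 100) / 400;	/* 1000000 usec of system time */
	iu = 4000000 - uu - su;		/* 0 usec of interrupt time */
#endif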

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
getrusage(td, uap)
	register struct thread *td;
	register struct getrusage_args *uap;
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(td, who, rup)
	struct thread *td;
	int who;
	struct rusage *rup;
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {

	case RUSAGE_SELF:
		*rup = p->p_stats->p_ru;
		calcru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	default:
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);
	return (0);
}

void
ruadd(ru, rux, ru2, rux2)
	struct rusage *ru;
	struct rusage_ext *rux;
	struct rusage *ru2;
	struct rusage_ext *rux2;
{
	register long *ip, *ip2;
	register int i;

	bintime_add(&rux->rux_runtime, &rux2->rux_runtime);
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_iu += rux2->rux_iu;
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc()
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	limp->pl_refcnt = 1;
	limp->pl_mtx = mtx_pool_alloc(mtxpool_sleep);
	return (limp);
}

void
lim_free(limp)
	struct plimit *limp;
{

	LIM_LOCK(limp);
	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (--limp->pl_refcnt == 0) {
		LIM_UNLOCK(limp);
		free((void *)limp, M_PLIMIT);
		return;
	}
	LIM_UNLOCK(limp);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(dst, src)
	struct plimit *dst, *src;
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}
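
/*
 * Usage sketch (illustrative): the copy-on-write pattern kern_setrlimit()
 * follows above -- take a private copy before modifying a possibly shared
 * plimit, then drop the reference on the old one.
 */
#if 0
	newlim = lim_alloc();		/* fresh structure, refcnt 1 */
	PROC_LOCK(p);
	oldlim = p->p_limit;
	lim_copy(newlim, oldlim);	/* duplicate the rlimit array */
	newlim->pl_rlimit[which] = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);		/* release reference on old copy */
#endif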

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}
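
/*
 * Example (illustrative): both accessors expect the caller to hold the
 * process lock, which lim_rlimit() asserts below.
 */
#if 0
	PROC_LOCK(p);
	maxfd = lim_cur(p, RLIMIT_NOFILE);	/* soft descriptor limit */
	maxdata = lim_max(p, RLIMIT_DATA);	/* hard data-size limit */
	PROC_UNLOCK(p);
#endif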

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit()
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF);
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_mtx must be locked.
 */
static struct uidinfo *
uilookup(uid)
	uid_t uid;
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	mtx_assert(&uihashtbl_mtx, MA_OWNED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid)
	uid_t uid;
{
	struct uidinfo *old_uip, *uip;

	mtx_lock(&uihashtbl_mtx);
	uip = uilookup(uid);
	if (uip == NULL) {
		mtx_unlock(&uihashtbl_mtx);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		mtx_lock(&uihashtbl_mtx);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep);
			uip->ui_uid = uid;
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	mtx_unlock(&uihashtbl_mtx);
	return (uip);
}
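
/*
 * Usage sketch (illustrative): the uidinfo lifecycle around the functions
 * above and below -- uifind() returns a referenced structure.
 */
#if 0
	uip = uifind(uid);		/* find or create; takes a ref */
	(void)chgproccnt(uip, 1, 0);	/* charge resources to the uid */
	/* ... later, when the resource is released ... */
	(void)chgproccnt(uip, -1, 0);
	uifree(uip);			/* drop the ref when done */
#endif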

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(uip)
	struct uidinfo *uip;
{

	UIDINFO_LOCK(uip);
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 *
 * After locking the struct and lowering the refcount, if we find
 * that we don't need to free, simply unlock and return.
 *
 * If refcount lowering results in a need to free, bump the count
 * back up, release the lock and acquire the locks in the proper
 * order to try again.
 */
void
uifree(uip)
	struct uidinfo *uip;
{

	/* Prepare for optimal case. */
	UIDINFO_LOCK(uip);

	if (--uip->ui_ref != 0) {
		UIDINFO_UNLOCK(uip);
		return;
	}

	/* Prepare for suboptimal case. */
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
	mtx_lock(&uihashtbl_mtx);
	UIDINFO_LOCK(uip);

	/*
	 * We must subtract one from the count again because we backed out
	 * our initial subtraction before dropping the lock.
	 * Since another thread may have added a reference after we dropped
	 * the initial lock we have to test for zero again.
	 */
	if (--uip->ui_ref == 0) {
		LIST_REMOVE(uip, ui_hash);
		mtx_unlock(&uihashtbl_mtx);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %jd\n",
			    uip->ui_uid, (intmax_t)uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		UIDINFO_UNLOCK(uip);
		free(uip, M_UIDINFO);
		return;
	}

	mtx_unlock(&uihashtbl_mtx);
	UIDINFO_UNLOCK(uip);
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(uip, diff, max)
	struct uidinfo *uip;
	int diff;
	int max;
{

	UIDINFO_LOCK(uip);
	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_proccnt += diff;
	if (uip->ui_proccnt < 0)
		printf("negative proccnt for uid = %d\n", uip->ui_uid);
	UIDINFO_UNLOCK(uip);
	return (1);
}
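
/*
 * Example (illustrative, in the style of fork-time enforcement):
 * chgproccnt() returns 0 when the increment would exceed max, and 1 on
 * success.
 */
#if 0
	if (!chgproccnt(uip, 1, lim_cur(p, RLIMIT_NPROC)))
		return (EAGAIN);	/* too many processes for this uid */
#endif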

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(uip, hiwat, to, max)
	struct uidinfo *uip;
	u_int *hiwat;
	u_int to;
	rlim_t max;
{
	rlim_t new;

	UIDINFO_LOCK(uip);
	new = uip->ui_sbsize + to - *hiwat;
	/* Don't allow them to exceed max, but allow subtraction. */
	if (to > *hiwat && new > max) {
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_sbsize = new;
	UIDINFO_UNLOCK(uip);
	*hiwat = to;
	if (new < 0)
		printf("negative sbsize for uid = %d\n", uip->ui_uid);
	return (1);
}