/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct mtx uihashtbl_mtx;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
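
/*
 * Illustrative note on the UIHASH() macro above (example numbers, not
 * from this file): hashinit() allocates a power-of-two number of
 * buckets and stores size - 1 in uihash, so with 16 buckets uihash is
 * 15 and UIHASH(1234) selects bucket 1234 & 15 == 2.  Masking works as
 * a modulus only because the table size is a power of two.
 */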

/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
getpriority(td, uap)
	struct thread *td;
	register struct getpriority_args *uap;
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (!p_cansee(td, p) &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
setpriority(td, uap)
	struct thread *td;
	struct setpriority_args *uap;
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_ucred->cr_uid == uap->who &&
			    !p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && suser(td) != 0)
		return (EACCES);
	mtx_lock_spin(&sched_lock);
	sched_nice(p, n);
	mtx_unlock_spin(&sched_lock);
	return (0);
}
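
/*
 * Example of the policy enforced above (hypothetical values): an
 * unprivileged process at nice 0 may donice() itself up to 10 and may
 * later move to any value >= 10, but returning to 0 needs suser(),
 * since the requested value would then be below the current p_nice.
 */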

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
rtprio(td, uap)
	struct thread *td;		/* curthread */
	register struct rtprio_args *uap;
{
	struct proc *curp;
	struct proc *p;
	struct ksegrp *kg;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	curp = td->td_proc;
	if (uap->pid == 0) {
		p = curp;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		mtx_lock_spin(&sched_lock);
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * XXXKSE: maybe need a new interface to report
		 * priorities of multiple system scope threads.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td->td_ksegrp, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				pri_to_rtp(kg, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		if (suser(td) != 0) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
#if 0
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
			if (rtp.type != RTP_PRIO_NORMAL) {
#endif
				error = EPERM;
				break;
			}
		}

		/*
		 * If we are setting our own priority, set just our
		 * KSEGRP but if we are doing another process,
		 * do all the groups on that process.  If we
		 * specify our own pid we do the latter.
		 */
		mtx_lock_spin(&sched_lock);
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td->td_ksegrp);
		} else {
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if ((error = rtp_to_pri(&rtp, kg)) != 0)
					break;
			}
		}
		mtx_unlock_spin(&sched_lock);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (rtp->prio > RTP_PRIO_MAX)
		return (EINVAL);
	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}
	sched_class(kg, rtp->type);
	if (curthread->td_ksegrp == kg) {
		sched_prio(curthread, kg->kg_user_pri); /* XXX dubious */
	}
	return (0);
}

void
pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
{

	mtx_assert(&sched_lock, MA_OWNED);
	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = kg->kg_pri_class;
}
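
/*
 * Worked example of the mapping above (actual values depend on
 * <sys/priority.h>): a struct rtprio of { RTP_PRIO_REALTIME, 5 }
 * yields kg_user_pri = PRI_MIN_REALTIME + 5, and pri_to_rtp()
 * inverts this by subtracting the same class base, so the pair
 * round-trips as long as the priority class is unchanged.
 */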

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(td, uap)
	struct thread *td;
	register struct osetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(td, uap)
	struct thread *td;
	register struct ogetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
setrlimit(td, uap)
	struct thread *td;
	register struct __setrlimit_args *uap;
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

int
kern_setrlimit(td, which, limp)
	struct thread *td;
	u_int which;
	struct rlimit *limp;
{
	struct plimit *newlim, *oldlim;
	struct proc *p;
	register struct rlimit *alimp;
	rlim_t oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz = 0;
	p = td->td_proc;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser_cred(td->td_ucred, SUSER_ALLOWJAIL))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		mtx_lock_spin(&sched_lock);
		p->p_cpulimit = limp->rlim_cur;
		mtx_unlock_spin(&sched_lock);
		break;

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = alimp->rlim_cur;
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack - oldssiz;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}
	return (0);
}
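
/*
 * Sketch of the RLIMIT_STACK arithmetic above with assumed numbers:
 * growing rlim_cur from 2MB to 8MB re-enables access, with
 * sv_stackprot, to the 6MB range [sv_usrstack - 8MB, sv_usrstack - 2MB)
 * below the top of the stack; shrinking back would vm_map_protect()
 * the same page-aligned range to VM_PROT_NONE instead.
 */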

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
getrlimit(td, uap)
	struct thread *td;
	register struct __getrlimit_args *uap;
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
void
calcru(p, up, sp)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
{
	struct bintime bt;
	struct rusage_ext rux;
	struct thread *td;
	int bt_valid;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_NOTOWNED);
	bt_valid = 0;
	mtx_lock_spin(&sched_lock);
	rux = p->p_rux;
	FOREACH_THREAD_IN_PROC(p, td) {
		if (TD_IS_RUNNING(td)) {
			/*
			 * Adjust for the current time slice.  This is
			 * actually fairly important since the error here is
			 * on the order of a time quantum, which is much
			 * greater than the precision of binuptime().
			 */
			KASSERT(td->td_oncpu != NOCPU,
			    ("%s: running thread has no CPU", __func__));
			if (!bt_valid) {
				binuptime(&bt);
				bt_valid = 1;
			}
			bintime_add(&rux.rux_runtime, &bt);
			bintime_sub(&rux.rux_runtime,
			    &pcpu_find(td->td_oncpu)->pc_switchtime);
		}
	}
	mtx_unlock_spin(&sched_lock);
	calcru1(p, &rux, up, sp);
	p->p_rux.rux_uu = rux.rux_uu;
	p->p_rux.rux_su = rux.rux_su;
	p->p_rux.rux_iu = rux.rux_iu;
}

void
calccru(p, up, sp)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}
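
/*
 * Typical calling sequence for the two functions above (a sketch; the
 * caller must hold the proc lock, as both functions assert):
 *
 *	struct timeval utime, stime;
 *
 *	PROC_LOCK(p);
 *	calcru(p, &utime, &stime);
 *	PROC_UNLOCK(p);
 */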

static void
calcru1(p, ruxp, up, sp)
	struct proc *p;
	struct rusage_ext *ruxp;
	struct timeval *up;
	struct timeval *sp;
{
	struct timeval tv;
	/* {user, system, interrupt, total} {ticks, usec}; previous tu: */
	u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		st = 1;
		tt = 1;
	}
	bintime2timeval(&ruxp->rux_runtime, &tv);
	tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
	ptu = ruxp->rux_uu + ruxp->rux_su + ruxp->rux_iu;
	if (tu < ptu) {
		printf(
"calcru: runtime went backwards from %ju usec to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ptu, (uintmax_t)tu, p->p_pid, p->p_comm);
		tu = ptu;
	}
	if ((int64_t)tu < 0) {
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ptu;
	}

	/* Subdivide tu. */
	uu = (tu * ut) / tt;
	su = (tu * st) / tt;
	iu = tu - uu - su;

	/* Enforce monotonicity. */
	if (uu < ruxp->rux_uu || su < ruxp->rux_su || iu < ruxp->rux_iu) {
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		else if (uu + ruxp->rux_su + ruxp->rux_iu > tu)
			uu = tu - ruxp->rux_su - ruxp->rux_iu;
		if (st == 0)
			su = ruxp->rux_su;
		else {
			su = ((tu - uu) * st) / (st + it);
			if (su < ruxp->rux_su)
				su = ruxp->rux_su;
			else if (uu + su + ruxp->rux_iu > tu)
				su = tu - uu - ruxp->rux_iu;
		}
		KASSERT(uu + su + ruxp->rux_iu <= tu,
		    ("calcru: monotonisation botch 1"));
		iu = tu - uu - su;
		KASSERT(iu >= ruxp->rux_iu,
		    ("calcru: monotonisation botch 2"));
	}
	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_iu = iu;
	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
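
/*
 * Worked example of the subdivision above (made-up numbers): with
 * ut = 60, st = 30, it = 10 statclock ticks (tt = 100) and
 * tu = 1000000 usec of measured runtime, the split is
 * uu = (1000000 * 60) / 100 = 600000, su = 300000, and
 * iu = tu - uu - su = 100000.  Because the ticks are only a
 * statistical sample of where the runtime was spent, the monotonicity
 * fixup is still needed when a later call would otherwise report less
 * cumulative time in some category than an earlier one did.
 */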

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
getrusage(td, uap)
	register struct thread *td;
	register struct getrusage_args *uap;
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(td, who, rup)
	struct thread *td;
	int who;
	struct rusage *rup;
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {

	case RUSAGE_SELF:
		*rup = p->p_stats->p_ru;
		calcru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	default:
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);
	return (0);
}

void
ruadd(ru, rux, ru2, rux2)
	struct rusage *ru;
	struct rusage_ext *rux;
	struct rusage *ru2;
	struct rusage_ext *rux2;
{
	register long *ip, *ip2;
	register int i;

	bintime_add(&rux->rux_runtime, &rux2->rux_runtime);
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_iu += rux2->rux_iu;
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc()
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	limp->pl_refcnt = 1;
	limp->pl_mtx = mtx_pool_alloc(mtxpool_sleep);
	return (limp);
}

struct plimit *
lim_hold(limp)
	struct plimit *limp;
{

	LIM_LOCK(limp);
	limp->pl_refcnt++;
	LIM_UNLOCK(limp);
	return (limp);
}

void
lim_free(limp)
	struct plimit *limp;
{

	LIM_LOCK(limp);
	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (--limp->pl_refcnt == 0) {
		LIM_UNLOCK(limp);
		free((void *)limp, M_PLIMIT);
		return;
	}
	LIM_UNLOCK(limp);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(dst, src)
	struct plimit *dst, *src;
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
}
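
/*
 * Usage sketch for the accessors above; the proc lock must be held
 * across the call since lim_rlimit() asserts it:
 *
 *	rlim_t dsize, dmax;
 *
 *	PROC_LOCK(p);
 *	dsize = lim_cur(p, RLIMIT_DATA);
 *	dmax = lim_max(p, RLIMIT_DATA);
 *	PROC_UNLOCK(p);
 */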

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit()
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF);
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_mtx must be locked.
 */
static struct uidinfo *
uilookup(uid)
	uid_t uid;
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	mtx_assert(&uihashtbl_mtx, MA_OWNED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid)
	uid_t uid;
{
	struct uidinfo *old_uip, *uip;

	mtx_lock(&uihashtbl_mtx);
	uip = uilookup(uid);
	if (uip == NULL) {
		mtx_unlock(&uihashtbl_mtx);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		mtx_lock(&uihashtbl_mtx);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep);
			uip->ui_uid = uid;
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	mtx_unlock(&uihashtbl_mtx);
	return (uip);
}
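
/*
 * Pairing sketch (illustrative): a subsystem that charges a resource
 * against a uid acquires a reference with uifind(), charges and
 * uncharges against the uidinfo, and drops the reference with
 * uifree() when done:
 *
 *	uip = uifind(uid);
 *	... charge and uncharge against uip ...
 *	uifree(uip);
 */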

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(uip)
	struct uidinfo *uip;
{

	UIDINFO_LOCK(uip);
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
}

/*
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in a need to free, bump the count
 *   back up, release the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(uip)
	struct uidinfo *uip;
{

	/* Prepare for optimal case. */
	UIDINFO_LOCK(uip);
	if (--uip->ui_ref != 0) {
		UIDINFO_UNLOCK(uip);
		return;
	}

	/* Prepare for suboptimal case. */
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
	mtx_lock(&uihashtbl_mtx);
	UIDINFO_LOCK(uip);

	/*
	 * We must subtract one from the count again because we backed out
	 * our initial subtraction before dropping the lock.
	 * Since another thread may have added a reference after we dropped
	 * the initial lock we have to test for zero again.
	 */
	if (--uip->ui_ref == 0) {
		LIST_REMOVE(uip, ui_hash);
		mtx_unlock(&uihashtbl_mtx);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %jd\n",
			    uip->ui_uid, (intmax_t)uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		UIDINFO_UNLOCK(uip);
		free(uip, M_UIDINFO);
		return;
	}

	mtx_unlock(&uihashtbl_mtx);
	UIDINFO_UNLOCK(uip);
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(uip, diff, max)
	struct uidinfo *uip;
	int diff;
	int max;
{

	UIDINFO_LOCK(uip);
	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_proccnt += diff;
	if (uip->ui_proccnt < 0)
		printf("negative proccnt for uid = %d\n", uip->ui_uid);
	UIDINFO_UNLOCK(uip);
	return (1);
}
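
/*
 * Usage sketch (simplified; real fork-style code would also
 * special-case privileged users): charge one process against
 * RLIMIT_NPROC and fail when chgproccnt() returns 0:
 *
 *	if (!chgproccnt(uip, 1, lim_cur(p, RLIMIT_NPROC)))
 *		return (EAGAIN);
 */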

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(uip, hiwat, to, max)
	struct uidinfo *uip;
	u_int *hiwat;
	u_int to;
	rlim_t max;
{
	rlim_t new;

	UIDINFO_LOCK(uip);
	new = uip->ui_sbsize + to - *hiwat;
	/* Don't allow them to exceed max, but allow subtraction. */
	if (to > *hiwat && new > max) {
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_sbsize = new;
	UIDINFO_UNLOCK(uip);
	*hiwat = to;
	if (new < 0)
		printf("negative sbsize for uid = %d\n", uip->ui_uid);
	return (1);
}
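
/*
 * Usage sketch (illustrative): socket buffer code reserves space by
 * passing the buffer's current high-water mark and the new target,
 * bounded by the per-uid sbsize resource limit:
 *
 *	if (!chgsbsize(uip, &sb->sb_hiwat, newhiwat,
 *	    lim_cur(p, RLIMIT_SBSIZE)))
 *		return (ENOBUFS);
 */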