/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtxvar.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{

	return (kern_getpriority(td, uap->which, uap->who));
}

int
kern_getpriority(struct thread *td, int which, int who)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}
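
/*
 * Editor's illustration (not part of the original source): the result is
 * returned in td_retval[0], and a nice value may legitimately be negative,
 * so a userland caller of getpriority(2) must clear errno to tell an error
 * apart from a negative nice value.
 */
#if 0
	int prio;

	errno = 0;
	prio = getpriority(PRIO_PROCESS, 0);
	if (prio == -1 && errno != 0)
		err(1, "getpriority");
#endif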

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{

	return (kern_setpriority(td, uap->which, uap->who, uap->prio));
}

int
kern_setpriority(struct thread *td, int which, int who, int prio)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}
263 * Set "nice" for a (whole) process.
266 donice(struct thread *td, struct proc *p, int n)
270 PROC_LOCK_ASSERT(p, MA_OWNED);
271 if ((error = p_cansched(td, p)))
277 if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)

static int unprivileged_idprio;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
    &unprivileged_idprio, 0,
    "Allow non-root users to set an idle priority (deprecated)");

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		td1 = tdfind(uap->lwpid, -1);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */

		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idleprio processes, there is
		 * a potential for system deadlock if an idleprio process gains
		 * a lock on a resource that other processes need (and the
		 * idleprio process can't run due to a CPU-bound normal
		 * process).  Fix me!  XXX
		 *
		 * This problem is not only related to idleprio process.
		 * A user level program can obtain a file lock and hold it
		 * indefinitely.  Additionally, without idleprio processes it is
		 * still conceivable that a program with low priority will never
		 * get to run.  In short, allowing this feature might make it
		 * easier to lock a resource indefinitely, but it is not the
		 * only thing that makes it possible.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
		    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
			break;
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0 &&
		    (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
			break;
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
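
/*
 * Editor's illustration (not part of the original source): userland use of
 * rtprio_thread(2) to make the calling thread idle-priority.  Unless
 * security.bsd.unprivileged_idprio is set, this requires passing the
 * PRIV_SCHED_IDPRIO check above.
 */
#if 0
	struct rtprio rtp = { .type = RTP_PRIO_IDLE, .prio = RTP_PRIO_MAX };

	if (rtprio_thread(RTP_SET, 0, &rtp) != 0)
		err(1, "rtprio_thread");
#endif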

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0)
			pri_to_rtp(td, &rtp);
		else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
		    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
			break;
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0 &&
		    (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
			break;

		/*
		 * If we are setting our own priority, set just our
		 * thread but if we are doing another process,
		 * do all the threads on that process.  If we
		 * specify our own pid we do the latter.
		 */
		if (uap->pid == 0)
			error = rtp_to_pri(&rtp, td);
		else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri, oldclass, oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	oldclass = td->td_pri_class;
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
	    td->td_pri_class != RTP_PRIO_NORMAL))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		thread_lock(td);
		critical_exit();
	}
	thread_unlock(td);
	return (0);
}

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}
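
/*
 * Editor's illustration (hypothetical helper, not part of the original
 * source): pri_to_rtp() and rtp_to_pri() are inverses over the classes
 * handled above, so a caller can snapshot and later restore a thread's
 * scheduling class and user priority.
 */
#if 0
static int
example_restore_sched(struct thread *td)
{
	struct rtprio saved;

	pri_to_rtp(td, &saved);			/* snapshot */
	/* ... temporarily change the thread's priority ... */
	return (rtp_to_pri(&saved, td));	/* restore */
}
#endif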

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rl);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */
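
/*
 * Editor's illustration of the clamp above (hypothetical values, not in
 * the original source): a 64-bit limit that does not fit in the old
 * 32-bit orlimit is silently reported as 0x7fffffff.
 */
#if 0
	struct rlimit rl;
	struct orlimit olim;

	rl.rlim_cur = (rlim_t)8 * 1024 * 1024 * 1024;	/* 8 GB */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	/* olim.rlim_cur is now 0x7fffffff; RLIM_INFINITY clamps the same way. */
#endif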

#ifndef _SYS_SYSPROTO_H_
struct setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_STATLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_STATUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit_proc(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
		    lim_cb, p, C_PREL(1));
}
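
/*
 * Editor's illustration (hypothetical values, not in the original source):
 * with RLIMIT_CPU set to rlim_cur = 10 and rlim_max = 15, lim_cb() sends
 * SIGXCPU once the process has consumed 10 seconds of CPU time, bumps the
 * soft limit for the next warning, and calls killproc() once consumption
 * reaches the 15 second hard limit.
 */
#if 0
	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 15 };
	int error;

	error = kern_setrlimit(curthread, RLIMIT_CPU, &rl);
#endif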

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}

int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim, *oldlim_td;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {
	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
			    lim_cb, p, C_PREL(1));
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UPDATE_COW(p);
	oldlim_td = NULL;
	if (td == curthread && PROC_COW_CHANGECOUNT(td, p) == 1) {
		oldlim_td = lim_cowsync();
		thread_cow_synced(td);
	}
	PROC_UNLOCK(p);
	if (oldlim_td != NULL) {
		MPASS(oldlim_td == oldlim);
		lim_freen(oldlim, 2);
	} else {
		lim_free(oldlim);
	}

	if (which == RLIMIT_STACK &&
	    /*
	     * Skip calls from exec_new_vmspace(), done when stack is
	     * not mapped yet.
	     */
	    (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = round_page(p->p_vmspace->vm_stacktop) -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = round_page(p->p_vmspace->vm_stacktop) -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, 0,
			    VM_MAP_PROTECT_SET_PROT);
		}
	}

	return (0);
}
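
/*
 * Editor's worked example for the stack reprotection above (hypothetical
 * numbers, not in the original source): if the soft RLIMIT_STACK limit
 * grows from 8 MB to 16 MB, then size = 8 MB and addr =
 * round_page(vm_stacktop) - 16 MB, so the 8 MB immediately below the
 * previously accessible region gains sv_stackprot.  Shrinking the limit
 * reprotects the vacated range to VM_PROT_NONE instead.
 */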

#ifndef _SYS_SYSPROTO_H_
struct getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
sys_getrlimit(struct thread *td, struct getrlimit_args *uap)
{
	struct rlimit rlim;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rlim);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}
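
/*
 * Editor's illustration (not part of the original source): the classic
 * userland pairing of getrlimit(2) and setrlimit(2), raising the soft
 * descriptor limit to the hard limit.
 */
#if 0
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
		rl.rlim_cur = rl.rlim_max;
		(void)setrlimit(RLIMIT_NOFILE, &rl);
	}
#endif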

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg_locked(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}

static uint64_t
mul64_by_fraction(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t acc, bh, bl;
	int i, s, sa, sb;

	/*
	 * Calculate (a * b) / c accurately enough without overflowing.  c
	 * must be nonzero, and its top bit must be 0.  a or b must be
	 * <= c, and the implementation is tuned for b <= c.
	 *
	 * The comments about times are for use in calcru1() with units of
	 * microseconds for 'a' and stathz ticks at 128 Hz for b and c.
	 *
	 * Let n be the number of top zero bits in c.  Each iteration
	 * either returns, or reduces b by right shifting it by at least n.
	 * The number of iterations is at most 1 + 64 / n, and the error is
	 * at most the number of iterations.
	 *
	 * It is very unusual to need even 2 iterations.  Previous
	 * implementations overflowed essentially by returning early in the
	 * first iteration, with n = 38 giving overflow at 105+ hours and
	 * n = 32 giving overflow at 388+ days despite a more careful
	 * calculation.  388 days is a reasonable uptime, and the calculation
	 * needs to work for the uptime times the number of CPUs since 'a'
	 * is actually the sum of the cpu time for all the CPUs.
	 */
	if (a >= (uint64_t)1 << 63)
		return (0);		/* Unsupported arg -- can't happen. */
	acc = 0;
	for (i = 0; i < 128; i++) {
		sa = flsll(a);
		sb = flsll(b);
		if (sa + sb <= 64)
			/* Up to 105 hours on first iteration. */
			return (acc + (a * b) / c);
		if (a >= c) {
			/*
			 * This reduction is based on a = q * c + r, with the
			 * remainder r < c.  'a' may be large to start, and
			 * moving bits from b into 'a' at the end of the loop
			 * sets the top bit of 'a', so the reduction makes
			 * significant progress.
			 */
			acc += (a / c) * b;
			a %= c;
			sa = flsll(a);
			if (sa + sb <= 64)
				/* Up to 388 days on first iteration. */
				return (acc + (a * b) / c);
		}

		/*
		 * This step writes a * b as a * ((bh << s) + bl) =
		 * a * (bh << s) + a * bl = (a << s) * bh + a * bl.  The 2
		 * additive terms are handled separately.  Splitting in
		 * this way is linear except for rounding errors.
		 *
		 * s = 64 - sa is the maximum such that a << s fits in 64
		 * bits.  Since a < c and c has at least 1 zero top bit,
		 * sa < 64 and s > 0.  Thus this step makes progress by
		 * reducing b (it increases 'a', but taking remainders on
		 * the next iteration completes the reduction).
		 *
		 * Finally, the choice for s is just what is needed to keep
		 * a * bl from overflowing, so we don't need complications
		 * like a recursive call mul64_by_fraction(a, bl, c) to
		 * handle the second additive term.
		 */
		s = 64 - sa;
		bh = b >> s;
		bl = b - (bh << s);
		acc += (a * bl) / c;
		a <<= s;
		b = bh;
	}
	return (0);		/* Algorithm failure -- can't happen. */
}
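
/*
 * Editor's worked example (hypothetical values, not in the original
 * source): mul64_by_fraction(1 << 40, 3 << 25, 1 << 27).  The naive
 * product 2^40 * 3*2^25 overflows 64 bits, so the first reduction splits
 * off acc = (2^40 / 2^27) * (3 << 25) = 824633720832 and leaves
 * a = 2^40 % 2^27 = 0, after which the quick path returns acc.  That is
 * exactly 3/4 of 2^40, as expected from b / c = 3/4.
 */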

static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	/* Subdivide tu.  Avoid overflow in the multiplications. */
	if (__predict_true(tu <= ((uint64_t)1 << 38) && tt <= (1 << 26))) {
		/* Up to 76 hours when stathz is 128. */
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	} else {
		uu = mul64_by_fraction(tu, ut, tt);
		su = mul64_by_fraction(tu, st, tt);
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		tu = ruxp->rux_tu;
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
	} else if (vm_guest == VM_GUEST_NO) {	/* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
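
/*
 * Editor's worked example for the subdivision above (hypothetical values,
 * not in the original source): with tu = 1000000 usec and tick counts
 * ut = 60, st = 20, it = 20 (so tt = 100), the fast path yields
 * uu = 600000 and su = 200000; the remaining 200000 usec of interrupt
 * time are charged to neither ru_utime nor ru_stime.
 */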

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_STATLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_STATUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td)
{

	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg_locked(struct proc *p, struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);

	ruxagg_ext_locked(&p->p_rux, td);
	ruxagg_ext_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(p, td);
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_STATLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume the calcru is executed only once
 * rufetch is completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_STATLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_STATUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

struct plimit *
lim_cowsync(void)
{
	struct thread *td;
	struct proc *p;
	struct plimit *oldlimit;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (td->td_limit == p->p_limit)
		return (NULL);

	oldlimit = td->td_limit;
	td->td_limit = lim_hold(p->p_limit);

	return (oldlimit);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK_ASSERT(p1, MA_OWNED);
	PROC_LOCK_ASSERT(p2, MA_OWNED);

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
		    lim_cb, p2, C_PREL(1));
}

void
lim_free(struct plimit *limp)
{

	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

void
lim_freen(struct plimit *limp, int n)
{

	if (refcount_releasen(&limp->pl_refcnt, n))
		free((void *)limp, M_PLIMIT);
}

void
limbatch_add(struct limbatch *lb, struct thread *td)
{
	struct plimit *limp;

	MPASS(td->td_limit != NULL);
	limp = td->td_limit;

	if (lb->limp != limp) {
		if (lb->count != 0) {
			lim_freen(lb->limp, lb->count);
			lb->count = 0;
		}
		lb->limp = limp;
	}

	lb->count++;
}

void
limbatch_final(struct limbatch *lb)
{

	MPASS(lb->count != 0);
	lim_freen(lb->limp, lb->count);
}
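
/*
 * Editor's illustration (hypothetical caller, not in the original source):
 * the limbatch functions above let a caller that is releasing many threads
 * sharing one plimit pay a single refcount_releasen() instead of one
 * atomic per thread.
 */
#if 0
	struct limbatch lb = { .limp = NULL, .count = 0 };
	struct proc *p = curproc;
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td)
		limbatch_add(&lb, td);
	limbatch_final(&lb);
#endif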

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt <= 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_max);
}

rlim_t
lim_max_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
(lim_cur)(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_cur);
}

rlim_t
lim_cur_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct thread *td, int which, struct rlimit *rlp)
{
	struct proc *p = td->td_proc;

	MPASS(td == curthread);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = td->td_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}
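
/*
 * Editor's illustration (hypothetical consumer, not in the original
 * source): most kernel callers want a single field, so they use the
 * lim_cur()/lim_max() wrappers above rather than lim_rlimit().
 */
#if 0
	if (nfiles + 1 > lim_cur(curthread, RLIMIT_NOFILE))
		return (EMFILE);
#endif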

void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 * Increase refcount on uidinfo struct returned.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid) {
			uihold(uip);
			break;
		}

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Returns with uidinfo struct referenced.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *new_uip, *uip;
	struct ucred *cred;

	cred = curthread->td_ucred;
	if (cred->cr_uidinfo->ui_uid == uid) {
		uip = cred->cr_uidinfo;
		uihold(uip);
		return (uip);
	} else if (cred->cr_ruidinfo->ui_uid == uid) {
		uip = cred->cr_ruidinfo;
		uihold(uip);
		return (uip);
	}

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	rw_runlock(&uihashtbl_lock);
	if (uip != NULL)
		return (uip);

	new_uip = malloc(sizeof(*new_uip), M_UIDINFO, M_WAITOK | M_ZERO);
	racct_create(&new_uip->ui_racct);
	refcount_init(&new_uip->ui_ref, 1);
	new_uip->ui_uid = uid;

	rw_wlock(&uihashtbl_lock);
	/*
	 * There's a chance someone created our uidinfo while we
	 * were in malloc and not holding the lock, so we have to
	 * make sure we don't insert a duplicate uidinfo.
	 */
	if ((uip = uilookup(uid)) == NULL) {
		LIST_INSERT_HEAD(UIHASH(uid), new_uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		uip = new_uip;
	} else {
		rw_wunlock(&uihashtbl_lock);
		racct_destroy(&new_uip->ui_racct);
		free(new_uip, M_UIDINFO);
	}
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}
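
/*
 * Editor's illustration (hypothetical caller, not in the original source):
 * a uifind() caller need not know whether the uidinfo was found or freshly
 * inserted; it simply owns one reference that must eventually be returned
 * with uifree().
 */
#if 0
	struct uidinfo *uip;

	uip = uifind(uid);	/* returns with one reference held */
	(void)chgproccnt(uip, 1, lim_cur(curthread, RLIMIT_NPROC));
	/* ... when the accounted object goes away ... */
	(void)chgproccnt(uip, -1, 0);
	uifree(uip);		/* drop the reference */
#endif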

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(struct uidinfo *uip)
{

	if (refcount_release_if_not_last(&uip->ui_ref))
		return;

	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref) == 0) {
		rw_wunlock(&uihashtbl_lock);
		return;
	}

	racct_destroy(&uip->ui_racct);
	LIST_REMOVE(uip, ui_hash);
	rw_wunlock(&uihashtbl_lock);

	if (uip->ui_sbsize != 0)
		printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);
	if (uip->ui_vmsize != 0)
		printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
		    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
	free(uip, M_UIDINFO);
}
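
/*
 * Editor's worked example for the opportunistic release above
 * (hypothetical count, not in the original source): with ui_ref = 3,
 * refcount_release_if_not_last() drops it to 2 and returns true, so the
 * common case never takes uihashtbl_lock.  Only the last holder falls
 * through, takes the write lock, and releases again under it, which also
 * catches a concurrent uifind() that revived the entry in the meantime.
 */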

#ifdef RACCT
void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void (*pre)(void), void (*post)(void),
    void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	if (pre != NULL)
		(pre)();
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	if (post != NULL)
		(post)();
	rw_runlock(&uihashtbl_lock);
}
#endif

static inline int
chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max,
    const char *name)
{
	long new;

	/* Don't allow them to exceed max, but allow subtraction. */
	new = atomic_fetchadd_long(limit, (long)diff) + diff;
	if (diff > 0 && max != 0) {
		if (new < 0 || new > max) {
			atomic_subtract_long(limit, (long)diff);
			return (0);
		}
	} else if (new < 0)
		printf("negative %s for uid = %d\n", name, uip->ui_uid);
	return (1);
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_proccnt, diff, max, "proccnt"));
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff, rv;

	diff = to - *hiwat;
	if (diff > 0 && max == 0) {
		rv = 0;
	} else {
		rv = chglimit(uip, &uip->ui_sbsize, diff, max, "sbsize");
		if (rv != 0)
			*hiwat = to;
	}
	return (rv);
}

/*
 * Change the count associated with number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_ptscnt, diff, max, "ptscnt"));
}

int
chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
}

int
chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
}