/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtxvar.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");

#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */
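
/*
 * Illustrative note (not part of the original source): hashinit() sizes
 * the table to a power of two and stores size - 1 in uihash, so UIHASH()
 * reduces to a cheap mask.  With 128 buckets, uihash == 127 and
 * UIHASH(1001) selects bucket 1001 & 127 == 105.
 */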

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{

	return (kern_getpriority(td, uap->which, uap->who));
}

int
kern_getpriority(struct thread *td, int which, int who)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{

	return (kern_setpriority(td, uap->which, uap->who, uap->prio));
}

int
kern_setpriority(struct thread *td, int which, int who, int prio)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

static int unprivileged_idprio;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
    &unprivileged_idprio, 0,
    "Allow non-root users to set an idle priority (deprecated)");

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		td1 = tdfind(uap->lwpid, -1);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */

		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idleprio processes, there
		 * is a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need (and
		 * the idleprio process can't run due to a CPU-bound normal
		 * process).  Fix me!  XXX
		 *
		 * This problem is not only related to idleprio processes.
		 * A user level program can obtain a file lock and hold it
		 * indefinitely.  Additionally, without idleprio processes it
		 * is still conceivable that a program with low priority will
		 * never get to run.  In short, allowing this feature might
		 * make it easier to lock a resource indefinitely, but it is
		 * not the only thing that makes it possible.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
		    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
			break;
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0 &&
		    (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
			break;
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp, rtp2;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
		    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
			break;
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0 &&
		    (error = priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
			break;

		/*
		 * If we are setting our own priority, set just our
		 * thread but if we are doing another process,
		 * do all the threads on that process.  If we
		 * specify our own pid we do the latter.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri, oldclass, oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	oldclass = td->td_pri_class;
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
	    td->td_pri_class != RTP_PRIO_NORMAL))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}
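
/*
 * Illustrative note (not part of the original source): rtp_to_pri() and
 * pri_to_rtp() translate between per-class rtprio(2) values and the
 * global kernel priority space.  For example, {RTP_PRIO_REALTIME, 0}
 * maps to PRI_MIN_REALTIME, and a thread whose td_base_user_pri is
 * PRI_MIN_TIMESHARE + 5 reads back as {RTP_PRIO_NORMAL, 5}.
 */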

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rl);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids, are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here, since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */
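
/*
 * Illustrative note (not part of the original source): with the 32-bit
 * struct orlimit, a 64-bit limit of RLIM_INFINITY, or any value above
 * 0x7fffffff such as 8 GB (0x200000000), is reported to COMPAT_43
 * binaries as 0x7fffffff instead of failing with EOVERFLOW.
 */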

#ifndef _SYS_SYSPROTO_H_
struct setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_STATLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_STATUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit_proc(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
		    lim_cb, p, C_PREL(1));
}
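
/*
 * Illustrative note (not part of the original source): cpu_tickrate()
 * is in ticks per second, so with a 1 GHz tick source and a 10-second
 * soft RLIMIT_CPU the SIGXCPU threshold above is 10 * 10^9 ticks of
 * accumulated rux_runtime.  The soft limit is then raised 5 seconds at
 * a time until it reaches rlim_max, at which point killproc() is called.
 */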

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}

int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim, *oldlim_td;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {
	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
			    lim_cb, p, C_PREL(1));
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UPDATE_COW(p);
	oldlim_td = NULL;
	if (td == curthread && PROC_COW_CHANGECOUNT(td, p) == 1) {
		oldlim_td = lim_cowsync();
		thread_cow_synced(td);
	}
	PROC_UNLOCK(p);
	if (oldlim_td != NULL) {
		MPASS(oldlim_td == oldlim);
		lim_freen(oldlim, 2);
	} else {
		lim_free(oldlim);
	}

	if (which == RLIMIT_STACK &&
	    /*
	     * Skip calls from exec_new_vmspace(), done when stack is
	     * not mapped yet.
	     */
	    (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = round_page(p->p_vmspace->vm_stacktop) -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = round_page(p->p_vmspace->vm_stacktop) -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, 0,
			    VM_MAP_PROTECT_SET_PROT);
		}
	}

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_getrlimit(struct thread *td, struct getrlimit_args *uap)
{
	struct rlimit rlim;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rlim);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg_locked(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}

static uint64_t
mul64_by_fraction(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t acc, bh, bl;
	int i, s, sa, sb;

	/*
	 * Calculate (a * b) / c accurately enough without overflowing.  c
	 * must be nonzero, and its top bit must be 0.  a or b must be
	 * <= c, and the implementation is tuned for b <= c.
	 *
	 * The comments about times are for use in calcru1() with units of
	 * microseconds for 'a' and stathz ticks at 128 Hz for b and c.
	 *
	 * Let n be the number of top zero bits in c.  Each iteration
	 * either returns, or reduces b by right shifting it by at least n.
	 * The number of iterations is at most 1 + 64 / n, and the error is
	 * at most the number of iterations.
	 *
	 * It is very unusual to need even 2 iterations.  Previous
	 * implementations overflowed essentially by returning early in the
	 * first iteration, with n = 38 giving overflow at 105+ hours and
	 * n = 32 giving overflow at 388+ days despite a more careful
	 * calculation.  388 days is a reasonable uptime, and the calculation
	 * needs to work for the uptime times the number of CPUs since 'a'
	 * is the total runtime accumulated over all CPUs.
	 */
	if (a >= (uint64_t)1 << 63)
		return (0);		/* Unsupported arg -- can't happen. */
	acc = 0;
	for (i = 0; i < 128; i++) {
		sa = flsll(a);
		sb = flsll(b);
		if (sa + sb <= 64)
			/* Up to 105 hours on first iteration. */
			return (acc + (a * b) / c);
		if (a >= c) {
			/*
			 * This reduction is based on a = q * c + r, with the
			 * remainder r < c.  'a' may be large to start, and
			 * moving bits from b into 'a' at the end of the loop
			 * sets the top bit of 'a', so the reduction makes
			 * significant progress.
			 */
			acc += (a / c) * b;
			a %= c;
			sa = flsll(a);
			if (sa + sb <= 64)
				/* Up to 388 days on first iteration. */
				return (acc + (a * b) / c);
		}

		/*
		 * This step writes a * b as a * ((bh << s) + bl) =
		 * a * (bh << s) + a * bl = (a << s) * bh + a * bl.  The 2
		 * additive terms are handled separately.  Splitting in
		 * this way is linear except for rounding errors.
		 *
		 * s = 64 - sa is the maximum such that a << s fits in 64
		 * bits.  Since a < c and c has at least 1 zero top bit,
		 * sa < 64 and s > 0.  Thus this step makes progress by
		 * reducing b (it increases 'a', but taking remainders on
		 * the next iteration completes the reduction).
		 *
		 * Finally, the choice for s is just what is needed to keep
		 * a * bl from overflowing, so we don't need complications
		 * like a recursive call mul64_by_fraction(a, bl, c) to
		 * handle the second additive term.
		 */
		s = 64 - sa;
		bh = b >> s;
		bl = b - (bh << s);
		acc += (a * bl) / c;
		a <<= s;
		b = bh;
	}
	return (0);		/* Algorithm failure -- can't happen. */
}
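
/*
 * Illustrative example (not part of the original source): for small
 * arguments the fast path returns immediately, e.g.
 * mul64_by_fraction(1000000, 3, 4) == (1000000 * 3) / 4 == 750000 on the
 * first iteration, since flsll(1000000) + flsll(3) == 20 + 2 <= 64.  The
 * reduction steps only run once a * b would overflow 64 bits.
 */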

static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	/* Subdivide tu.  Avoid overflow in the multiplications. */
	if (__predict_true(tu <= ((uint64_t)1 << 38) && tt <= (1 << 26))) {
		/* Up to 76 hours when stathz is 128. */
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	} else {
		uu = mul64_by_fraction(tu, ut, tt);
		su = mul64_by_fraction(tu, st, tt);
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		tu = ruxp->rux_tu;
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
	} else if (vm_guest == VM_GUEST_NO) {	/* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
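
/*
 * Illustrative example (not part of the original source): a process with
 * tu = 1000000 us of measured runtime, ut = 3 user ticks, st = 1 system
 * tick and it = 0 (so tt = 4) gets uu = 1000000 * 3 / 4 == 750000 us and
 * su = 1000000 * 1 / 4 == 250000 us; the statclock tick counts only
 * apportion the precisely measured total between user and system time.
 */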

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_STATLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_STATUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

static void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td)
{

	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg_locked(struct proc *p, struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);

	ruxagg_ext_locked(&p->p_rux, td);
	ruxagg_ext_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(p, td);
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_STATLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume that calcru() is executed only
 * after rufetch() has completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_STATLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_STATUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

struct plimit *
lim_cowsync(void)
{
	struct thread *td;
	struct proc *p;
	struct plimit *oldlimit;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (td->td_limit == p->p_limit)
		return (NULL);

	oldlimit = td->td_limit;
	td->td_limit = lim_hold(p->p_limit);

	return (oldlimit);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK_ASSERT(p1, MA_OWNED);
	PROC_LOCK_ASSERT(p2, MA_OWNED);

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
		    lim_cb, p2, C_PREL(1));
}

void
lim_free(struct plimit *limp)
{

	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

void
lim_freen(struct plimit *limp, int n)
{

	if (refcount_releasen(&limp->pl_refcnt, n))
		free((void *)limp, M_PLIMIT);
}

void
limbatch_add(struct limbatch *lb, struct thread *td)
{
	struct plimit *limp;

	MPASS(td->td_limit != NULL);
	limp = td->td_limit;

	if (lb->limp != limp) {
		if (lb->count != 0) {
			lim_freen(lb->limp, lb->count);
			lb->count = 0;
		}
		lb->limp = limp;
	}

	lb->count++;
}

void
limbatch_final(struct limbatch *lb)
{

	MPASS(lb->count != 0);
	lim_freen(lb->limp, lb->count);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt <= 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}
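
/*
 * Illustrative sketch (not part of the original source) of the
 * copy-on-write update pattern used by kern_proc_setrlimit() above:
 *
 *	newlim = lim_alloc();
 *	PROC_LOCK(p);
 *	oldlim = p->p_limit;
 *	lim_copy(newlim, oldlim);
 *	newlim->pl_rlimit[which] = *limp;	(modify the private copy)
 *	p->p_limit = newlim;
 *	PROC_UNLOCK(p);
 *	lim_free(oldlim);			(drop the old reference)
 */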

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_max);
}

rlim_t
lim_max_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
(lim_cur)(struct thread *td, int which)
{
	struct rlimit rl;

	lim_rlimit(td, which, &rl);
	return (rl.rlim_cur);
}

rlim_t
lim_cur_proc(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit_proc(p, which, &rl);
	return (rl.rlim_cur);
}
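
/*
 * Illustrative note (not part of the original source): the parentheses
 * around (lim_cur) above suppress macro expansion, suggesting a macro
 * fast path of the same name in the headers.  A typical caller is:
 *
 *	rlim_t maxfd = lim_cur(curthread, RLIMIT_NOFILE);
 */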

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct thread *td, int which, struct rlimit *rlp)
{
	struct proc *p = td->td_proc;

	MPASS(td == curthread);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = td->td_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 * Increase refcount on uidinfo struct returned.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid) {
			uihold(uip);
			break;
		}

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Returns with uidinfo struct referenced.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *new_uip, *uip;
	struct ucred *cred;

	cred = curthread->td_ucred;
	if (cred->cr_uidinfo->ui_uid == uid) {
		uip = cred->cr_uidinfo;
		uihold(uip);
		return (uip);
	} else if (cred->cr_ruidinfo->ui_uid == uid) {
		uip = cred->cr_ruidinfo;
		uihold(uip);
		return (uip);
	}

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	rw_runlock(&uihashtbl_lock);
	if (uip != NULL)
		return (uip);

	new_uip = malloc(sizeof(*new_uip), M_UIDINFO, M_WAITOK | M_ZERO);
	racct_create(&new_uip->ui_racct);
	refcount_init(&new_uip->ui_ref, 1);
	new_uip->ui_uid = uid;

	rw_wlock(&uihashtbl_lock);
	/*
	 * There's a chance someone created our uidinfo while we
	 * were in malloc and not holding the lock, so we have to
	 * make sure we don't insert a duplicate uidinfo.
	 */
	if ((uip = uilookup(uid)) == NULL) {
		LIST_INSERT_HEAD(UIHASH(uid), new_uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		uip = new_uip;
	} else {
		rw_wunlock(&uihashtbl_lock);
		racct_destroy(&new_uip->ui_racct);
		free(new_uip, M_UIDINFO);
	}
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(struct uidinfo *uip)
{

	if (refcount_release_if_not_last(&uip->ui_ref))
		return;

	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref) == 0) {
		rw_wunlock(&uihashtbl_lock);
		return;
	}

	racct_destroy(&uip->ui_racct);
	LIST_REMOVE(uip, ui_hash);
	rw_wunlock(&uihashtbl_lock);

	if (uip->ui_sbsize != 0)
		printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);
	if (uip->ui_vmsize != 0)
		printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
		    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
	free(uip, M_UIDINFO);
}
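
/*
 * Illustrative usage (not part of the original source): consumers pair
 * uifind() with uifree() around per-uid accounting, e.g.:
 *
 *	uip = uifind(uid);		(returns a referenced uidinfo)
 *	(void)chgproccnt(uip, 1, 0);	(max == 0: no limit enforced)
 *	...
 *	(void)chgproccnt(uip, -1, 0);
 *	uifree(uip);			(drop the reference)
 */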

void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void (*pre)(void), void (*post)(void),
    void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	if (pre != NULL)
		(pre)();
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	if (post != NULL)
		(post)();
	rw_runlock(&uihashtbl_lock);
}

static __inline int
chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max, const char *name)
{
	long new;

	/* Don't allow them to exceed max, but allow subtraction. */
	new = atomic_fetchadd_long(limit, (long)diff) + diff;
	if (diff > 0 && max != 0) {
		if (new < 0 || new > max) {
			atomic_subtract_long(limit, (long)diff);
			return (0);
		}
	} else if (new < 0)
		printf("negative %s for uid = %d\n", name, uip->ui_uid);
	return (1);
}
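
/*
 * Illustrative note (not part of the original source): chglimit() returns
 * 1 on success and 0 when a positive diff would push the counter past
 * max.  With *limit == 99 and max == 100, diff == 1 succeeds (new ==
 * 100), while diff == 2 is backed out with atomic_subtract_long() and
 * fails.  Negative diffs are always applied, so releases cannot fail.
 */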

/*
 * Change the count associated with the number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_proccnt, diff, max, "proccnt"));
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff, rv;

	diff = to - *hiwat;
	if (diff > 0 && max == 0) {
		rv = 0;
	} else {
		rv = chglimit(uip, &uip->ui_sbsize, diff, max, "sbsize");
		if (rv != 0)
			*hiwat = to;
	}
	return (rv);
}

/*
 * Change the count associated with the number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_ptscnt, diff, max, "ptscnt"));
}

int
chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
}

int
chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
{

	return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
}
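
/*
 * Illustrative usage sketch (not part of the original source): the fork
 * path uses chgproccnt() as an admission check, roughly:
 *
 *	if (!chgproccnt(uip, 1, lim_cur(td, RLIMIT_NPROC))) {
 *		(over the per-uid process limit: undo and fail)
 *		return (EAGAIN);
 *	}
 */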