/*-
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/ktrace.h>
#include <sys/pmckern.h>

#include <machine/cpu.h>
#include <machine/smp.h>
#error "SCHED_ULE requires options PREEMPTION"

/*
 * Pick idle from affinity group or self group first.
 * Implement pick_score.
 */

#define	KTR_ULE	0x0		/* Enable for pickpri debugging. */
/*
 * Thread scheduler specific section.
 */
	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
	int		ts_flags;	/* (j) TSF_* flags. */
	struct thread	*ts_thread;	/* (*) Active associated thread. */
	u_char		ts_rqindex;	/* (j) Run queue index. */
	u_char		ts_cpu;		/* CPU that we have affinity for. */
	/* The following variables are only used for pctcpu calculation */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
	int		ts_rltick;	/* Real last tick, for affinity. */

	/* originally from kg_sched */
	u_int		skg_slptime;	/* Number of ticks we vol. slept */
	u_int		skg_runtime;	/* Number of ticks we were running */
/* flags kept in ts_flags */
#define	TSF_BOUND	0x0001		/* Thread cannot migrate. */
#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */

static struct td_sched td_sched0;
/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
 */
#define	SCHED_TICK_SECS		10
#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define	SCHED_TICK_SHIFT	10
#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
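
/*
 * Worked example (illustrative only; assumes hz = 1000, which this file
 * does not fix): a thread that ran for 2500 of the last 10000 hz ticks
 * carries ts_ticks = 2500 << SCHED_TICK_SHIFT, so SCHED_TICK_HZ()
 * recovers 2500 and SCHED_TICK_TOTAL() is ts_ltick - ts_ftick = 10000.
 * The 10-bit shift preserves precision when ts_ticks is decayed by the
 * multiply/divide in sched_pctcpu_update().
 */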
/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
#define	SCHED_PRI_TICKS(ts)						\
    (SCHED_TICK_HZ((ts)) /						\
    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define	SCHED_PRI_NICE(nice)	(nice)
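
/*
 * Worked example (hedged; assumes the stock <sys/priority.h> values of
 * this era, PRI_MIN_TIMESHARE = 160 and PRI_MAX_TIMESHARE = 223, which
 * give SCHED_PRI_MIN = 180, SCHED_PRI_MAX = 203, SCHED_PRI_RANGE = 24):
 * with SCHED_TICK_HZ(ts) = 2500 and SCHED_TICK_TOTAL(ts) = 10000,
 * roundup(10000, 24) / 24 = 417, so SCHED_PRI_TICKS(ts) = 2500 / 417 = 5
 * and a nice 0 thread lands at priority 180 + 5 = 185.  A fully busy
 * thread (10000 / 417 = 23) reaches 203, and SCHED_PRI_NICE() then
 * shifts the result linearly by the nice value.
 */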
/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)
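
/*
 * Scale check (again assuming hz = 1000, an illustrative value): the
 * sleep/run history is kept in the same << SCHED_TICK_SHIFT fixed point
 * as ts_ticks, so SCHED_SLP_RUN_MAX caps it at five seconds' worth and
 * SCHED_SLP_RUN_FORK caps what a child inherits at half a second.  A
 * thread scoring below SCHED_INTERACT_THRESH (30 of a possible 100) is
 * treated as interactive.
 */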
/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0; when it is we run off of hz.
 * sched_slice:		Runtime of each thread before rescheduling.
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int sched_slice;
/*
 * tdq - per processor runqs and statistics.
 */
	struct runq	tdq_idle;	/* Queue of IDLE threads. */
	struct runq	tdq_timeshare;	/* timeshare run queue. */
	struct runq	tdq_realtime;	/* real-time run queue. */
	u_char		tdq_idx;	/* Current insert index. */
	u_char		tdq_ridx;	/* Current removal index. */
	short		tdq_flags;	/* Thread queue flags */
	int		tdq_load;	/* Aggregate load. */
	int		tdq_transferable;
	LIST_ENTRY(tdq)	tdq_siblings;	/* Next in tdq group. */
	struct tdq_group *tdq_group;	/* Our processor group. */
	int		tdq_sysload;	/* For loadavg, !ITHD load. */

#define	TDQF_BUSY	0x0001		/* Queue is marked as busy */
/*
 * tdq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
 * In a NUMA environment we'd want an idle bitmap per group and a two tiered
 */
	int		tdg_cpus;	/* Count of CPUs in this tdq group. */
	cpumask_t	tdg_cpumask;	/* Mask of cpus in this group. */
	cpumask_t	tdg_idlemask;	/* Idle cpus in this group. */
	cpumask_t	tdg_mask;	/* Bit mask for first cpu. */
	int		tdg_load;	/* Total load of this group. */
	int		tdg_transferable; /* Transferable load of this group. */
	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
#define	SCHED_AFFINITY_DEFAULT	(hz / 100)
#define	SCHED_AFFINITY(ts)	((ts)->ts_rltick > ticks - affinity)
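
/*
 * Example (hz = 1000 assumed for illustration): the default affinity
 * window is hz / 100 = 10 ticks, so SCHED_AFFINITY(ts) stays true for
 * roughly 10 ms after a thread last ran (ts_rltick), and wakeups inside
 * that window prefer placing the thread back on ts_cpu.
 */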
static int rebalance = 0;
static int pick_pri = 1;
static int tryself = 1;
static int tryselfidle = 1;
static int ipi_ast = 0;
static int ipi_preempt = 1;
static int ipi_thresh = PRI_MIN_KERN;
static int steal_htt = 1;
static int steal_busy = 1;
static int busy_thresh = 4;

/*
 * One thread queue per processor.
 */
static volatile cpumask_t tdq_idle;
static volatile cpumask_t tdq_busy;
static int tdg_maxid;
static struct tdq	tdq_cpu[MAXCPU];
static struct tdq_group tdq_groups[MAXCPU];
static int gbal_tick;
static int balance_groups;

#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
#define	TDQ_ID(x)	((x) - tdq_cpu)
#define	TDQ_GROUP(x)	(&tdq_groups[(x)])

static struct tdq	tdq_cpu;

#define	TDQ_SELF()	(&tdq_cpu)
#define	TDQ_CPU(x)	(&tdq_cpu)
static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *);
static inline void sched_pin_td(struct thread *td);
static inline void sched_unpin_td(struct thread *td);

/* Operations on per processor queues */
static struct td_sched *tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *);
static void tdq_load_add(struct tdq *, struct td_sched *);
static void tdq_load_rem(struct tdq *, struct td_sched *);
static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
void tdq_print(int cpu);
static void runq_print(struct runq *rq);

static int tdq_pickidle(struct tdq *, struct td_sched *);
static int tdq_pickpri(struct tdq *, struct td_sched *, int);
static struct td_sched *runq_steal(struct runq *);
static void sched_balance(void);
static void sched_balance_groups(void);
static void sched_balance_group(struct tdq_group *);
static void sched_balance_pair(struct tdq *, struct tdq *);
static void sched_smp_tick(struct thread *);
static void tdq_move(struct tdq *, int);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct td_sched *);
static struct td_sched *tdq_steal(struct tdq *, int);

#define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
sched_pin_td(struct thread *td)

sched_unpin_td(struct thread *td)

runq_print(struct runq *rq)
	for (i = 0; i < RQB_LEN; i++) {
		printf("\t\trunq bits %d 0x%zx\n",
		    i, rq->rq_status.rqb_bits[i]);
		for (j = 0; j < RQB_BPW; j++)
			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
				pri = j + (i << RQB_L2BPW);
				rqh = &rq->rq_queues[pri];
				TAILQ_FOREACH(ts, rqh, ts_procq) {
					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
					    ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
	printf("\tload: %d\n", tdq->tdq_load);
	printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
	printf("\trealtime runq:\n");
	runq_print(&tdq->tdq_realtime);
	printf("\ttimeshare runq:\n");
	runq_print(&tdq->tdq_timeshare);
	printf("\tidle runq:\n");
	runq_print(&tdq->tdq_idle);
	printf("\tload transferable: %d\n", tdq->tdq_transferable);
tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
		tdq->tdq_transferable++;
		tdq->tdq_group->tdg_transferable++;
		ts->ts_flags |= TSF_XFERABLE;
		if (tdq->tdq_transferable >= busy_thresh &&
		    (tdq->tdq_flags & TDQF_BUSY) == 0) {
			tdq->tdq_flags |= TDQF_BUSY;
			atomic_set_int(&tdq_busy, 1 << TDQ_ID(tdq));
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		pri = ts->ts_thread->td_priority;
		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
		    ("Invalid priority %d on timeshare runq", pri));
		/*
		 * This queue contains only priorities between MIN and MAX
		 * realtime.  Use the whole queue to represent these values.
		 */
#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
		if ((flags & SRQ_BORROWING) == 0) {
			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
			pri = (pri + tdq->tdq_idx) % RQ_NQS;
			/*
			 * This effectively shortens the queue by one so we
			 * can have a one slot difference between idx and
			 * ridx while we wait for threads to drain.
			 */
			if (tdq->tdq_ridx != tdq->tdq_idx &&
			    pri == tdq->tdq_ridx)
				pri = (unsigned char)(pri - 1) % RQ_NQS;
		runq_add_pri(ts->ts_runq, ts, pri, flags);
		runq_add(ts->ts_runq, ts, flags);
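
/*
 * Worked example of the circular insert math above (hedged; assumes
 * RQ_NQS = 64 from <sys/runq.h> and the stock timeshare range 160-223,
 * which makes TS_RQ_PPQ = 1): a priority 185 thread maps to slot
 * (185 - 160) / 1 = 25, and with tdq_idx = 50 it is inserted at
 * (25 + 50) % 64 = 11.  Lower priorities thus land closer to tdq_ridx
 * and drain sooner as sched_clock() advances the insert index.
 */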
tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
	if (ts->ts_flags & TSF_XFERABLE) {
		tdq->tdq_transferable--;
		tdq->tdq_group->tdg_transferable--;
		ts->ts_flags &= ~TSF_XFERABLE;
		if (tdq->tdq_transferable < busy_thresh &&
		    (tdq->tdq_flags & TDQF_BUSY)) {
			atomic_clear_int(&tdq_busy, 1 << TDQ_ID(tdq));
			tdq->tdq_flags &= ~TDQF_BUSY;
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		if (tdq->tdq_idx != tdq->tdq_ridx)
			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
			runq_remove_idx(ts->ts_runq, ts, NULL);
		/*
		 * For timeshare threads we update the priority here so
		 * the priority reflects the time we've been sleeping.
		 */
		ts->ts_ltick = ticks;
		sched_pctcpu_update(ts);
		sched_priority(ts->ts_thread);
		runq_remove(ts->ts_runq, ts);
tdq_load_add(struct tdq *tdq, struct td_sched *ts)
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
	if (class != PRI_ITHD &&
	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_group->tdg_load++;

tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	if (class != PRI_ITHD &&
	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_group->tdg_load--;
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
sched_smp_tick(struct thread *td)
	if (ticks >= bal_tick)
	if (ticks >= gbal_tick && balance_groups)
		sched_balance_groups();
	td->td_sched->ts_rltick = ticks;
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 */
	struct tdq_group *high;
	struct tdq_group *low;
	struct tdq_group *tdg;

	bal_tick = ticks + (random() % (hz * 2));
	if (smp_started == 0)
	i = random() % (tdg_maxid + 1);
	for (cnt = 0; cnt <= tdg_maxid; cnt++) {
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || tdg->tdg_load > high->tdg_load)
		    && tdg->tdg_transferable)
		if (low == NULL || tdg->tdg_load < low->tdg_load)
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->tdg_members),
		    LIST_FIRST(&low->tdg_members));

sched_balance_groups(void)
	gbal_tick = ticks + (random() % (hz * 2));
	mtx_assert(&sched_lock, MA_OWNED);
	for (i = 0; i <= tdg_maxid; i++)
		sched_balance_group(TDQ_GROUP(i));

sched_balance_group(struct tdq_group *tdg)
	if (tdg->tdg_transferable == 0)
	LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
		load = tdq->tdq_load;
		if (high == NULL || load > high->tdq_load)
		if (low == NULL || load < low->tdq_load)
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
sched_balance_pair(struct tdq *high, struct tdq *low)
	/*
	 * If we're transferring within a group we have to use this specific
	 * tdq's transferable count, otherwise we can steal from other members
	 */
	if (high->tdq_group == low->tdq_group) {
		transferable = high->tdq_transferable;
		high_load = high->tdq_load;
		low_load = low->tdq_load;
		transferable = high->tdq_group->tdg_transferable;
		high_load = high->tdq_group->tdg_load;
		low_load = low->tdq_group->tdg_load;
	if (transferable == 0)
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * threads we actually have to give up (transferable).
	 */
	diff = high_load - low_load;
	move = min(move, transferable);
	for (i = 0; i < move; i++)
		tdq_move(high, TDQ_ID(low));
tdq_move(struct tdq *from, int cpu)
	ts = tdq_steal(tdq, 1);
		struct tdq_group *tdg;

		tdg = tdq->tdq_group;
		LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
			if (tdq == from || tdq->tdq_transferable == 0)
			ts = tdq_steal(tdq, 1);
			panic("tdq_move: No threads available with a "
			    "transferable count of %d\n",
			    tdg->tdg_transferable);
	sched_rem(ts->ts_thread);
	sched_pin_td(ts->ts_thread);
	sched_add(ts->ts_thread, SRQ_YIELDING);
	sched_unpin_td(ts->ts_thread);
tdq_idled(struct tdq *tdq)
	struct tdq_group *tdg;

	tdg = tdq->tdq_group;
	/*
	 * If we're in a cpu group, try and steal threads from another cpu in
	 * the group before idling.
	 */
	if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
		LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
			if (steal == tdq || steal->tdq_transferable == 0)
			ts = tdq_steal(steal, 0);
		steal = TDQ_CPU(cpu);
		if (steal->tdq_transferable == 0)
		ts = tdq_steal(steal, 1);
		    "tdq_idled: stealing td %p(%s) pri %d from %d busy 0x%X",
		    ts->ts_thread, ts->ts_thread->td_proc->p_comm,
		    ts->ts_thread->td_priority, cpu, tdq_busy);
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a thread bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	tdg->tdg_idlemask |= PCPU_GET(cpumask);
	if (tdg->tdg_idlemask == tdg->tdg_cpumask)
		atomic_set_int(&tdq_idle, tdg->tdg_mask);

	sched_rem(ts->ts_thread);
	ts->ts_cpu = PCPU_GET(cpuid);
	sched_pin_td(ts->ts_thread);
	sched_add(ts->ts_thread, SRQ_YIELDING);
	sched_unpin_td(ts->ts_thread);
tdq_notify(struct td_sched *ts)
	pri = ts->ts_thread->td_priority;
	pcpu = pcpu_find(cpu);
	ctd = pcpu->pc_curthread;
	cpri = ctd->td_priority;

	/*
	 * If our priority is not better than the current priority there is
	 */
	if (cpri > PRI_MIN_IDLE)
	/*
	 * If we're realtime or better and there is timeshare or worse running
	 */
	if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
	/*
	 * Otherwise only IPI if we exceed the threshold.
	 */
	if (pri > ipi_thresh)
	ctd->td_flags |= TDF_NEEDRESCHED;
	if (cpri < PRI_MIN_IDLE) {
			ipi_selected(1 << cpu, IPI_AST);
		else if (ipi_preempt)
			ipi_selected(1 << cpu, IPI_PREEMPT);
		ipi_selected(1 << cpu, IPI_PREEMPT);
static struct td_sched *
runq_steal(struct runq *rq)
	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ts, rqh, ts_procq) {
				if (THREAD_CAN_MIGRATE(ts->ts_thread))

static struct td_sched *
tdq_steal(struct tdq *tdq, int stealidle)
	/*
	 * Steal from next first to try to get a non-interactive task that
	 * may not have run for a while.
	 * XXX Need to effect steal order for timeshare threads.
	 */
	if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
	if ((ts = runq_steal(&tdq->tdq_timeshare)) != NULL)
	return (runq_steal(&tdq->tdq_idle));
tdq_pickidle(struct tdq *tdq, struct td_sched *ts)
	struct tdq_group *tdg;

	self = PCPU_GET(cpuid);
	if (smp_started == 0)
	/*
	 * If the current CPU has idled, just run it here.
	 */
	if ((tdq->tdq_group->tdg_idlemask & PCPU_GET(cpumask)) != 0)
	/*
	 * Try the last group we ran on.
	 */
	tdg = TDQ_CPU(ts->ts_cpu)->tdq_group;
	cpu = ffs(tdg->tdg_idlemask);
	/*
	 * Search for an idle group.
	 */
	/*
	 * XXX If there are no idle groups, check for an idle core.
	 */
tdq_pickpri(struct tdq *tdq, struct td_sched *ts, int flags)
	self = PCPU_GET(cpuid);
	if (smp_started == 0)
	pri = ts->ts_thread->td_priority;
	/*
	 * Regardless of affinity, if the last cpu is idle send it there.
	 */
	pcpu = pcpu_find(ts->ts_cpu);
	if (pcpu->pc_curthread->td_priority > PRI_MIN_IDLE) {
		    "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
		    pcpu->pc_curthread->td_priority);
	/*
	 * If we have affinity, try to place it on the cpu we last ran on.
	 */
	if (SCHED_AFFINITY(ts) && pcpu->pc_curthread->td_priority > pri) {
		    "affinity for %d, ltick %d ticks %d pri %d curthread %d",
		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
		    pcpu->pc_curthread->td_priority);
	/*
	 * Try ourself first; if we're running something lower priority this
	 * may have some locality with the waking thread and execute faster.
	 */
	/*
	 * If we're being awoken by an interrupt thread or the waker
	 * is going right to sleep run here as well.
	 */
	if ((TDQ_SELF()->tdq_load == 1) && (flags & SRQ_YIELDING ||
	    curthread->td_pri_class == PRI_ITHD)) {
		CTR2(KTR_ULE, "tryself load %d flags %d",
		    TDQ_SELF()->tdq_load, flags);
	/*
	 * Look for an idle group.
	 */
	CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
	if (tryselfidle && pri < curthread->td_priority) {
		CTR1(KTR_ULE, "tryself %d",
		    curthread->td_priority);
	/*
	 * Now search for the cpu running the lowest priority thread with
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		pcpu = pcpu_find(cpu);
		pri = pcpu->pc_curthread->td_priority;
		    "cpu %d pri %d lowcpu %d lowpri %d",
		    cpu, pri, lowcpu, lowpri);
		load = TDQ_CPU(cpu)->tdq_load;
		if (lowpri && lowpri == pri && load > lowload)
/*
 * Pick the highest priority task we have and return it.
 */
static struct td_sched *
tdq_choose(struct tdq *tdq)
	mtx_assert(&sched_lock, MA_OWNED);

	ts = runq_choose(&tdq->tdq_realtime);
		KASSERT(ts->ts_thread->td_priority <= PRI_MAX_REALTIME,
		    ("tdq_choose: Invalid priority on realtime queue %d",
		    ts->ts_thread->td_priority));
	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
		KASSERT(ts->ts_thread->td_priority <= PRI_MAX_TIMESHARE &&
		    ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
		    ("tdq_choose: Invalid priority on timeshare queue %d",
		    ts->ts_thread->td_priority));
	ts = runq_choose(&tdq->tdq_idle);
		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
		    ("tdq_choose: Invalid priority on idle queue %d",
		    ts->ts_thread->td_priority));
tdq_setup(struct tdq *tdq)
	runq_init(&tdq->tdq_realtime);
	runq_init(&tdq->tdq_timeshare);
	runq_init(&tdq->tdq_idle);
sched_setup(void *dummy)
	/*
	 * To avoid divide-by-zero, we set realstathz to a dummy value
	 * in case sched_clock() is called before sched_initticks().
	 */
	sched_slice = (realstathz / 10);	/* ~100ms */
	tickincr = 1 << SCHED_TICK_SHIFT;

	/*
	 * Initialize the tdqs.
	 */
	for (i = 0; i < MAXCPU; i++) {
		tdq_setup(&tdq_cpu[i]);

		struct tdq_group *tdg;

		for (cpus = 0, i = 0; i < MAXCPU; i++) {
			tdg = &tdq_groups[cpus];
			/*
			 * Setup a tdq group with one member.
			 */
			tdq->tdq_transferable = 0;
			tdq->tdq_group = tdg;
			tdg->tdg_idlemask = 0;
			tdg->tdg_cpumask = tdg->tdg_mask = 1 << i;
			tdg->tdg_transferable = 0;
			LIST_INIT(&tdg->tdg_members);
			LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
		tdg_maxid = cpus - 1;

		struct tdq_group *tdg;
		struct cpu_group *cg;

		for (i = 0; i < smp_topology->ct_count; i++) {
			cg = &smp_topology->ct_group[i];
			tdg = &tdq_groups[i];
			/*
			 * Initialize the group.
			 */
			tdg->tdg_idlemask = 0;
			tdg->tdg_transferable = 0;
			tdg->tdg_cpus = cg->cg_count;
			tdg->tdg_cpumask = cg->cg_mask;
			LIST_INIT(&tdg->tdg_members);
			/*
			 * Find all of the group members and add them.
			 */
			for (j = 0; j < MAXCPU; j++) {
				if ((cg->cg_mask & (1 << j)) != 0) {
					if (tdg->tdg_mask == 0)
						tdg->tdg_mask = 1 << j;
					tdq_cpu[j].tdq_transferable = 0;
					tdq_cpu[j].tdq_group = tdg;
					LIST_INSERT_HEAD(&tdg->tdg_members,
					    &tdq_cpu[j], tdq_siblings);
			if (tdg->tdg_cpus > 1)
		tdg_maxid = smp_topology->ct_count - 1;

	/*
	 * Stagger the group and global load balancer so they do not
	 * interfere with each other.
	 */
	bal_tick = ticks + hz;
	gbal_tick = ticks + (hz / 2);
	tdq_setup(TDQ_SELF());

	mtx_lock_spin(&sched_lock);
	tdq_load_add(TDQ_SELF(), &td_sched0);
	mtx_unlock_spin(&sched_lock);
sched_initticks(void *dummy)
	mtx_lock_spin(&sched_lock);
	realstathz = stathz ? stathz : hz;
	sched_slice = (realstathz / 10);	/* ~100ms */
	/*
	 * tickincr is shifted out by 10 to avoid rounding errors due to
	 * hz not being evenly divisible by stathz on all platforms.
	 */
	tickincr = (hz << SCHED_TICK_SHIFT) / realstathz;
	/*
	 * This does not work for values of stathz that are more than
	 * (1 << SCHED_TICK_SHIFT) * hz.  In practice this does not happen.
	 */
	affinity = SCHED_AFFINITY_DEFAULT;
	mtx_unlock_spin(&sched_lock);
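
/*
 * Worked example (assumed clock rates, not guaranteed: hz = 1000 and
 * stathz = 128): tickincr = (1000 << 10) / 128 = 8000, i.e. 7.8125 hz
 * ticks per stathz tick in 10-bit fixed point, and sched_slice =
 * 128 / 10 = 12 stathz ticks, roughly 94 ms per slice.
 */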
/*
 * Scale the scheduling priority according to the "interactivity" of this
 */
sched_priority(struct thread *td)
	if (td->td_pri_class != PRI_TIMESHARE)
	/*
	 * If the score is interactive we place the thread in the realtime
	 * queue with a priority that is less than kernel and interrupt
	 * priorities.  These threads are not subject to nice restrictions.
	 *
	 * Scores greater than this are placed on the normal realtime queue
	 * where the priority is partially decided by the most recent cpu
	 * utilization and the rest is decided by nice value.
	 */
	score = sched_interact_score(td);
	if (score < sched_interact) {
		pri = PRI_MIN_REALTIME;
		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
		    ("sched_priority: invalid interactive priority %d score %d",
		pri = SCHED_PRI_MIN;
		if (td->td_sched->ts_ticks)
			pri += SCHED_PRI_TICKS(td->td_sched);
		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
		if (!(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE)) {
			static int once = 1;
			printf("sched_priority: invalid priority %d",
			printf("nice %d, ticks %d ftick %d ltick %d tick pri %d\n",
			    td->td_proc->p_nice,
			    td->td_sched->ts_ticks,
			    td->td_sched->ts_ftick,
			    td->td_sched->ts_ltick,
			    SCHED_PRI_TICKS(td->td_sched));
			pri = min(max(pri, PRI_MIN_TIMESHARE),
	sched_user_prio(td, pri);
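
/*
 * Worked example for the interactive branch above (hedged; assumes the
 * stock <sys/priority.h> realtime range 48-79 and the default
 * sched_interact of 30): the slope is (79 - 48) / 30 = 1, so an
 * interactivity score of 15 yields priority 48 + 1 * 15 = 63, which is
 * numerically below the kernel range but above interrupt threads.  The
 * worst interactive score, 29, still stays within PRI_MAX_REALTIME.
 */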
/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 */
sched_interact_update(struct thread *td)
	struct td_sched *ts;

	sum = ts->skg_runtime + ts->skg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
	/*
	 * This only happens from two places:
	 * 1) We have added an unusual amount of run time from fork_exit.
	 * 2) We have added an unusual amount of sleep time from sched_sleep().
	 */
	if (sum > SCHED_SLP_RUN_MAX * 2) {
		if (ts->skg_runtime > ts->skg_slptime) {
			ts->skg_runtime = SCHED_SLP_RUN_MAX;
			ts->skg_slptime = 1;
			ts->skg_slptime = SCHED_SLP_RUN_MAX;
			ts->skg_runtime = 1;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		ts->skg_runtime /= 2;
		ts->skg_slptime /= 2;
	ts->skg_runtime = (ts->skg_runtime / 5) * 4;
	ts->skg_slptime = (ts->skg_slptime / 5) * 4;
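
/*
 * Numeric check of the two clamps above (hz = 1000 assumed, so
 * SCHED_SLP_RUN_MAX = 5120000 in shifted ticks): a sum of 6500000
 * exceeds the 6/5 cutoff of 6144000 and is halved to 3250000, while a
 * sum of 5500000 falls through to the 4/5 decay and becomes 4400000,
 * back under the cap.
 */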
sched_interact_fork(struct thread *td)
	sum = td->td_sched->skg_runtime + td->td_sched->skg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		td->td_sched->skg_runtime /= ratio;
		td->td_sched->skg_slptime /= ratio;
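
/*
 * Example (hz = 1000 assumed): SCHED_SLP_RUN_FORK is 512000 in shifted
 * ticks, so a child inheriting sum = 2048000 gets ratio = 4 and both
 * skg_runtime and skg_slptime are quartered, leaving it with at most
 * half a second of inherited history and the same run/sleep ratio.
 */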
sched_interact_score(struct thread *td)
	if (td->td_sched->skg_runtime > td->td_sched->skg_slptime) {
		div = max(1, td->td_sched->skg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (td->td_sched->skg_slptime / div)));
	if (td->td_sched->skg_slptime > td->td_sched->skg_runtime) {
		div = max(1, td->td_sched->skg_slptime / SCHED_INTERACT_HALF);
		return (td->td_sched->skg_runtime / div);
	/* runtime == slptime */
	if (td->td_sched->skg_runtime)
		return (SCHED_INTERACT_HALF);

	/*
	 * This can happen if slptime and runtime are 0.
	 */
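
/*
 * Worked example of the score (the units cancel, so plain numbers
 * suffice): with skg_slptime = 100 and skg_runtime = 25, div =
 * max(1, 100 / 50) = 2 and the score is 25 / 2 = 12, comfortably below
 * SCHED_INTERACT_THRESH.  Reversed, skg_runtime = 100 and skg_slptime =
 * 25 gives 50 + (50 - 25 / 2) = 88, firmly non-interactive.
 */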
/*
 * Called from proc0_init() to bootstrap the scheduler.
 */
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL;	/* XXX */
	thread0.td_sched = &td_sched0;
	td_sched0.ts_ltick = ticks;
	td_sched0.ts_ftick = ticks;
	td_sched0.ts_thread = &thread0;
/*
 * This is only somewhat accurate since, given many processes of the same
 * priority, they will switch when their slices run out, which will be
 * at most sched_slice stathz ticks.
 */
sched_rr_interval(void)
	/* Convert sched_slice to hz */
	return (hz / (realstathz / sched_slice));
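
/*
 * E.g. with hz = 1000, realstathz = 128 and sched_slice = 12 (the
 * defaults under those assumed clock rates), realstathz / sched_slice =
 * 10, so the reported round-robin interval is 1000 / 10 = 100 hz ticks,
 * i.e. about 100 ms.
 */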
sched_pctcpu_update(struct td_sched *ts)
	if (ts->ts_ticks == 0)
	if (ticks - (hz / 10) < ts->ts_ltick &&
	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
	ts->ts_ltick = ticks;
	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
sched_thread_priority(struct thread *td, u_char prio)
	struct td_sched *ts;

	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority == prio)
	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  This could be optimized to not re-add in some
		 */
		td->td_priority = prio;
		sched_add(td, SRQ_BORROWING);
		td->td_priority = prio;
/*
 * Update a thread's priority when it is lent another thread's
 */
sched_lend_prio(struct thread *td, u_char prio)
	td->td_flags |= TDF_BORROWING;
	sched_thread_priority(td, prio);

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 */
sched_unlend_prio(struct thread *td, u_char prio)
	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_thread_priority(td, base_pri);
		sched_lend_prio(td, prio);
sched_prio(struct thread *td, u_char prio)
	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't
	 * ever lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_thread_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
sched_user_prio(struct thread *td, u_char prio)
	td->td_base_user_pri = prio;
	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;

	if (TD_ON_UPILOCK(td) && oldprio != prio)
		umtx_pi_adjust(td, oldprio);

sched_lend_user_prio(struct thread *td, u_char prio)
	td->td_flags |= TDF_UBORROWING;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;

	if (TD_ON_UPILOCK(td) && oldprio != prio)
		umtx_pi_adjust(td, oldprio);

sched_unlend_user_prio(struct thread *td, u_char prio)
	base_pri = td->td_base_user_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_UBORROWING;
		sched_user_prio(td, base_pri);
		sched_lend_user_prio(td, prio);
sched_switch(struct thread *td, struct thread *newtd, int flags)
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);

	preempt = flags & SW_PREEMPT;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	/*
	 * If the thread has been assigned it may be in the process of switching
	 * to the new cpu.  This is the case in sched_bind().
	 */
	if (TD_IS_IDLETHREAD(td)) {
		tdq_load_rem(tdq, ts);
		if (TD_IS_RUNNING(td)) {
			/*
			 * Don't allow the thread to migrate
			 * from a preemption.
			 */
			sched_add(td, preempt ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
	if (newtd != NULL) {
		/*
		 * If we bring in a thread account for it as if it had been
		 * added to the run queue and then chosen.
		 */
		TD_SET_RUNNING(newtd);
		tdq_load_add(TDQ_SELF(), newtd->td_sched);
		newtd = choosethread();
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
	cpu_switch(td, newtd);
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
sched_nice(struct proc *p, int nice)
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		sched_prio(td, td->td_base_user_pri);

sched_sleep(struct thread *td)
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_sched->ts_slptime = ticks;

sched_wakeup(struct thread *td)
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * If we slept for more than a tick update our interactivity and
	 */
	slptime = ts->ts_slptime;
	if (slptime && slptime != ticks) {
		hzticks = (ticks - slptime) << SCHED_TICK_SHIFT;
		ts->skg_slptime += hzticks;
		sched_interact_update(td);
		sched_pctcpu_update(ts);
	/* Reset the slice value after we sleep. */
	ts->ts_slice = sched_slice;
	sched_add(td, SRQ_BORING);
/*
 * Penalize the parent for creating a new child and initialize the child's
 */
sched_fork(struct thread *td, struct thread *child)
	mtx_assert(&sched_lock, MA_OWNED);
	sched_fork_thread(td, child);
	/*
	 * Penalize the parent and child for forking.
	 */
	sched_interact_fork(child);
	sched_priority(child);
	td->td_sched->skg_runtime += tickincr;
	sched_interact_update(td);
sched_fork_thread(struct thread *td, struct thread *child)
	struct td_sched *ts;
	struct td_sched *ts2;

	sched_newthread(child);
	ts2 = child->td_sched;
	ts2->ts_cpu = ts->ts_cpu;
	ts2->ts_runq = NULL;
	/*
	 * Grab our parent's cpu estimation information and priority.
	 */
	ts2->ts_ticks = ts->ts_ticks;
	ts2->ts_ltick = ts->ts_ltick;
	ts2->ts_ftick = ts->ts_ftick;
	child->td_user_pri = td->td_user_pri;
	child->td_base_user_pri = td->td_base_user_pri;
	/*
	 * And update interactivity score.
	 */
	ts2->skg_slptime = ts->skg_slptime;
	ts2->skg_runtime = ts->skg_runtime;
	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
sched_class(struct thread *td, int class)
	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_pri_class == class)
	/*
	 * On SMP if we're on the RUNQ we must adjust the transferable
	 * count because we could be changing to or from an interrupt
	 */
	if (TD_ON_RUNQ(td)) {
		tdq = TDQ_CPU(td->td_sched->ts_cpu);
		if (THREAD_CAN_MIGRATE(td)) {
			tdq->tdq_transferable--;
			tdq->tdq_group->tdg_transferable--;
		td->td_pri_class = class;
		if (THREAD_CAN_MIGRATE(td)) {
			tdq->tdq_transferable++;
			tdq->tdq_group->tdg_transferable++;
	td->td_pri_class = class;
/*
 * Return some of the child's priority and interactivity to the parent.
 */
sched_exit(struct proc *p, struct thread *child)
	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
	    child, child->td_proc->p_comm, child->td_priority);

	td = FIRST_THREAD_IN_PROC(p);
	sched_exit_thread(td, child);

sched_exit_thread(struct thread *td, struct thread *child)
	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    child, child->td_proc->p_comm, child->td_priority);

	tdq_load_rem(TDQ_CPU(child->td_sched->ts_cpu), child->td_sched);
	/*
	 * KSE forks and exits so often that this penalty causes short-lived
	 * threads to always be non-interactive.  This causes mozilla to
	 */
	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
	/*
	 * Give the child's runtime to the parent without returning the
	 * sleep time as a penalty to the parent.  This causes shells that
	 * launch expensive things to mark their children as expensive.
	 */
	td->td_sched->skg_runtime += child->td_sched->skg_runtime;
	sched_interact_update(td);
sched_userret(struct thread *td)
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	if (td->td_priority != td->td_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = td->td_user_pri;
		td->td_base_pri = td->td_user_pri;
		mtx_unlock_spin(&sched_lock);
sched_clock(struct thread *td)
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * Advance the insert index once for each tick to ensure that all
	 * threads get a chance to run.
	 */
	if (tdq->tdq_idx == tdq->tdq_ridx) {
		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
			tdq->tdq_ridx = tdq->tdq_idx;
	/*
	 * We only do slicing code for TIMESHARE threads.
	 */
	if (td->td_pri_class != PRI_TIMESHARE)
	/*
	 * We used a tick; charge it to the thread so that we can compute our
	 */
	td->td_sched->skg_runtime += tickincr;
	sched_interact_update(td);
	/*
	 * We used up one time slice.
	 */
	if (--ts->ts_slice > 0)
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	td->td_flags |= TDF_NEEDRESCHED;
sched_runnable(void)
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (tdq->tdq_load > 0)
	if (tdq->tdq_load - 1 > 0)

	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	ts = tdq_choose(tdq);
		if (ts->ts_thread->td_priority > PRI_MIN_IDLE)
			if (tdq_idled(tdq) == 0)
		tdq_runq_rem(tdq, ts);
		return (ts->ts_thread);
	if (tdq_idled(tdq) == 0)
	return (PCPU_GET(idlethread));
sched_preempt(struct thread *td)
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
	/*
	 * Always preempt IDLE threads.  Otherwise only if the preempting
	 * thread is an ithread.
	 */
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "sched_preempt: in critical section %d",
		ctd->td_owepreempt = 1;
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
sched_add(struct thread *td, int flags)
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ts->ts_runq == NULL,
	    ("sched_add: thread %p is still assigned to a run queue", td));

	class = PRI_BASE(td->td_pri_class);
	preemptive = !(flags & SRQ_YIELDING);
	/*
	 * Recalculate the priority before we select the target cpu or
	 */
	if (class == PRI_TIMESHARE)
	if (ts->ts_slice == 0)
		ts->ts_slice = sched_slice;

	cpuid = PCPU_GET(cpuid);
	/*
	 * Pick the destination cpu and if it isn't ours transfer to the
	 */
	if (THREAD_CAN_MIGRATE(td)) {
		if (td->td_priority <= PRI_MAX_ITHD) {
			CTR2(KTR_ULE, "ithd %d < %d",
			    td->td_priority, PRI_MAX_ITHD);
		} else if (pick_pri)
			ts->ts_cpu = tdq_pickpri(tdq, ts, flags);
			ts->ts_cpu = tdq_pickidle(tdq, ts);
		CTR1(KTR_ULE, "pinned %d", td->td_pinned);
	if (ts->ts_cpu != cpuid)
	tdq = TDQ_CPU(ts->ts_cpu);
	cpumask = 1 << ts->ts_cpu;
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.
	 */
	if ((class != PRI_IDLE && class != PRI_ITHD) &&
	    (tdq->tdq_group->tdg_idlemask & cpumask) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (tdq->tdq_group->tdg_idlemask ==
		    tdq->tdq_group->tdg_cpumask)
			atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		tdq->tdq_group->tdg_idlemask &= ~cpumask;

	/*
	 * Pick the run queue based on priority.
	 */
	if (td->td_priority <= PRI_MAX_REALTIME)
		ts->ts_runq = &tdq->tdq_realtime;
	else if (td->td_priority <= PRI_MAX_TIMESHARE)
		ts->ts_runq = &tdq->tdq_timeshare;
		ts->ts_runq = &tdq->tdq_idle;
	if (preemptive && sched_preempt(td))
	tdq_runq_add(tdq, ts, flags);
	tdq_load_add(tdq, ts);
	if (ts->ts_cpu != cpuid) {
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
sched_rem(struct thread *td)
	struct td_sched *ts;

	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(TD_ON_RUNQ(td),
	    ("sched_rem: thread not on run queue"));
	tdq = TDQ_CPU(ts->ts_cpu);
	tdq_runq_rem(tdq, ts);
	tdq_load_rem(tdq, ts);
sched_pctcpu(struct thread *td)
	struct td_sched *ts;

	mtx_lock_spin(&sched_lock);
	sched_pctcpu_update(ts);
	/* How many rtick per second ? */
	rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
	pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
	td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick;
	mtx_unlock_spin(&sched_lock);
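
/*
 * Worked example of the fixed-point math above (hedged; assumes hz =
 * 1000 and the stock FSHIFT = 11 from <sys/param.h>, so FSCALE = 2048):
 * a thread with SCHED_TICK_HZ(ts) = 2500 over the 10 second window gives
 * rtick = 250, and pctcpu = (2048 * ((2048 * 250) / 1000)) >> 11 = 512,
 * i.e. 0.25 * FSCALE, which ps(1) renders as 25%.
 */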
sched_bind(struct thread *td, int cpu)
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	if (ts->ts_flags & TSF_BOUND)
	ts->ts_flags |= TSF_BOUND;
	if (PCPU_GET(cpuid) == cpu)
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL, NULL);

sched_unbind(struct thread *td)
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	if ((ts->ts_flags & TSF_BOUND) == 0)
	ts->ts_flags &= ~TSF_BOUND;

sched_is_bound(struct thread *td)
	mtx_assert(&sched_lock, MA_OWNED);
	return (td->td_sched->ts_flags & TSF_BOUND);

sched_relinquish(struct thread *td)
	mtx_lock_spin(&sched_lock);
	if (td->td_pri_class == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL, NULL);
	mtx_unlock_spin(&sched_lock);
	for (i = 0; i <= tdg_maxid; i++)
		total += TDQ_GROUP(i)->tdg_load;
	return (TDQ_SELF()->tdq_sysload);

sched_sizeof_proc(void)
	return (sizeof(struct proc));

sched_sizeof_thread(void)
	return (sizeof(struct thread) + sizeof(struct td_sched));
	struct td_sched *ts;

	ts = curthread->td_sched;
	/* Adjust ticks for pctcpu */
	ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
	ts->ts_ltick = ticks;
	/*
	 * Update if we've exceeded our desired tick threshold by over one
	 */
	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
		sched_pctcpu_update(ts);
/*
 * The actual idle process.
 */
sched_idletd(void *dummy)
	mtx_assert(&Giant, MA_NOTOWNED);
	/* ULE Relies on preemption for idle interruption. */
static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, tickincr, CTLFLAG_RD, &tickincr, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, realstathz, CTLFLAG_RD, &realstathz, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_affinity, CTLFLAG_RW,
SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_tryself, CTLFLAG_RW,
SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_tryselfidle, CTLFLAG_RW,
    &tryselfidle, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, ipi_preempt, CTLFLAG_RW, &ipi_preempt, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, ipi_ast, CTLFLAG_RW, &ipi_ast, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, ipi_thresh, CTLFLAG_RW, &ipi_thresh, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_busy, CTLFLAG_RW, &steal_busy, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, busy_thresh, CTLFLAG_RW, &busy_thresh, 0, "");

static fixpt_t ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

#define	KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"