2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * This file implements the ULE scheduler.  ULE supports independent CPU
 * run queues and fine-grained locking.  It has superior interactive
 * performance under load even on uniprocessor systems.
33 * ULE is the last three letters in schedule. It owes its name to a
34 * generic user created for a scheduling system by Paul Mikesell at
35 * Isilon Systems and a general lack of creativity on the part of the author.
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
41 #include "opt_hwpmc_hooks.h"
42 #include "opt_sched.h"
44 #include <sys/param.h>
45 #include <sys/systm.h>
47 #include <sys/kernel.h>
50 #include <sys/mutex.h>
52 #include <sys/resource.h>
53 #include <sys/resourcevar.h>
54 #include <sys/sched.h>
57 #include <sys/sysctl.h>
58 #include <sys/sysproto.h>
59 #include <sys/turnstile.h>
61 #include <sys/vmmeter.h>
64 #include <sys/ktrace.h>
68 #include <sys/pmckern.h>
71 #include <machine/cpu.h>
72 #include <machine/smp.h>
74 #if !defined(__i386__) && !defined(__amd64__) && !defined(__powerpc__) && !defined(__arm__)
75 #error "This architecture is not currently compatible with ULE"
81 * Thread scheduler specific section. All fields are protected
85 TAILQ_ENTRY(td_sched) ts_procq; /* Run queue. */
86 struct thread *ts_thread; /* Active associated thread. */
87 struct runq *ts_runq; /* Run-queue we're queued on. */
88 short ts_flags; /* TSF_* flags. */
89 u_char ts_rqindex; /* Run queue index. */
90 u_char ts_cpu; /* CPU that we have affinity for. */
91 int ts_slice; /* Ticks of slice remaining. */
92 u_int ts_slptime; /* Number of ticks we vol. slept */
93 u_int ts_runtime; /* Number of ticks we were running */
94 /* The following variables are only used for pctcpu calculation */
95 int ts_ltick; /* Last tick that we were running on */
96 int ts_ftick; /* First tick that we were running on */
97 int ts_ticks; /* Tick count */
99 int ts_rltick; /* Real last tick, for affinity. */
102 /* flags kept in ts_flags */
103 #define TSF_BOUND 0x0001 /* Thread can not migrate. */
104 #define TSF_XFERABLE 0x0002 /* Thread was added as transferable. */
106 static struct td_sched td_sched0;
109 * Cpu percentage computation macros and defines.
111 * SCHED_TICK_SECS: Number of seconds to average the cpu usage across.
112 * SCHED_TICK_TARG: Number of hz ticks to average the cpu usage across.
113 * SCHED_TICK_MAX: Maximum number of ticks before scaling back.
114 * SCHED_TICK_SHIFT: Shift factor to avoid rounding away results.
115 * SCHED_TICK_HZ: Compute the number of hz ticks for a given ticks count.
116 * SCHED_TICK_TOTAL: Gives the amount of time we've been recording ticks.
118 #define SCHED_TICK_SECS 10
119 #define SCHED_TICK_TARG (hz * SCHED_TICK_SECS)
120 #define SCHED_TICK_MAX (SCHED_TICK_TARG + hz)
121 #define SCHED_TICK_SHIFT 10
122 #define SCHED_TICK_HZ(ts) ((ts)->ts_ticks >> SCHED_TICK_SHIFT)
123 #define SCHED_TICK_TOTAL(ts) (max((ts)->ts_ltick - (ts)->ts_ftick, hz))
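/*
 * Worked example (illustrative only; assumes hz = 1000, a common but not
 * guaranteed value).  sched_tick() adds 1 << SCHED_TICK_SHIFT to ts_ticks
 * for every hz tick the thread spends running, so a thread that ran during
 * 2500 of the last 10000 ticks has:
 *
 *	SCHED_TICK_HZ(ts)    = (2500 << 10) >> 10 = 2500
 *	SCHED_TICK_TOTAL(ts) = max(ts_ltick - ts_ftick, hz) ~= 10000
 *
 * i.e. roughly 25% cpu over the SCHED_TICK_SECS window.  Once the window
 * grows past SCHED_TICK_MAX (eleven seconds in this example),
 * sched_pctcpu_update() rescales the history back toward SCHED_TICK_TARG.
 */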
126 * These macros determine priorities for non-interactive threads. They are
127 * assigned a priority based on their recent cpu utilization as expressed
128 * by the ratio of ticks to the tick total. NHALF priorities at the start
129 * and end of the MIN to MAX timeshare range are only reachable with negative
130 * or positive nice respectively.
132 * PRI_RANGE: Priority range for utilization dependent priorities.
133 * PRI_NRESV: Number of nice values.
134 * PRI_TICKS: Compute a priority in PRI_RANGE from the ticks count and total.
135 * PRI_NICE: Determines the part of the priority inherited from nice.
137 #define SCHED_PRI_NRESV (PRIO_MAX - PRIO_MIN)
138 #define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2)
139 #define SCHED_PRI_MIN (PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
140 #define SCHED_PRI_MAX (PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
141 #define SCHED_PRI_RANGE (SCHED_PRI_MAX - SCHED_PRI_MIN)
142 #define SCHED_PRI_TICKS(ts) \
143 (SCHED_TICK_HZ((ts)) / \
144 (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
145 #define SCHED_PRI_NICE(nice) (nice)
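/*
 * Worked example (symbolic, independent of the actual PRI_*_TIMESHARE
 * values): for a thread whose tick history shows about 25% cpu use,
 * SCHED_TICK_HZ(ts) is roughly SCHED_TICK_TOTAL(ts) / 4, so
 *
 *	SCHED_PRI_TICKS(ts) ~= SCHED_PRI_RANGE / 4
 *
 * and sched_priority() below assigns roughly
 *
 *	pri = SCHED_PRI_MIN + SCHED_PRI_RANGE / 4 + nice
 *
 * A fully cpu-bound thread lands near SCHED_PRI_MAX before nice is applied;
 * a mostly idle one stays near SCHED_PRI_MIN.
 */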
148 * These determine the interactivity of a process. Interactivity differs from
149 * cpu utilization in that it expresses the voluntary time slept vs time ran
150 * while cpu utilization includes all time not running. This more accurately
151 * models the intent of the thread.
153 * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate
154 * before throttling back.
155 * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time.
156 * INTERACT_MAX: Maximum interactivity value. Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
159 #define SCHED_SLP_RUN_MAX ((hz * 5) << SCHED_TICK_SHIFT)
160 #define SCHED_SLP_RUN_FORK ((hz / 2) << SCHED_TICK_SHIFT)
161 #define SCHED_INTERACT_MAX (100)
162 #define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2)
163 #define SCHED_INTERACT_THRESH (30)
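/*
 * Illustrative example: ts_slptime and ts_runtime are kept in hz ticks
 * shifted left by SCHED_TICK_SHIFT, so SCHED_SLP_RUN_MAX caps the combined
 * history at roughly five seconds.  A thread that voluntarily slept for
 * four of those seconds and ran for one scores about 50 * 1 / 4 = 12 in
 * sched_interact_score(), well under SCHED_INTERACT_THRESH, and is treated
 * as interactive; reverse the two numbers and the score is roughly 87,
 * which is not.
 */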
166 * tickincr: Converts a stathz tick into a hz domain scaled by
167 * the shift factor. Without the shift the error rate
168 * due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0; we fall back to hz in that case.
170 * sched_slice: Runtime of each thread before rescheduling.
171 * preempt_thresh: Priority threshold for preemption and remote IPIs.
173 static int sched_interact = SCHED_INTERACT_THRESH;
174 static int realstathz;
176 static int sched_slice;
178 #ifdef FULL_PREEMPTION
179 static int preempt_thresh = PRI_MAX_IDLE;
181 static int preempt_thresh = PRI_MIN_KERN;
184 static int preempt_thresh = 0;
 * tdq - per processor runqs and statistics.  All fields are protected by the
 * tdq_lock.  The load and lowpri may be accessed without the lock to avoid
 * excess locking in sched_pickcpu().
193 struct mtx *tdq_lock; /* Pointer to group lock. */
194 struct runq tdq_realtime; /* real-time run queue. */
195 struct runq tdq_timeshare; /* timeshare run queue. */
196 struct runq tdq_idle; /* Queue of IDLE threads. */
197 int tdq_load; /* Aggregate load. */
198 u_char tdq_idx; /* Current insert index. */
199 u_char tdq_ridx; /* Current removal index. */
201 u_char tdq_lowpri; /* Lowest priority thread. */
202 int tdq_transferable; /* Transferable thread count. */
203 LIST_ENTRY(tdq) tdq_siblings; /* Next in tdq group. */
204 struct tdq_group *tdq_group; /* Our processor group. */
206 int tdq_sysload; /* For loadavg, !ITHD load. */
 * tdq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
 * In a NUMA environment we'd want an idle bitmap per group and a two tiered
221 struct mtx tdg_lock; /* Protects all fields below. */
222 int tdg_cpus; /* Count of CPUs in this tdq group. */
223 cpumask_t tdg_cpumask; /* Mask of cpus in this group. */
224 cpumask_t tdg_idlemask; /* Idle cpus in this group. */
225 cpumask_t tdg_mask; /* Bit mask for first cpu. */
226 int tdg_load; /* Total load of this group. */
227 int tdg_transferable; /* Transferable load of this group. */
228 LIST_HEAD(, tdq) tdg_members; /* Linked list of all members. */
229 char tdg_name[16]; /* lock name. */
232 #define SCHED_AFFINITY_DEFAULT (max(1, hz / 300))
233 #define SCHED_AFFINITY(ts) ((ts)->ts_rltick > ticks - affinity)
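/*
 * Example (assuming hz = 1000, a typical but not universal setting):
 * SCHED_AFFINITY_DEFAULT works out to max(1, 1000 / 300) = 3 ticks, so
 * SCHED_AFFINITY(ts) considers a thread to still have cache affinity for
 * its last cpu if it ran there within roughly the last 3ms.  The affinity
 * variable itself is initialized from this default in sched_initticks().
 */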
238 static int rebalance = 1;
239 static int balance_interval = 128; /* Default set in sched_initticks(). */
240 static int pick_pri = 1;
242 static int tryself = 1;
243 static int steal_htt = 1;
244 static int steal_idle = 1;
245 static int steal_thresh = 2;
246 static int topology = 0;
249 * One thread queue per processor.
251 static volatile cpumask_t tdq_idle;
252 static int tdg_maxid;
253 static struct tdq tdq_cpu[MAXCPU];
254 static struct tdq_group tdq_groups[MAXCPU];
255 static struct tdq *balance_tdq;
256 static int balance_group_ticks;
257 static int balance_ticks;
259 #define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)])
260 #define TDQ_CPU(x) (&tdq_cpu[(x)])
261 #define TDQ_ID(x) ((int)((x) - tdq_cpu))
262 #define TDQ_GROUP(x) (&tdq_groups[(x)])
263 #define TDG_ID(x) ((int)((x) - tdq_groups))
265 static struct tdq tdq_cpu;
266 static struct mtx tdq_lock;
268 #define TDQ_ID(x) (0)
269 #define TDQ_SELF() (&tdq_cpu)
270 #define TDQ_CPU(x) (&tdq_cpu)
273 #define TDQ_LOCK_ASSERT(t, type) mtx_assert(TDQ_LOCKPTR((t)), (type))
274 #define TDQ_LOCK(t) mtx_lock_spin(TDQ_LOCKPTR((t)))
275 #define TDQ_LOCK_FLAGS(t, f) mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
276 #define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t)))
277 #define TDQ_LOCKPTR(t) ((t)->tdq_lock)
279 static void sched_priority(struct thread *);
280 static void sched_thread_priority(struct thread *, u_char);
281 static int sched_interact_score(struct thread *);
282 static void sched_interact_update(struct thread *);
283 static void sched_interact_fork(struct thread *);
284 static void sched_pctcpu_update(struct td_sched *);
286 /* Operations on per processor queues */
287 static struct td_sched * tdq_choose(struct tdq *);
288 static void tdq_setup(struct tdq *);
289 static void tdq_load_add(struct tdq *, struct td_sched *);
290 static void tdq_load_rem(struct tdq *, struct td_sched *);
291 static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
292 static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
293 void tdq_print(int cpu);
294 static void runq_print(struct runq *rq);
295 static void tdq_add(struct tdq *, struct thread *, int);
297 static void tdq_move(struct tdq *, struct tdq *);
298 static int tdq_idled(struct tdq *);
299 static void tdq_notify(struct td_sched *);
300 static struct td_sched *tdq_steal(struct tdq *);
301 static struct td_sched *runq_steal(struct runq *);
302 static int sched_pickcpu(struct td_sched *, int);
303 static void sched_balance(void);
304 static void sched_balance_groups(void);
305 static void sched_balance_group(struct tdq_group *);
306 static void sched_balance_pair(struct tdq *, struct tdq *);
307 static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
308 static inline struct mtx *thread_block_switch(struct thread *);
309 static inline void thread_unblock_switch(struct thread *, struct mtx *);
310 static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
312 #define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0)
315 static void sched_setup(void *dummy);
316 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
318 static void sched_initticks(void *dummy);
319 SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
322 * Print the threads waiting on a run-queue.
325 runq_print(struct runq *rq)
333 for (i = 0; i < RQB_LEN; i++) {
334 printf("\t\trunq bits %d 0x%zx\n",
335 i, rq->rq_status.rqb_bits[i]);
336 for (j = 0; j < RQB_BPW; j++)
337 if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
338 pri = j + (i << RQB_L2BPW);
339 rqh = &rq->rq_queues[pri];
340 TAILQ_FOREACH(ts, rqh, ts_procq) {
341 printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
342 ts->ts_thread, ts->ts_thread->td_name, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
349 * Print the status of a per-cpu thread queue. Should be a ddb show cmd.
358 printf("tdq %d:\n", TDQ_ID(tdq));
359 printf("\tlockptr %p\n", TDQ_LOCKPTR(tdq));
360 printf("\tload: %d\n", tdq->tdq_load);
361 printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
362 printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
363 printf("\trealtime runq:\n");
364 runq_print(&tdq->tdq_realtime);
365 printf("\ttimeshare runq:\n");
366 runq_print(&tdq->tdq_timeshare);
367 printf("\tidle runq:\n");
368 runq_print(&tdq->tdq_idle);
370 printf("\tload transferable: %d\n", tdq->tdq_transferable);
371 printf("\tlowest priority: %d\n", tdq->tdq_lowpri);
372 printf("\tgroup: %d\n", TDG_ID(tdq->tdq_group));
373 printf("\tLock name: %s\n", tdq->tdq_group->tdg_name);
377 #define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
379 * Add a thread to the actual run-queue. Keeps transferable counts up to
380 * date with what is actually on the run-queue. Selects the correct
381 * queue position for timeshare threads.
384 tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
386 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
387 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
389 if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
390 tdq->tdq_transferable++;
391 tdq->tdq_group->tdg_transferable++;
392 ts->ts_flags |= TSF_XFERABLE;
395 if (ts->ts_runq == &tdq->tdq_timeshare) {
398 pri = ts->ts_thread->td_priority;
399 KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
400 ("Invalid priority %d on timeshare runq", pri));
	 * This queue contains only priorities between MIN and MAX
	 * timeshare.  Use the whole queue to represent these values.
405 if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
406 pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
407 pri = (pri + tdq->tdq_idx) % RQ_NQS;
409 * This effectively shortens the queue by one so we
410 * can have a one slot difference between idx and
411 * ridx while we wait for threads to drain.
413 if (tdq->tdq_ridx != tdq->tdq_idx &&
414 pri == tdq->tdq_ridx)
415 pri = (unsigned char)(pri - 1) % RQ_NQS;
418 runq_add_pri(ts->ts_runq, ts, pri, flags);
420 runq_add(ts->ts_runq, ts, flags);
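/*
 * Sketch of the circular timeshare queue behavior above (assuming RQ_NQS is
 * 64, as defined in <sys/runq.h>): if tdq_idx == tdq_ridx == 10 and a
 * thread's scaled priority offset is 5, it is inserted at slot
 * (5 + 10) % 64 = 15.  sched_clock() advances tdq_idx once per stathz tick,
 * so lower priority threads, which are inserted farther from tdq_idx, wait
 * proportionally longer before their slot becomes the head.  The ridx check
 * keeps a newly inserted thread from landing on the slot currently being
 * drained.
 */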
424 * Remove a thread from a run-queue. This typically happens when a thread
425 * is selected to run. Running threads are not on the queue and the
426 * transferable count does not reflect them.
429 tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
431 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
432 KASSERT(ts->ts_runq != NULL,
433 ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
435 if (ts->ts_flags & TSF_XFERABLE) {
436 tdq->tdq_transferable--;
437 tdq->tdq_group->tdg_transferable--;
438 ts->ts_flags &= ~TSF_XFERABLE;
441 if (ts->ts_runq == &tdq->tdq_timeshare) {
442 if (tdq->tdq_idx != tdq->tdq_ridx)
443 runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
445 runq_remove_idx(ts->ts_runq, ts, NULL);
447 * For timeshare threads we update the priority here so
448 * the priority reflects the time we've been sleeping.
450 ts->ts_ltick = ticks;
451 sched_pctcpu_update(ts);
452 sched_priority(ts->ts_thread);
454 runq_remove(ts->ts_runq, ts);
458 * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load
459 * for this thread to the referenced thread queue.
462 tdq_load_add(struct tdq *tdq, struct td_sched *ts)
466 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
467 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
468 class = PRI_BASE(ts->ts_thread->td_pri_class);
470 CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
471 if (class != PRI_ITHD &&
472 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
474 tdq->tdq_group->tdg_load++;
481 * Remove the load from a thread that is transitioning to a sleep state or
485 tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
489 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
490 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
491 class = PRI_BASE(ts->ts_thread->td_pri_class);
492 if (class != PRI_ITHD &&
493 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
495 tdq->tdq_group->tdg_load--;
499 KASSERT(tdq->tdq_load != 0,
500 ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
502 CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
508 * sched_balance is a simple CPU load balancing algorithm. It operates by
509 * finding the least loaded and most loaded cpu and equalizing their load
510 * by migrating some processes.
512 * Dealing only with two CPUs at a time has two advantages. Firstly, most
513 * installations will only have 2 cpus. Secondly, load balancing too much at
514 * once can have an unpleasant effect on the system. The scheduler rarely has
515 * enough information to make perfect decisions. So this algorithm chooses
516 * simplicity and more gradual effects on load in larger systems.
522 struct tdq_group *high;
523 struct tdq_group *low;
524 struct tdq_group *tdg;
530 * Select a random time between .5 * balance_interval and
531 * 1.5 * balance_interval.
533 balance_ticks = max(balance_interval / 2, 1);
534 balance_ticks += random() % balance_interval;
535 if (smp_started == 0 || rebalance == 0)
540 i = random() % (tdg_maxid + 1);
541 for (cnt = 0; cnt <= tdg_maxid; cnt++) {
544 * Find the CPU with the highest load that has some
545 * threads to transfer.
547 if ((high == NULL || tdg->tdg_load > high->tdg_load)
548 && tdg->tdg_transferable)
550 if (low == NULL || tdg->tdg_load < low->tdg_load)
555 if (low != NULL && high != NULL && high != low)
556 sched_balance_pair(LIST_FIRST(&high->tdg_members),
557 LIST_FIRST(&low->tdg_members));
562 * Balance load between CPUs in a group. Will only migrate within the group.
565 sched_balance_groups()
571 * Select a random time between .5 * balance_interval and
572 * 1.5 * balance_interval.
574 balance_group_ticks = max(balance_interval / 2, 1);
575 balance_group_ticks += random() % balance_interval;
576 if (smp_started == 0 || rebalance == 0)
580 for (i = 0; i <= tdg_maxid; i++)
581 sched_balance_group(TDQ_GROUP(i));
586 * Finds the greatest imbalance between two tdqs in a group.
589 sched_balance_group(struct tdq_group *tdg)
596 if (tdg->tdg_transferable == 0)
600 LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
601 load = tdq->tdq_load;
602 if (high == NULL || load > high->tdq_load)
604 if (low == NULL || load < low->tdq_load)
607 if (high != NULL && low != NULL && high != low)
608 sched_balance_pair(high, low);
612 * Lock two thread queues using their address to maintain lock order.
615 tdq_lock_pair(struct tdq *one, struct tdq *two)
619 TDQ_LOCK_FLAGS(two, MTX_DUPOK);
622 TDQ_LOCK_FLAGS(one, MTX_DUPOK);
627 * Unlock two thread queues. Order is not important here.
630 tdq_unlock_pair(struct tdq *one, struct tdq *two)
637 * Transfer load between two imbalanced thread queues.
640 sched_balance_pair(struct tdq *high, struct tdq *low)
649 tdq_lock_pair(high, low);
	 * If we're transferring within a group we have to use this specific
652 * tdq's transferable count, otherwise we can steal from other members
655 if (high->tdq_group == low->tdq_group) {
656 transferable = high->tdq_transferable;
657 high_load = high->tdq_load;
658 low_load = low->tdq_load;
660 transferable = high->tdq_group->tdg_transferable;
661 high_load = high->tdq_group->tdg_load;
662 low_load = low->tdq_group->tdg_load;
665 * Determine what the imbalance is and then adjust that to how many
666 * threads we actually have to give up (transferable).
668 if (transferable != 0) {
669 diff = high_load - low_load;
673 move = min(move, transferable);
674 for (i = 0; i < move; i++)
677 * IPI the target cpu to force it to reschedule with the new
680 ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
682 tdq_unlock_pair(high, low);
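/*
 * Illustrative numbers for the pass above: with a high queue load of 5 and
 * a low queue load of 1 the imbalance is 4, and the (partially elided)
 * computation moves enough threads to roughly even the two loads -- here
 * two threads, never more than the transferable count -- before the target
 * cpu is IPIed so it notices the new work.  Moving only part of the
 * difference keeps a single balancing pass from overshooting.
 */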
687 * Move a thread from one thread queue to another.
690 tdq_move(struct tdq *from, struct tdq *to)
697 TDQ_LOCK_ASSERT(from, MA_OWNED);
698 TDQ_LOCK_ASSERT(to, MA_OWNED);
704 struct tdq_group *tdg;
706 tdg = tdq->tdq_group;
707 LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
708 if (tdq == from || tdq->tdq_transferable == 0)
720 * Although the run queue is locked the thread may be blocked. Lock
721 * it to clear this and acquire the run-queue lock.
724 /* Drop recursive lock on from acquired via thread_lock(). */
728 td->td_lock = TDQ_LOCKPTR(to);
729 tdq_add(to, td, SRQ_YIELDING);
733 * This tdq has idled. Try to steal a thread from another cpu and switch
737 tdq_idled(struct tdq *tdq)
739 struct tdq_group *tdg;
745 if (smp_started == 0 || steal_idle == 0)
747 /* We don't want to be preempted while we're iterating over tdqs */
749 tdg = tdq->tdq_group;
751 * If we're in a cpu group, try and steal threads from another cpu in
752 * the group before idling. In a HTT group all cpus share the same
753 * run-queue lock, however, we still need a recursive lock to
756 if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
758 LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
759 if (steal == tdq || steal->tdq_transferable == 0)
767 * Find the least loaded CPU with a transferable thread and attempt
768 * to steal it. We make a lockless pass and then verify that the
769 * thread is still available after locking.
774 for (cpu = 0; cpu <= mp_maxid; cpu++) {
777 steal = TDQ_CPU(cpu);
778 if (steal->tdq_transferable == 0)
780 if (steal->tdq_load < highload)
782 highload = steal->tdq_load;
785 if (highload < steal_thresh)
787 steal = TDQ_CPU(highcpu);
790 tdq_lock_pair(tdq, steal);
791 if (steal->tdq_load >= steal_thresh && steal->tdq_transferable)
793 tdq_unlock_pair(tdq, steal);
799 tdq_move(steal, tdq);
801 mi_switch(SW_VOL, NULL);
802 thread_unlock(curthread);
808 * Notify a remote cpu of new work. Sends an IPI if criteria are met.
811 tdq_notify(struct td_sched *ts)
820 pri = ts->ts_thread->td_priority;
821 pcpu = pcpu_find(cpu);
822 ctd = pcpu->pc_curthread;
823 cpri = ctd->td_priority;
826 * If our priority is not better than the current priority there is
834 if (cpri > PRI_MIN_IDLE)
837 * If we're realtime or better and there is timeshare or worse running
840 if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
843 * Otherwise only IPI if we exceed the threshold.
845 if (pri > preempt_thresh)
848 ctd->td_flags |= TDF_NEEDRESCHED;
849 ipi_selected(1 << cpu, IPI_PREEMPT);
853 * Steals load from a timeshare queue. Honors the rotating queue head
856 static struct td_sched *
857 runq_steal_from(struct runq *rq, u_char start)
867 rqb = &rq->rq_status;
868 bit = start & (RQB_BPW -1);
872 for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
873 if (rqb->rqb_bits[i] == 0)
876 for (pri = bit; pri < RQB_BPW; pri++)
877 if (rqb->rqb_bits[i] & (1ul << pri))
882 pri = RQB_FFS(rqb->rqb_bits[i]);
883 pri += (i << RQB_L2BPW);
884 rqh = &rq->rq_queues[pri];
885 TAILQ_FOREACH(ts, rqh, ts_procq) {
886 if (first && THREAD_CAN_MIGRATE(ts->ts_thread))
900 * Steals load from a standard linear queue.
902 static struct td_sched *
903 runq_steal(struct runq *rq)
911 rqb = &rq->rq_status;
912 for (word = 0; word < RQB_LEN; word++) {
913 if (rqb->rqb_bits[word] == 0)
915 for (bit = 0; bit < RQB_BPW; bit++) {
916 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
918 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
919 TAILQ_FOREACH(ts, rqh, ts_procq)
920 if (THREAD_CAN_MIGRATE(ts->ts_thread))
928 * Attempt to steal a thread in priority order from a thread queue.
930 static struct td_sched *
931 tdq_steal(struct tdq *tdq)
935 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
936 if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
938 if ((ts = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL)
940 return (runq_steal(&tdq->tdq_idle));
944 * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the
945 * current lock and returns with the assigned queue locked.
947 static inline struct tdq *
948 sched_setcpu(struct td_sched *ts, int cpu, int flags)
953 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
959 /* If the lock matches just return the queue. */
960 if (td->td_lock == TDQ_LOCKPTR(tdq))
	 * If the thread isn't running, its lockptr is a
965 * turnstile or a sleepqueue. We can just lock_set without
968 if (TD_CAN_RUN(td)) {
970 thread_lock_set(td, TDQ_LOCKPTR(tdq));
	 * The hard case, migration, we need to block the thread first to
	 * prevent order reversals with other CPUs' locks.
978 thread_lock_block(td);
980 thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
985 * Find the thread queue running the lowest priority thread.
1000 for (cpu = 0; cpu <= mp_maxid; cpu++) {
1001 if (CPU_ABSENT(cpu))
1004 pri = tdq->tdq_lowpri;
1005 load = TDQ_CPU(cpu)->tdq_load;
1007 "cpu %d pri %d lowcpu %d lowpri %d",
1008 cpu, pri, lowcpu, lowpri);
1011 if (lowpri && lowpri == pri && load > lowload)
1022 * Find the thread queue with the least load.
1025 tdq_lowestload(void)
1036 lowload = TDQ_CPU(0)->tdq_load;
1037 lowpri = TDQ_CPU(0)->tdq_lowpri;
1038 for (cpu = 1; cpu <= mp_maxid; cpu++) {
1039 if (CPU_ABSENT(cpu))
1042 load = tdq->tdq_load;
1043 pri = tdq->tdq_lowpri;
1044 CTR4(KTR_ULE, "cpu %d load %d lowcpu %d lowload %d",
1045 cpu, load, lowcpu, lowload);
1048 if (load == lowload && pri < lowpri)
1059 * Pick the destination cpu for sched_add(). Respects affinity and makes
1060 * a determination based on load or priority of available processors.
1063 sched_pickcpu(struct td_sched *ts, int flags)
1070 cpu = self = PCPU_GET(cpuid);
1071 if (smp_started == 0)
1074 * Don't migrate a running thread from sched_switch().
1076 if (flags & SRQ_OURSELF) {
1077 CTR1(KTR_ULE, "YIELDING %d",
1078 curthread->td_priority);
1081 pri = ts->ts_thread->td_priority;
1084 * Regardless of affinity, if the last cpu is idle send it there.
1087 if (tdq->tdq_lowpri > PRI_MIN_IDLE) {
1089 "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
1090 ts->ts_cpu, ts->ts_rltick, ticks, pri,
1092 return (ts->ts_cpu);
1095 * If we have affinity, try to place it on the cpu we last ran on.
1097 if (SCHED_AFFINITY(ts) && tdq->tdq_lowpri > pri) {
1099 "affinity for %d, ltick %d ticks %d pri %d curthread %d",
1100 ts->ts_cpu, ts->ts_rltick, ticks, pri,
1102 return (ts->ts_cpu);
1105 * Look for an idle group.
1107 CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
1108 cpu = ffs(tdq_idle);
1112 * If there are no idle cores see if we can run the thread locally.
1113 * This may improve locality among sleepers and wakers when there
1116 if (tryself && pri < TDQ_CPU(self)->tdq_lowpri) {
1117 CTR1(KTR_ULE, "tryself %d",
1118 curthread->td_priority);
1122 * Now search for the cpu running the lowest priority thread with
1126 cpu = tdq_lowestpri();
1128 cpu = tdq_lowestload();
1135 * Pick the highest priority task we have and return it.
1137 static struct td_sched *
1138 tdq_choose(struct tdq *tdq)
1140 struct td_sched *ts;
1142 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1143 ts = runq_choose(&tdq->tdq_realtime);
1146 ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1148 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
1149 ("tdq_choose: Invalid priority on timeshare queue %d",
1150 ts->ts_thread->td_priority));
1154 ts = runq_choose(&tdq->tdq_idle);
1156 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
1157 ("tdq_choose: Invalid priority on idle queue %d",
1158 ts->ts_thread->td_priority));
1166 * Initialize a thread queue.
1169 tdq_setup(struct tdq *tdq)
1173 printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1174 runq_init(&tdq->tdq_realtime);
1175 runq_init(&tdq->tdq_timeshare);
1176 runq_init(&tdq->tdq_idle);
1182 tdg_setup(struct tdq_group *tdg)
1185 printf("ULE: setup cpu group %d\n", TDG_ID(tdg));
1186 snprintf(tdg->tdg_name, sizeof(tdg->tdg_name),
1187 "sched lock %d", (int)TDG_ID(tdg));
1188 mtx_init(&tdg->tdg_lock, tdg->tdg_name, "sched lock",
1189 MTX_SPIN | MTX_RECURSE);
1190 LIST_INIT(&tdg->tdg_members);
1192 tdg->tdg_transferable = 0;
1195 tdg->tdg_cpumask = 0;
1196 tdg->tdg_idlemask = 0;
1200 tdg_add(struct tdq_group *tdg, struct tdq *tdq)
1202 if (tdg->tdg_mask == 0)
1203 tdg->tdg_mask |= 1 << TDQ_ID(tdq);
1204 tdg->tdg_cpumask |= 1 << TDQ_ID(tdq);
1206 tdq->tdq_group = tdg;
1207 tdq->tdq_lock = &tdg->tdg_lock;
1208 LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
1210 printf("ULE: adding cpu %d to group %d: cpus %d mask 0x%X\n",
1211 TDQ_ID(tdq), TDG_ID(tdg), tdg->tdg_cpus, tdg->tdg_cpumask);
1215 sched_setup_topology(void)
1217 struct tdq_group *tdg;
1218 struct cpu_group *cg;
1226 for (i = 0; i < smp_topology->ct_count; i++) {
1227 cg = &smp_topology->ct_group[i];
1228 tdg = &tdq_groups[i];
1230 * Initialize the group.
1234 * Find all of the group members and add them.
1236 for (j = 0; j < MAXCPU; j++) {
1237 if ((cg->cg_mask & (1 << j)) != 0) {
1243 if (tdg->tdg_cpus > 1)
1246 tdg_maxid = smp_topology->ct_count - 1;
1248 sched_balance_groups();
1252 sched_setup_smp(void)
1254 struct tdq_group *tdg;
1259 for (cpus = 0, i = 0; i < MAXCPU; i++) {
1263 tdg = &tdq_groups[i];
1265 * Setup a tdq group with one member.
1272 tdg_maxid = cpus - 1;
1276 * Fake a topology with one group containing all CPUs.
1279 sched_fake_topo(void)
1281 #ifdef SCHED_FAKE_TOPOLOGY
1282 static struct cpu_top top;
1283 static struct cpu_group group;
1286 top.ct_group = &group;
1287 group.cg_mask = all_cpus;
1288 group.cg_count = mp_ncpus;
1289 group.cg_children = 0;
1290 smp_topology = ⊤
1296 * Setup the thread queues and initialize the topology based on MD
1300 sched_setup(void *dummy)
1308 * Setup tdqs based on a topology configuration or vanilla SMP based
1311 if (smp_topology == NULL)
1314 sched_setup_topology();
1319 mtx_init(&tdq_lock, "sched lock", "sched lock", MTX_SPIN | MTX_RECURSE);
1320 tdq->tdq_lock = &tdq_lock;
	 * To avoid divide-by-zero, we set realstathz to a dummy value
	 * in case sched_clock() is called before sched_initticks().
1327 sched_slice = (realstathz/10); /* ~100ms */
1328 tickincr = 1 << SCHED_TICK_SHIFT;
1330 /* Add thread0's load since it's running. */
1332 thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1333 tdq_load_add(tdq, &td_sched0);
 * This routine determines the tickincr after stathz and hz are set up.
1342 sched_initticks(void *dummy)
1346 realstathz = stathz ? stathz : hz;
1347 sched_slice = (realstathz/10); /* ~100ms */
1350 * tickincr is shifted out by 10 to avoid rounding errors due to
1351 * hz not being evenly divisible by stathz on all platforms.
1353 incr = (hz << SCHED_TICK_SHIFT) / realstathz;
	 * This does not work for values of stathz that are more than
	 * (1 << SCHED_TICK_SHIFT) * hz.  In practice this does not happen.
1363 * Set the default balance interval now that we know
1364 * what realstathz is.
1366 balance_interval = realstathz;
	 * Set steal thresh to log2(mp_ncpus) but no greater than 4.  This
1369 * prevents excess thrashing on large machines and excess idle on
1372 steal_thresh = min(ffs(mp_ncpus) - 1, 4);
1373 affinity = SCHED_AFFINITY_DEFAULT;
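/*
 * Example with assumed (not guaranteed) values hz = 1000 and stathz = 128:
 * realstathz = 128, sched_slice = 128 / 10 = 12 stathz ticks (~94ms), and
 * tickincr = (1000 << 10) / 128 = 8000, i.e. each stathz tick charges the
 * running thread the equivalent of hz / stathz ~= 7.8 hz ticks in the
 * shifted fixed-point domain.  With mp_ncpus = 4, steal_thresh becomes
 * min(ffs(4) - 1, 4) = 2.
 */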
1379 * This is the core of the interactivity algorithm. Determines a score based
1380 * on past behavior. It is the ratio of sleep time to run time scaled to
1381 * a [0, 100] integer. This is the voluntary sleep time of a process, which
1382 * differs from the cpu usage because it does not account for time spent
1383 * waiting on a run-queue. Would be prettier if we had floating point.
1386 sched_interact_score(struct thread *td)
1388 struct td_sched *ts;
1393 * The score is only needed if this is likely to be an interactive
1394 * task. Don't go through the expense of computing it if there's
1397 if (sched_interact <= SCHED_INTERACT_HALF &&
1398 ts->ts_runtime >= ts->ts_slptime)
1399 return (SCHED_INTERACT_HALF);
1401 if (ts->ts_runtime > ts->ts_slptime) {
1402 div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1403 return (SCHED_INTERACT_HALF +
1404 (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1406 if (ts->ts_slptime > ts->ts_runtime) {
1407 div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1408 return (ts->ts_runtime / div);
1410 /* runtime == slptime */
1412 return (SCHED_INTERACT_HALF);
1415 * This can happen if slptime and runtime are 0.
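/*
 * Worked example of the scoring above: a thread whose ts_slptime is four
 * times its ts_runtime falls into the slptime > runtime branch and scores
 * runtime / (slptime / SCHED_INTERACT_HALF) ~= 12; with the ratio reversed
 * it scores roughly 100 - 12 = 87.  sched_priority() then folds the nice
 * value into this score before comparing it against sched_interact to
 * decide whether the thread goes on the realtime or the timeshare queue.
 */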
1422 * Scale the scheduling priority according to the "interactivity" of this
1426 sched_priority(struct thread *td)
1431 if (td->td_pri_class != PRI_TIMESHARE)
1434 * If the score is interactive we place the thread in the realtime
1435 * queue with a priority that is less than kernel and interrupt
1436 * priorities. These threads are not subject to nice restrictions.
1438 * Scores greater than this are placed on the normal timeshare queue
1439 * where the priority is partially decided by the most recent cpu
1440 * utilization and the rest is decided by nice value.
1442 * The nice value of the process has a linear effect on the calculated
1443 * score. Negative nice values make it easier for a thread to be
1444 * considered interactive.
	score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
1447 if (score < sched_interact) {
1448 pri = PRI_MIN_REALTIME;
1449 pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
1451 KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
1452 ("sched_priority: invalid interactive priority %d score %d",
1455 pri = SCHED_PRI_MIN;
1456 if (td->td_sched->ts_ticks)
1457 pri += SCHED_PRI_TICKS(td->td_sched);
1458 pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1459 KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
1460 ("sched_priority: invalid priority %d: nice %d, "
1461 "ticks %d ftick %d ltick %d tick pri %d",
1462 pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
1463 td->td_sched->ts_ftick, td->td_sched->ts_ltick,
1464 SCHED_PRI_TICKS(td->td_sched)));
1466 sched_user_prio(td, pri);
1472 * This routine enforces a maximum limit on the amount of scheduling history
1473 * kept. It is called after either the slptime or runtime is adjusted. This
1474 * function is ugly due to integer math.
1477 sched_interact_update(struct thread *td)
1479 struct td_sched *ts;
1483 sum = ts->ts_runtime + ts->ts_slptime;
1484 if (sum < SCHED_SLP_RUN_MAX)
1487 * This only happens from two places:
1488 * 1) We have added an unusual amount of run time from fork_exit.
1489 * 2) We have added an unusual amount of sleep time from sched_sleep().
1491 if (sum > SCHED_SLP_RUN_MAX * 2) {
1492 if (ts->ts_runtime > ts->ts_slptime) {
1493 ts->ts_runtime = SCHED_SLP_RUN_MAX;
1496 ts->ts_slptime = SCHED_SLP_RUN_MAX;
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1506 if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1507 ts->ts_runtime /= 2;
1508 ts->ts_slptime /= 2;
1511 ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1512 ts->ts_slptime = (ts->ts_slptime / 5) * 4;
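/*
 * Numeric sketch of the clamping above, with SCHED_SLP_RUN_MAX worth about
 * five seconds of history: a sum just over the limit (say 5.5s) is scaled
 * by 4/5 down to ~4.4s, a sum over 6/5 of the limit (say 7s) is halved to
 * 3.5s, and anything beyond twice the limit is clamped outright.  Either
 * way old behavior decays quickly enough that the interactivity score
 * tracks what the thread is doing now.
 */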
1516 * Scale back the interactivity history when a child thread is created. The
1517 * history is inherited from the parent but the thread may behave totally
1518 * differently. For example, a shell spawning a compiler process. We want
1519 * to learn that the compiler is behaving badly very quickly.
1522 sched_interact_fork(struct thread *td)
1527 sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
1528 if (sum > SCHED_SLP_RUN_FORK) {
1529 ratio = sum / SCHED_SLP_RUN_FORK;
1530 td->td_sched->ts_runtime /= ratio;
1531 td->td_sched->ts_slptime /= ratio;
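/*
 * Example: if the parent has accumulated two seconds of combined sleep and
 * run history, the ratio against SCHED_SLP_RUN_FORK (half a second) is 4,
 * so the child starts with a quarter of each value.  The child inherits the
 * parent's interactivity ratio but only about half a second of history, so
 * its own behavior dominates the score quickly.
 */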
1536 * Called from proc0_init() to setup the scheduler fields.
1543 * Set up the scheduler specific parts of proc0.
1545 proc0.p_sched = NULL; /* XXX */
1546 thread0.td_sched = &td_sched0;
1547 td_sched0.ts_ltick = ticks;
1548 td_sched0.ts_ftick = ticks;
1549 td_sched0.ts_thread = &thread0;
 * This is only somewhat accurate since, given many processes of the same
 * priority, they will switch when their slices run out, which will be
 * at most sched_slice stathz ticks.
1558 sched_rr_interval(void)
1561 /* Convert sched_slice to hz */
1562 return (hz/(realstathz/sched_slice));
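/*
 * Example with an assumed stathz of 128 and hz of 1000: sched_slice is
 * 128 / 10 = 12 stathz ticks, so the expression above returns
 * 1000 / (128 / 12) = 100 hz ticks, i.e. roughly the intended ~100ms
 * round-robin interval.
 */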
1566 * Update the percent cpu tracking information when it is requested or
1567 * the total history exceeds the maximum. We keep a sliding history of
1568 * tick counts that slowly decays. This is less precise than the 4BSD
1569 * mechanism since it happens with less regular and frequent events.
1572 sched_pctcpu_update(struct td_sched *ts)
1575 if (ts->ts_ticks == 0)
1577 if (ticks - (hz / 10) < ts->ts_ltick &&
1578 SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
1581 * Adjust counters and watermark for pctcpu calc.
1583 if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
1584 ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
1588 ts->ts_ltick = ticks;
1589 ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
1593 * Adjust the priority of a thread. Move it to the appropriate run-queue
1594 * if necessary. This is the back-end for several priority related
1598 sched_thread_priority(struct thread *td, u_char prio)
1600 struct td_sched *ts;
1602 CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1603 td, td->td_name, td->td_priority, prio, curthread,
1604 curthread->td_name);
1606 THREAD_LOCK_ASSERT(td, MA_OWNED);
1607 if (td->td_priority == prio)
1610 if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1612 * If the priority has been elevated due to priority
1613 * propagation, we may have to move ourselves to a new
1614 * queue. This could be optimized to not re-add in some
1618 td->td_priority = prio;
1619 sched_add(td, SRQ_BORROWING);
1621 } else if (TD_IS_RUNNING(td)) {
1624 tdq = TDQ_CPU(ts->ts_cpu);
1625 if (prio < tdq->tdq_lowpri ||
1626 (td->td_priority == tdq->tdq_lowpri && tdq->tdq_load <= 1))
1627 tdq->tdq_lowpri = prio;
1628 td->td_priority = prio;
1631 td->td_priority = prio;
1635 * Update a thread's priority when it is lent another thread's
1639 sched_lend_prio(struct thread *td, u_char prio)
1642 td->td_flags |= TDF_BORROWING;
1643 sched_thread_priority(td, prio);
1647 * Restore a thread's priority when priority propagation is
1648 * over. The prio argument is the minimum priority the thread
1649 * needs to have to satisfy other possible priority lending
1650 * requests. If the thread's regular priority is less
1651 * important than prio, the thread will keep a priority boost
1655 sched_unlend_prio(struct thread *td, u_char prio)
1659 if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1660 td->td_base_pri <= PRI_MAX_TIMESHARE)
1661 base_pri = td->td_user_pri;
1663 base_pri = td->td_base_pri;
1664 if (prio >= base_pri) {
1665 td->td_flags &= ~TDF_BORROWING;
1666 sched_thread_priority(td, base_pri);
1668 sched_lend_prio(td, prio);
1672 * Standard entry for setting the priority to an absolute value.
1675 sched_prio(struct thread *td, u_char prio)
1679 /* First, update the base priority. */
1680 td->td_base_pri = prio;
1683 * If the thread is borrowing another thread's priority, don't
1684 * ever lower the priority.
1686 if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1689 /* Change the real priority. */
1690 oldprio = td->td_priority;
1691 sched_thread_priority(td, prio);
1694 * If the thread is on a turnstile, then let the turnstile update
1697 if (TD_ON_LOCK(td) && oldprio != prio)
1698 turnstile_adjust(td, oldprio);
 * Set the base user priority; does not affect the current running priority.
1705 sched_user_prio(struct thread *td, u_char prio)
1709 td->td_base_user_pri = prio;
1710 if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
1712 oldprio = td->td_user_pri;
1713 td->td_user_pri = prio;
1717 sched_lend_user_prio(struct thread *td, u_char prio)
1721 THREAD_LOCK_ASSERT(td, MA_OWNED);
1722 td->td_flags |= TDF_UBORROWING;
1723 oldprio = td->td_user_pri;
1724 td->td_user_pri = prio;
1728 sched_unlend_user_prio(struct thread *td, u_char prio)
1732 THREAD_LOCK_ASSERT(td, MA_OWNED);
1733 base_pri = td->td_base_user_pri;
1734 if (prio >= base_pri) {
1735 td->td_flags &= ~TDF_UBORROWING;
1736 sched_user_prio(td, base_pri);
1738 sched_lend_user_prio(td, prio);
1743 * Add the thread passed as 'newtd' to the run queue before selecting
1744 * the next thread to run. This is only used for KSE.
1747 sched_switchin(struct tdq *tdq, struct thread *td)
1754 sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
1756 td->td_lock = TDQ_LOCKPTR(tdq);
1758 tdq_add(tdq, td, SRQ_YIELDING);
1759 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1763 * Block a thread for switching. Similar to thread_block() but does not
1764 * bump the spin count.
1766 static inline struct mtx *
1767 thread_block_switch(struct thread *td)
1771 THREAD_LOCK_ASSERT(td, MA_OWNED);
1773 td->td_lock = &blocked_lock;
1774 mtx_unlock_spin(lock);
1780 * Handle migration from sched_switch(). This happens only for
1784 sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1788 tdn = TDQ_CPU(td->td_sched->ts_cpu);
1791 * Do the lock dance required to avoid LOR. We grab an extra
1792 * spinlock nesting to prevent preemption while we're
1793 * not holding either run-queue lock.
1796 thread_block_switch(td); /* This releases the lock on tdq. */
1798 tdq_add(tdn, td, flags);
1799 tdq_notify(td->td_sched);
1801 * After we unlock tdn the new cpu still can't switch into this
1802 * thread until we've unblocked it in cpu_switch(). The lock
1803 * pointers may match in the case of HTT cores. Don't unlock here
1804 * or we can deadlock when the other CPU runs the IPI handler.
1806 if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
1812 return (TDQ_LOCKPTR(tdn));
1816 * Release a thread that was blocked with thread_block_switch().
1819 thread_unblock_switch(struct thread *td, struct mtx *mtx)
1821 atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
1826 * Switch threads. This function has to handle threads coming in while
1827 * blocked for some reason, running, or idle. It also must deal with
1828 * migrating a thread from one queue to another as running threads may
1829 * be assigned elsewhere via binding.
1832 sched_switch(struct thread *td, struct thread *newtd, int flags)
1835 struct td_sched *ts;
1840 THREAD_LOCK_ASSERT(td, MA_OWNED);
1842 cpuid = PCPU_GET(cpuid);
1843 tdq = TDQ_CPU(cpuid);
1847 ts->ts_rltick = ticks;
1849 td->td_lastcpu = td->td_oncpu;
1850 td->td_oncpu = NOCPU;
1851 td->td_flags &= ~TDF_NEEDRESCHED;
1852 td->td_owepreempt = 0;
1854 * The lock pointer in an idle thread should never change. Reset it
1855 * to CAN_RUN as well.
1857 if (TD_IS_IDLETHREAD(td)) {
1858 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1860 } else if (TD_IS_RUNNING(td)) {
1861 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1862 tdq_load_rem(tdq, ts);
1863 srqflag = (flags & SW_PREEMPT) ?
1864 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1865 SRQ_OURSELF|SRQ_YIELDING;
1866 if (ts->ts_cpu == cpuid)
1867 tdq_add(tdq, td, srqflag);
1869 mtx = sched_switch_migrate(tdq, td, srqflag);
1871 /* This thread must be going to sleep. */
1873 mtx = thread_block_switch(td);
1874 tdq_load_rem(tdq, ts);
1877 * We enter here with the thread blocked and assigned to the
1878 * appropriate cpu run-queue or sleep-queue and with the current
1879 * thread-queue locked.
1881 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
1883 * If KSE assigned a new thread just add it here and let choosethread
1884 * select the best one.
1887 sched_switchin(tdq, newtd);
1888 newtd = choosethread();
1890 * Call the MD code to switch contexts if necessary.
1894 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1895 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1897 lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
1898 TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
1899 cpu_switch(td, newtd, mtx);
1901 * We may return from cpu_switch on a different cpu. However,
1902 * we always return with td_lock pointing to the current cpu's
1905 cpuid = PCPU_GET(cpuid);
1906 tdq = TDQ_CPU(cpuid);
1907 lock_profile_obtain_lock_success(
1908 &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
1910 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1911 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1914 thread_unblock_switch(td, mtx);
1916 * Assert that all went well and return.
1919 /* We should always get here with the lowest priority td possible */
1920 tdq->tdq_lowpri = td->td_priority;
1922 TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
1923 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1924 td->td_oncpu = cpuid;
1928 * Adjust thread priorities as a result of a nice request.
1931 sched_nice(struct proc *p, int nice)
1935 PROC_LOCK_ASSERT(p, MA_OWNED);
1936 PROC_SLOCK_ASSERT(p, MA_OWNED);
1939 FOREACH_THREAD_IN_PROC(p, td) {
1942 sched_prio(td, td->td_base_user_pri);
1948 * Record the sleep time for the interactivity scorer.
1951 sched_sleep(struct thread *td)
1954 THREAD_LOCK_ASSERT(td, MA_OWNED);
1956 td->td_slptick = ticks;
1960 * Schedule a thread to resume execution and record how long it voluntarily
1961 * slept. We also update the pctcpu, interactivity, and priority.
1964 sched_wakeup(struct thread *td)
1966 struct td_sched *ts;
1969 THREAD_LOCK_ASSERT(td, MA_OWNED);
1972 * If we slept for more than a tick update our interactivity and
1975 slptick = td->td_slptick;
1977 if (slptick && slptick != ticks) {
1980 hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
1981 ts->ts_slptime += hzticks;
1982 sched_interact_update(td);
1983 sched_pctcpu_update(ts);
1986 /* Reset the slice value after we sleep. */
1987 ts->ts_slice = sched_slice;
1988 sched_add(td, SRQ_BORING);
1992 * Penalize the parent for creating a new child and initialize the child's
1996 sched_fork(struct thread *td, struct thread *child)
1998 THREAD_LOCK_ASSERT(td, MA_OWNED);
1999 sched_fork_thread(td, child);
2001 * Penalize the parent and child for forking.
2003 sched_interact_fork(child);
2004 sched_priority(child);
2005 td->td_sched->ts_runtime += tickincr;
2006 sched_interact_update(td);
2011 * Fork a new thread, may be within the same process.
2014 sched_fork_thread(struct thread *td, struct thread *child)
2016 struct td_sched *ts;
2017 struct td_sched *ts2;
2022 THREAD_LOCK_ASSERT(td, MA_OWNED);
2023 sched_newthread(child);
2024 child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
2026 ts2 = child->td_sched;
2027 ts2->ts_cpu = ts->ts_cpu;
2028 ts2->ts_runq = NULL;
	 * Grab our parent's cpu estimation information and priority.
2032 ts2->ts_ticks = ts->ts_ticks;
2033 ts2->ts_ltick = ts->ts_ltick;
2034 ts2->ts_ftick = ts->ts_ftick;
2035 child->td_user_pri = td->td_user_pri;
2036 child->td_base_user_pri = td->td_base_user_pri;
2038 * And update interactivity score.
2040 ts2->ts_slptime = ts->ts_slptime;
2041 ts2->ts_runtime = ts->ts_runtime;
2042 ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */
2046 * Adjust the priority class of a thread.
2049 sched_class(struct thread *td, int class)
2052 THREAD_LOCK_ASSERT(td, MA_OWNED);
2053 if (td->td_pri_class == class)
	 * On SMP if we're on the RUNQ we must adjust the transferable
	 * count because we could be changing to or from an interrupt
2062 if (TD_ON_RUNQ(td)) {
2065 tdq = TDQ_CPU(td->td_sched->ts_cpu);
2066 if (THREAD_CAN_MIGRATE(td)) {
2067 tdq->tdq_transferable--;
2068 tdq->tdq_group->tdg_transferable--;
2070 td->td_pri_class = class;
2071 if (THREAD_CAN_MIGRATE(td)) {
2072 tdq->tdq_transferable++;
2073 tdq->tdq_group->tdg_transferable++;
2077 td->td_pri_class = class;
2081 * Return some of the child's priority and interactivity to the parent.
2084 sched_exit(struct proc *p, struct thread *child)
2088 CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
2089 child, child->td_name, child->td_priority);
2091 PROC_SLOCK_ASSERT(p, MA_OWNED);
2092 td = FIRST_THREAD_IN_PROC(p);
2093 sched_exit_thread(td, child);
2097 * Penalize another thread for the time spent on this one. This helps to
2098 * worsen the priority and interactivity of processes which schedule batch
2099 * jobs such as make. This has little effect on the make process itself but
2100 * causes new processes spawned by it to receive worse scores immediately.
2103 sched_exit_thread(struct thread *td, struct thread *child)
2106 CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
2107 child, child->td_name, child->td_priority);
2111 * KSE forks and exits so often that this penalty causes short-lived
2112 * threads to always be non-interactive. This causes mozilla to
2115 if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
2119 * Give the child's runtime to the parent without returning the
2120 * sleep time as a penalty to the parent. This causes shells that
2121 * launch expensive things to mark their children as expensive.
2124 td->td_sched->ts_runtime += child->td_sched->ts_runtime;
2125 sched_interact_update(td);
2131 * Fix priorities on return to user-space. Priorities may be elevated due
2132 * to static priorities in msleep() or similar.
2135 sched_userret(struct thread *td)
2138 * XXX we cheat slightly on the locking here to avoid locking in
2139 * the usual case. Setting td_priority here is essentially an
2140 * incomplete workaround for not setting it properly elsewhere.
2141 * Now that some interrupt handlers are threads, not setting it
2142 * properly elsewhere can clobber it in the window between setting
2143 * it here and returning to user mode, so don't waste time setting
2144 * it perfectly here.
2146 KASSERT((td->td_flags & TDF_BORROWING) == 0,
2147 ("thread with borrowed priority returning to userland"));
2148 if (td->td_priority != td->td_user_pri) {
2150 td->td_priority = td->td_user_pri;
2151 td->td_base_pri = td->td_user_pri;
2157 * Handle a stathz tick. This is really only relevant for timeshare
2161 sched_clock(struct thread *td)
2164 struct td_sched *ts;
2166 THREAD_LOCK_ASSERT(td, MA_OWNED);
2170 * We run the long term load balancer infrequently on the first cpu.
2172 if (balance_tdq == tdq) {
2173 if (balance_ticks && --balance_ticks == 0)
2175 if (balance_group_ticks && --balance_group_ticks == 0)
2176 sched_balance_groups();
2180 * Advance the insert index once for each tick to ensure that all
2181 * threads get a chance to run.
2183 if (tdq->tdq_idx == tdq->tdq_ridx) {
2184 tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
2185 if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
2186 tdq->tdq_ridx = tdq->tdq_idx;
2189 if (td->td_pri_class & PRI_FIFO_BIT)
2191 if (td->td_pri_class == PRI_TIMESHARE) {
2193 * We used a tick; charge it to the thread so
2194 * that we can compute our interactivity.
2196 td->td_sched->ts_runtime += tickincr;
2197 sched_interact_update(td);
2200 * We used up one time slice.
2202 if (--ts->ts_slice > 0)
2205 * We're out of time, recompute priorities and requeue.
2208 td->td_flags |= TDF_NEEDRESCHED;
2212 * Called once per hz tick. Used for cpu utilization information. This
2213 * is easier than trying to scale based on stathz.
2218 struct td_sched *ts;
2220 ts = curthread->td_sched;
2221 /* Adjust ticks for pctcpu */
2222 ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2223 ts->ts_ltick = ticks;
	 * Update if we've exceeded our desired tick threshold by over one
2228 if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2229 sched_pctcpu_update(ts);
2233 * Return whether the current CPU has runnable tasks. Used for in-kernel
2234 * cooperative idle threads.
2237 sched_runnable(void)
2245 if ((curthread->td_flags & TDF_IDLETD) != 0) {
2246 if (tdq->tdq_load > 0)
2249 if (tdq->tdq_load - 1 > 0)
 * Choose the highest priority thread to run.  The thread is removed from
 * the run-queue while running; however, the load remains.  For SMP we set
 * the tdq in the global idle bitmask if it idles here.
2265 struct tdq_group *tdg;
2267 struct td_sched *ts;
2272 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2273 ts = tdq_choose(tdq);
2275 tdq_runq_rem(tdq, ts);
2276 return (ts->ts_thread);
2278 td = PCPU_GET(idlethread);
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a thread bounces
	 * back and forth between two idle cores on separate physical CPUs.
2285 tdg = tdq->tdq_group;
2286 tdg->tdg_idlemask |= PCPU_GET(cpumask);
2287 if (tdg->tdg_idlemask == tdg->tdg_cpumask)
2288 atomic_set_int(&tdq_idle, tdg->tdg_mask);
2289 tdq->tdq_lowpri = td->td_priority;
 * Set owepreempt if necessary.  Preemption never happens directly in ULE;
 * we always request it once we exit a critical section.
2299 sched_setpreempt(struct thread *td)
2306 pri = td->td_priority;
2307 cpri = ctd->td_priority;
2308 if (td->td_priority < ctd->td_priority)
2309 curthread->td_flags |= TDF_NEEDRESCHED;
2310 if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2313 * Always preempt IDLE threads. Otherwise only if the preempting
2314 * thread is an ithread.
2316 if (pri > preempt_thresh && cpri < PRI_MIN_IDLE)
2318 ctd->td_owepreempt = 1;
 * Add a thread to a thread queue.  Initializes priority, slice, runq, and
 * adds it to the appropriate queue.  This is the internal function called
 * when the tdq is predetermined.
2328 tdq_add(struct tdq *tdq, struct thread *td, int flags)
2330 struct td_sched *ts;
2336 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2337 KASSERT((td->td_inhibitors == 0),
2338 ("sched_add: trying to run inhibited thread"));
2339 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
2340 ("sched_add: bad thread state"));
2341 KASSERT(td->td_flags & TDF_INMEM,
2342 ("sched_add: thread swapped out"));
2345 class = PRI_BASE(td->td_pri_class);
2347 if (ts->ts_slice == 0)
2348 ts->ts_slice = sched_slice;
2350 * Pick the run queue based on priority.
2352 if (td->td_priority <= PRI_MAX_REALTIME)
2353 ts->ts_runq = &tdq->tdq_realtime;
2354 else if (td->td_priority <= PRI_MAX_TIMESHARE)
2355 ts->ts_runq = &tdq->tdq_timeshare;
2357 ts->ts_runq = &tdq->tdq_idle;
2359 cpumask = 1 << ts->ts_cpu;
2361 * If we had been idle, clear our bit in the group and potentially
2362 * the global bitmap.
2364 if ((class != PRI_IDLE && class != PRI_ITHD) &&
2365 (tdq->tdq_group->tdg_idlemask & cpumask) != 0) {
2367 * Check to see if our group is unidling, and if so, remove it
2368 * from the global idle mask.
2370 if (tdq->tdq_group->tdg_idlemask ==
2371 tdq->tdq_group->tdg_cpumask)
2372 atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
2374 * Now remove ourselves from the group specific idle mask.
2376 tdq->tdq_group->tdg_idlemask &= ~cpumask;
2378 if (td->td_priority < tdq->tdq_lowpri)
2379 tdq->tdq_lowpri = td->td_priority;
2381 tdq_runq_add(tdq, ts, flags);
2382 tdq_load_add(tdq, ts);
2386 * Select the target thread queue and add a thread to it. Request
2387 * preemption or IPI a remote processor if required.
2390 sched_add(struct thread *td, int flags)
2392 struct td_sched *ts;
2398 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
2399 td, td->td_name, td->td_priority, curthread,
2400 curthread->td_name);
2401 THREAD_LOCK_ASSERT(td, MA_OWNED);
2404 * Recalculate the priority before we select the target cpu or
2407 if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2410 cpuid = PCPU_GET(cpuid);
2412 * Pick the destination cpu and if it isn't ours transfer to the
2415 if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_MIGRATE(td) &&
2416 curthread->td_intr_nesting_level)
2418 if (!THREAD_CAN_MIGRATE(td))
2421 cpu = sched_pickcpu(ts, flags);
2422 tdq = sched_setcpu(ts, cpu, flags);
2423 tdq_add(tdq, td, flags);
2432 * Now that the thread is moving to the run-queue, set the lock
2433 * to the scheduler's lock.
2435 thread_lock_set(td, TDQ_LOCKPTR(tdq));
2436 tdq_add(tdq, td, flags);
2438 if (!(flags & SRQ_YIELDING))
2439 sched_setpreempt(td);
2443 * Remove a thread from a run-queue without running it. This is used
2444 * when we're stealing a thread from a remote queue. Otherwise all threads
2445 * exit by calling sched_exit_thread() and sched_throw() themselves.
2448 sched_rem(struct thread *td)
2451 struct td_sched *ts;
2453 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
2454 td, td->td_name, td->td_priority, curthread,
2455 curthread->td_name);
2457 tdq = TDQ_CPU(ts->ts_cpu);
2458 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2459 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2460 KASSERT(TD_ON_RUNQ(td),
2461 ("sched_rem: thread not on run queue"));
2462 tdq_runq_rem(tdq, ts);
2463 tdq_load_rem(tdq, ts);
2468 * Fetch cpu utilization information. Updates on demand.
2471 sched_pctcpu(struct thread *td)
2474 struct td_sched *ts;
2485 sched_pctcpu_update(ts);
2486 /* How many rtick per second ? */
2487 rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2488 pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
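	/*
	 * Example (assuming hz = 1000): a thread whose sliding history shows
	 * 2500 hz ticks of cpu time has SCHED_TICK_HZ(ts) = 2500, so
	 * rtick = 2500 / SCHED_TICK_SECS = 250 and pctcpu becomes
	 * FSCALE * 250 / 1000 = FSCALE / 4, i.e. 25% in the fixed-point
	 * format that ps(1) and top(1) expect.
	 */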
2496 sched_affinity(struct thread *td)
2501 * Bind a thread to a target cpu.
2504 sched_bind(struct thread *td, int cpu)
2506 struct td_sched *ts;
2508 THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
2510 if (ts->ts_flags & TSF_BOUND)
2512 ts->ts_flags |= TSF_BOUND;
2515 if (PCPU_GET(cpuid) == cpu)
2518 /* When we return from mi_switch we'll be on the correct cpu. */
2519 mi_switch(SW_VOL, NULL);
2524 * Release a bound thread.
2527 sched_unbind(struct thread *td)
2529 struct td_sched *ts;
2531 THREAD_LOCK_ASSERT(td, MA_OWNED);
2533 if ((ts->ts_flags & TSF_BOUND) == 0)
2535 ts->ts_flags &= ~TSF_BOUND;
2542 sched_is_bound(struct thread *td)
2544 THREAD_LOCK_ASSERT(td, MA_OWNED);
2545 return (td->td_sched->ts_flags & TSF_BOUND);
2552 sched_relinquish(struct thread *td)
2555 SCHED_STAT_INC(switch_relinquish);
2556 mi_switch(SW_VOL, NULL);
2561 * Return the total system load.
2571 for (i = 0; i <= tdg_maxid; i++)
2572 total += TDQ_GROUP(i)->tdg_load;
2575 return (TDQ_SELF()->tdq_sysload);
2580 sched_sizeof_proc(void)
2582 return (sizeof(struct proc));
2586 sched_sizeof_thread(void)
2588 return (sizeof(struct thread) + sizeof(struct td_sched));
2592 * The actual idle process.
2595 sched_idletd(void *dummy)
2602 mtx_assert(&Giant, MA_NOTOWNED);
2603 /* ULE relies on preemption for idle interruption. */
2615 * A CPU is entering for the first time or a thread is exiting.
2618 sched_throw(struct thread *td)
2620 struct thread *newtd;
2625 /* Correct spinlock nesting and acquire the correct lock. */
2629 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2630 tdq_load_rem(tdq, td->td_sched);
2631 lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
2633 KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
2634 newtd = choosethread();
2635 TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
2636 PCPU_SET(switchtime, cpu_ticks());
2637 PCPU_SET(switchticks, ticks);
2638 cpu_throw(td, newtd); /* doesn't return */
2642 * This is called from fork_exit(). Just acquire the correct locks and
2643 * let fork do the rest of the work.
2646 sched_fork_exit(struct thread *td)
2648 struct td_sched *ts;
2653 * Finish setting up thread glue so that it begins execution in a
2654 * non-nested critical section with the scheduler lock held.
2656 cpuid = PCPU_GET(cpuid);
2657 tdq = TDQ_CPU(cpuid);
2659 if (TD_IS_IDLETHREAD(td))
2660 td->td_lock = TDQ_LOCKPTR(tdq);
2661 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2662 td->td_oncpu = cpuid;
2663 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2664 lock_profile_obtain_lock_success(
2665 &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
2668 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
2670 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2672 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2673 "Slice size for timeshare threads");
2674 SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2675 "Interactivity score threshold");
2676 SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
    0, "Min priority for preemption, lower priorities have greater precedence");
2679 SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0,
2680 "Pick the target cpu based on priority rather than load.");
2681 SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
2682 "Number of hz ticks to keep thread affinity for");
2683 SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, "");
2684 SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
2685 "Enables the long-term load balancer");
2686 SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
2687 &balance_interval, 0,
2688 "Average frequency in stathz ticks to run the long-term balancer");
2689 SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
2690 "Steals work from another hyper-threaded core on idle");
2691 SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
2692 "Attempts to steal work from other cores before idling");
2693 SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
2694 "Minimum load on remote cpu before we'll steal");
2695 SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0,
2696 "True when a topology has been specified by the MD code.");
2699 /* ps compat. All cpu percentages from ULE are weighted. */
2700 static int ccpu = 0;
2701 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2704 #define KERN_SWITCH_INCLUDE 1
2705 #include "kern/kern_switch.c"