/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif
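
/*
 * The status bitmap must supply exactly one bit per run queue: RQB_LEN
 * words of RQB_BPW bits each have to cover all RQ_NQS priority queues.
 */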
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
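
/*
 * From userland this reads as a boolean; e.g. on a kernel built with
 * "options PREEMPTION":
 *
 *	$ sysctl kern.sched.preemption
 *	kern.sched.preemption: 1
 */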

/*
 * Support for scheduler stats exported via kern.sched.stats.  All stats may
 * be reset with kern.sched.stats.reset = 1.  Stats may be defined elsewhere
 * with SCHED_STAT_DEFINE().
 */
#ifdef SCHED_STATS
SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");

/* Switch reasons from mi_switch(). */
DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]);
SCHED_STAT_DEFINE_VAR(uncategorized,
    &DPCPU_NAME(sched_switch_stats[SWT_NONE]), "");
SCHED_STAT_DEFINE_VAR(preempt,
    &DPCPU_NAME(sched_switch_stats[SWT_PREEMPT]), "");
SCHED_STAT_DEFINE_VAR(owepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(turnstile,
    &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
SCHED_STAT_DEFINE_VAR(sleepq,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
SCHED_STAT_DEFINE_VAR(relinquish,
    &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
SCHED_STAT_DEFINE_VAR(needresched,
    &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), "");
SCHED_STAT_DEFINE_VAR(idle,
    &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), "");
SCHED_STAT_DEFINE_VAR(iwait,
    &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), "");
SCHED_STAT_DEFINE_VAR(suspend,
    &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), "");
SCHED_STAT_DEFINE_VAR(remotepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(remotewakeidle,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), "");

static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *p;
	uintptr_t counter;
	int error;
	int val;
	int i;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	/*
	 * Traverse the list of children of _kern_sched_stats and reset each
	 * to 0.  Skip the reset entry.
	 */
	SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
		if (p == oidp || p->oid_arg1 == NULL)
			continue;
		counter = (uintptr_t)p->oid_arg1;
		CPU_FOREACH(i) {
			*(long *)(dpcpu_off[i] + counter) = 0;
		}
	}
	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
#endif
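
/*
 * A userland sketch (an assumption about typical use, not code from this
 * file): the reset handler accepts any non-zero integer, via either
 * "sysctl kern.sched.stats.reset=1" or sysctlbyname(3):
 *
 *	int one = 1;
 *	if (sysctlbyname("kern.sched.stats.reset", NULL, NULL, &one,
 *	    sizeof(one)) != 0)
 *		warn("sysctlbyname");
 */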

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
struct thread *
choosethread(void)
{
	struct thread *td;

retry:
	td = sched_choose();

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 *
 * It might seem a good idea to inline critical_enter() but, in order
 * to prevent instruction reordering by the compiler, a __compiler_membar()
 * would have to be used here (the same as sched_pin()).  The performance
 * penalty imposed by the membar could, then, produce slower code than
 * the function call itself, for most cases.
 */
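
/*
 * A minimal usage sketch (illustrative only, not code from this file;
 * "foo_count" is a hypothetical per-CPU field): updates to per-CPU state
 * are typically bracketed by a critical section so the thread cannot be
 * preempted and migrated mid-update:
 *
 *	critical_enter();
 *	PCPU_SET(foo_count, PCPU_GET(foo_count) + 1);
 *	critical_exit();
 */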
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;
	int flags;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));

	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		if (td->td_owepreempt && !kdb_active) {
			td->td_critnest = 1;
			thread_lock(td);
			td->td_critnest--;
			flags = SW_INVOL | SW_PREEMPT;
			if (TD_IS_IDLETHREAD(td))
				flags |= SWT_IDLE;
			else
				flags |= SWT_OWEPREEMPT;
			mi_switch(flags, NULL);
			thread_unlock(td);
		}
	} else
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}
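
/*
 * A worked example of the bitmap arithmetic, assuming 32-bit status words
 * (RQB_BPW == 32, RQB_L2BPW == 5): priority level 37 maps to word
 * RQB_WORD(37) == 37 >> 5 == 1 and bit RQB_BIT(37) == 1 << (37 & 31) ==
 * 1 << 5.  runq_findbit() below inverts the mapping with
 * RQB_FFS(word) + (i << RQB_L2BPW).
 */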

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities
	 * before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct thread *td, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = td->td_priority / RQ_PPQ;
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}
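
/*
 * For example, assuming the historical RQ_PPQ of 4 priorities per queue,
 * a thread at td_priority 100 lands on queue 100 / 4 == 25.  Inserting
 * SRQ_PREEMPTED threads at the head lets a preempted thread resume ahead
 * of its queue-mates when this priority level is selected again.
 */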

void
runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

/*
 * Return true if there are runnable threads of any priority on the run
 * queue, false otherwise.  Has no side effects and does not modify the
 * run queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}
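
/*
 * Note on runq_choose_fuzz() below: within the first 'fuzz' entries of
 * the highest-priority queue it prefers a thread whose td_lastcpu matches
 * the current CPU, trading strict FIFO order within the queue for
 * warm-cache affinity.
 */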
/*
 * Find the highest priority thread on the run queue.
 */
struct thread *
runq_choose_fuzz(struct runq *rq, int fuzz)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		/* fuzz == 1 is normal; 0 or less are ignored. */
		if (fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = fuzz;
			int cpu = PCPU_GET(cpuid);
			struct thread *td2;
			td2 = td = TAILQ_FIRST(rqh);

			while (count-- && td2) {
				if (td2->td_lastcpu == cpu) {
					td = td2;
					break;
				}
				td2 = TAILQ_NEXT(td2, td_runq);
			}
		} else
			td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Find the highest priority thread on the run queue.
 */
struct thread *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);

	return (NULL);
}

struct thread *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_from: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
		    pri, td, td->td_rqindex, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set state afterwards.
 */
void
runq_remove(struct runq *rq, struct thread *td)
{

	runq_remove_idx(rq, td, NULL);
}
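
/*
 * runq_remove_idx() also maintains a caller-held scan index: if the
 * removal empties the queue that *idx refers to, *idx is advanced to the
 * next queue (mod RQ_NQS) so that a circular scan does not resume on a
 * now-empty slot.
 */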
void
runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(td->td_flags & TDF_INMEM,
	    ("runq_remove_idx: thread swapped out"));
	pri = td->td_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, td, td_runq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}