/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define KTR_CRITICAL    KTR_SCHED
#else
#define KTR_CRITICAL    0
#endif
#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif
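/*
 * Compile-time sanity check: the run-queue status bitmap (RQB_LEN words
 * of RQB_BPW bits each) must cover exactly the RQ_NQS run queues.
 */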
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
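/*
 * Usage sketch: the OID is read-only and reads as a boolean from a shell,
 * e.g.
 *
 *	$ sysctl kern.sched.preemption
 *	kern.sched.preemption: 1
 *
 * (1 only when the kernel was built with options PREEMPTION.)
 */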
/*
 * Support for scheduler stats exported via kern.sched.stats.  All stats may
 * be reset with kern.sched.stats.reset = 1.  Stats may be defined elsewhere
 * with SCHED_STAT_DEFINE().
 */
#ifdef SCHED_STATS
SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");
/* Switch reasons from mi_switch(). */
DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]);
SCHED_STAT_DEFINE_VAR(uncategorized,
    &DPCPU_NAME(sched_switch_stats[SWT_NONE]), "");
SCHED_STAT_DEFINE_VAR(preempt,
    &DPCPU_NAME(sched_switch_stats[SWT_PREEMPT]), "");
SCHED_STAT_DEFINE_VAR(owepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(turnstile,
    &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
SCHED_STAT_DEFINE_VAR(sleepq,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
SCHED_STAT_DEFINE_VAR(sleepqtimo,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQTIMO]), "");
SCHED_STAT_DEFINE_VAR(relinquish,
    &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
SCHED_STAT_DEFINE_VAR(needresched,
    &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), "");
SCHED_STAT_DEFINE_VAR(idle,
    &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), "");
SCHED_STAT_DEFINE_VAR(iwait,
    &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), "");
SCHED_STAT_DEFINE_VAR(suspend,
    &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), "");
SCHED_STAT_DEFINE_VAR(remotepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(remotewakeidle,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), "");
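/*
 * Each counter above is bumped from mi_switch() via the SCHED_STAT_INC()
 * macro (see sys/sched.h), keyed on the SWT_* switch-type bits of its
 * flags argument, so every context switch is attributed to exactly one
 * reason on the current CPU.
 */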
static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *p;
	uintptr_t counter;
	int error;
	int val;
	int i;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	/*
	 * Traverse the list of children of _kern_sched_stats and reset each
	 * to 0.  Skip the reset entry.
	 */
	SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
		if (p == oidp || p->oid_arg1 == NULL)
			continue;
		counter = (uintptr_t)p->oid_arg1;
		CPU_FOREACH(i) {
			*(long *)(dpcpu_off[i] + counter) = 0;
		}
	}
	return (0);
}
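/*
 * Usage sketch: writing any non-zero value zeroes every per-CPU switch
 * counter (writing 0 is a no-op), e.g.
 *
 *	# sysctl kern.sched.stats.reset=1
 */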
SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
#endif
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
struct thread *
choosethread(void)
{
	struct thread *td;

retry:
	td = sched_choose();

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}
/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}
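/*
 * Usage sketch: callers bracket work that must not be preempted, such as
 * access to per-CPU state, e.g.
 *
 *	critical_enter();
 *	... touch per-CPU data ...
 *	critical_exit();
 *
 * Sections nest via td_critnest; a preemption deferred inside a section
 * (td_owepreempt) is only taken at the outermost critical_exit().
 */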
void
critical_exit(void)
{
	struct thread *td;
	int flags;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));

	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		if (td->td_owepreempt && !kdb_active) {
			td->td_critnest = 1;
			thread_lock(td);
			td->td_critnest--;
			flags = SW_INVOL | SW_PREEMPT;
			if (TD_IS_IDLETHREAD(td))
				flags |= SWT_IDLE;
			else
				flags |= SWT_OWEPREEMPT;
			mi_switch(flags, NULL);
			thread_unlock(td);
		}
	} else
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}
/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}
/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}
/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
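/*
 * Worked example (assuming a 32-bit status word, i.e. RQB_BPW == 32 and
 * RQB_L2BPW == 5): if queue 38 is the lowest-indexed non-empty queue,
 * word 0 is zero and word 1 has bit 6 set, so runq_findbit() returns
 * RQB_FFS(word 1) + (1 << 5) == 6 + 32 == 38.
 */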
static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}
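/*
 * Worked example: called with pri == 40 the function scans queues
 * 40..RQ_NQS-1 first, then wraps once and scans 0..39, so every queue
 * is visited exactly once before -1 is returned.
 */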
/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}
/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
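/*
 * Worked example (assuming the stock <sys/runq.h> values RQ_NQS == 64
 * and RQ_PPQ == 4): a thread with td_priority 100 is placed on queue
 * 100 / 4 == 25, which it shares with priorities 100 through 103.
 */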
void
runq_add(struct runq *rq, struct thread *td, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = td->td_priority / RQ_PPQ;
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}
void
runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}
/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}
/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose_fuzz(struct runq *rq, int fuzz)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = fuzz;
			int cpu = PCPU_GET(cpuid);
			struct thread *td2;
			td2 = td = TAILQ_FIRST(rqh);

			while (count-- && td2) {
				if (td2->td_lastcpu == cpu) {
					td = td2;
					break;
				}
				td2 = TAILQ_NEXT(td2, td_runq);
			}
		} else
			td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);

	return (NULL);
}
/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);

	return (NULL);
}
struct thread *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
		    pri, td, td->td_rqindex, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);

	return (NULL);
}
/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set state afterwards.
 */
void
runq_remove(struct runq *rq, struct thread *td)
{

	runq_remove_idx(rq, td, NULL);
}
void
runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(td->td_flags & TDF_INMEM,
	    ("runq_remove_idx: thread swapped out"));
	pri = td->td_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, td, td_runq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}