/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
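
/*
 * Added note (illustrative): the assertion above pins the run-queue
 * geometry: RQB_LEN status words of RQB_BPW bits each must supply exactly
 * one bit per run queue.  For example, assuming RQB_BPW == 32 and
 * RQ_NQS == 64, priority index 37 lives in word RQB_WORD(37) == 1 at bit
 * position 37 & 31 == 5.
 */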

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
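
/*
 * A minimal userland sketch (not part of the kernel build) of querying
 * the read-only knob above with sysctlbyname(3):
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int val;
	size_t len = sizeof(val);

	/* Reads back the compile-time PREEMPTION setting; cannot be set. */
	if (sysctlbyname("kern.sched.preemption", &val, &len, NULL, 0) == 0)
		printf("kern.sched.preemption: %d\n", val);
	return (0);
}
#endif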

/*
 * Support for scheduler stats exported via kern.sched.stats.  All stats may
 * be reset with kern.sched.stats.reset = 1.  Stats may be defined elsewhere
 * with SCHED_STAT_DEFINE().
 */
#ifdef SCHED_STATS
SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");

/* Switch reasons from mi_switch(). */
DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]);
SCHED_STAT_DEFINE_VAR(uncategorized,
    &DPCPU_NAME(sched_switch_stats[SWT_NONE]), "");
SCHED_STAT_DEFINE_VAR(preempt,
    &DPCPU_NAME(sched_switch_stats[SWT_PREEMPT]), "");
SCHED_STAT_DEFINE_VAR(owepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(turnstile,
    &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
SCHED_STAT_DEFINE_VAR(sleepq,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
SCHED_STAT_DEFINE_VAR(sleepqtimo,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQTIMO]), "");
SCHED_STAT_DEFINE_VAR(relinquish,
    &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
SCHED_STAT_DEFINE_VAR(needresched,
    &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), "");
SCHED_STAT_DEFINE_VAR(idle,
    &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), "");
SCHED_STAT_DEFINE_VAR(iwait,
    &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), "");
SCHED_STAT_DEFINE_VAR(suspend,
    &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), "");
SCHED_STAT_DEFINE_VAR(remotepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(remotewakeidle,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), "");

static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *p;
	uintptr_t counter;
	int error;
	int val;
	int i;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	/*
	 * Traverse the list of children of _kern_sched_stats and reset each
	 * to 0.  Skip the reset entry.
	 */
	SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
		if (p == oidp || p->oid_arg1 == NULL)
			continue;
		counter = (uintptr_t)p->oid_arg1;
		CPU_FOREACH(i) {
			*(long *)(dpcpu_off[i] + counter) = 0;
		}
	}
	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
#endif
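
/*
 * A minimal userland sketch (not part of the kernel build): writing 1 to
 * the reset handler defined above zeroes every per-CPU switch counter.
 * Requires a kernel built with SCHED_STATS and root privilege.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
	int one = 1;

	return (sysctlbyname("kern.sched.stats.reset", NULL, NULL,
	    &one, sizeof(one)));
}
#endif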

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
static __noinline struct thread *
choosethread_panic(struct thread *td)
{

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
retry:
	if (((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		td = sched_choose();
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

struct thread *
choosethread(void)
{
	struct thread *td;

	td = sched_choose();

	if (__predict_false(panicstr != NULL))
		return (choosethread_panic(td));

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 *
 * It might seem a good idea to inline critical_enter() but, in order
 * to prevent instructions reordering by the compiler, a __compiler_membar()
 * would have to be used here (the same as sched_pin()).  The performance
 * penalty imposed by the membar could, then, produce slower code than
 * the function call itself, for most cases.
 */
void
critical_enter_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif
	critical_enter();
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}
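
/*
 * Illustrative sketch of the trade-off described in the comment above;
 * the function name is hypothetical, not kernel API.  If the nesting
 * bump were inlined at every call site, a __compiler_membar() would be
 * needed so the compiler cannot hoist code from the protected region
 * above the increment:
 */
#if 0
static __inline void
critical_enter_inline_sketch(void)
{

	curthread->td_critnest++;
	/* Keep the protected region's loads/stores after the bump. */
	__compiler_membar();
}
#endif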

void __noinline
critical_exit_preempt(void)
{
	struct thread *td;
	int flags;

	/*
	 * If td_critnest is 0, it is possible that we are going to get
	 * preempted again before reaching the code below. This happens
	 * rarely and is harmless. However, this means td_owepreempt may
	 * now be unset.
	 */
	td = curthread;
	if (td->td_critnest != 0)
		return;
	if (kdb_active)
		return;

	/*
	 * Microoptimization: we committed to switch,
	 * disable preemption in interrupt handlers
	 * while spinning for the thread lock.
	 */
	td->td_critnest = 1;
	thread_lock(td);
	td->td_critnest--;
	flags = SW_INVOL | SW_PREEMPT;
	if (TD_IS_IDLETHREAD(td))
		flags |= SWT_IDLE;
	else
		flags |= SWT_OWEPREEMPT;
	mi_switch(flags, NULL);
	thread_unlock(td);
}

void
critical_exit_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif
	critical_exit();
	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
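
/*
 * Added worked example (illustrative): assuming RQB_BPW == 32 (so
 * RQB_L2BPW == 5) and that RQB_FFS() yields the zero-based index of the
 * least significant set bit, a status of rqb_bits[0] == 0 and
 * rqb_bits[1] == 0x30 makes runq_findbit() return 4 + (1 << 5) == 36:
 * queue 36 is the first non-empty one.
 */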

static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities
	 * before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct thread *td, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = td->td_priority / RQ_PPQ;
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}
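
/*
 * Added worked example (illustrative): assuming RQ_PPQ == 4, a thread
 * with td_priority == 100 hashes to queue 100 / 4 == 25, so priorities
 * 100-103 all share one queue and one status bit.
 */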

void
runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose_fuzz(struct runq *rq, int fuzz)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = fuzz;
			int cpu = PCPU_GET(cpuid);
			struct thread *td2;

			td2 = td = TAILQ_FIRST(rqh);
			while (count-- && td2) {
				if (td2->td_lastcpu == cpu) {
					td = td2;
					break;
				}
				td2 = TAILQ_NEXT(td2, td_runq);
			}
		} else
			td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);

	return (NULL);
}

struct thread *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_from: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
		    pri, td, td->td_rqindex, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set state afterwards.
 */
void
runq_remove(struct runq *rq, struct thread *td)
{

	runq_remove_idx(rq, td, NULL);
}

void
runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(td->td_flags & TDF_INMEM,
	    ("runq_remove_idx: thread swapped out"));
	pri = td->td_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, td, td_runq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}
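
/*
 * Added note (illustrative): the *idx update above cooperates with the
 * circular scan in runq_findbit_from().  A caller that keeps a rotating
 * start index (as ULE's timeshare queue does) has it advanced past a
 * just-emptied queue, so the next search does not begin on a queue that
 * is known to be empty.
 */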