/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif
#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif
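/*
 * The status bitmap must provide exactly one bit per run queue: RQB_LEN
 * words of RQB_BPW bits each must together cover all RQ_NQS queues, which
 * the assertion below checks at compile time.
 */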
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
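/*
 * From user space the flag reads as a boolean; an illustrative session
 * (the output depends on the kernel configuration) would look like:
 *
 *	$ sysctl kern.sched.preemption
 *	kern.sched.preemption: 1
 */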
/*
 * Support for scheduler stats exported via kern.sched.stats.  All stats may
 * be reset with kern.sched.stats.reset = 1.  Stats may be defined elsewhere
 * with SCHED_STAT_DEFINE().
 */
#ifdef SCHED_STATS
SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");
/* Switch reasons from mi_switch(). */
DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]);
SCHED_STAT_DEFINE_VAR(uncategorized,
    &DPCPU_NAME(sched_switch_stats[SWT_NONE]), "");
SCHED_STAT_DEFINE_VAR(preempt,
    &DPCPU_NAME(sched_switch_stats[SWT_PREEMPT]), "");
SCHED_STAT_DEFINE_VAR(owepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(turnstile,
    &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
SCHED_STAT_DEFINE_VAR(sleepq,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
SCHED_STAT_DEFINE_VAR(sleepqtimo,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQTIMO]), "");
SCHED_STAT_DEFINE_VAR(relinquish,
    &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
SCHED_STAT_DEFINE_VAR(needresched,
    &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), "");
SCHED_STAT_DEFINE_VAR(idle,
    &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), "");
SCHED_STAT_DEFINE_VAR(iwait,
    &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), "");
SCHED_STAT_DEFINE_VAR(suspend,
    &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), "");
SCHED_STAT_DEFINE_VAR(remotepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(remotewakeidle,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), "");
static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *p;
	uintptr_t counter;
	int error;
	int val;
	int i;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	/*
	 * Traverse the list of children of _kern_sched_stats and reset each
	 * to 0.  Skip the reset entry.
	 */
	SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
		if (p == oidp || p->oid_arg1 == NULL)
			continue;
		counter = (uintptr_t)p->oid_arg1;
		CPU_FOREACH(i) {
			*(long *)(dpcpu_off[i] + counter) = 0;
		}
	}
	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
#endif
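/*
 * Illustrative use, on a kernel built with "options SCHED_STATS":
 *
 *	$ sysctl kern.sched.stats		(dump per-reason switch counts)
 *	$ sysctl kern.sched.stats.reset=1	(zero the counters on every CPU)
 */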
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
static __noinline struct thread *
choosethread_panic(struct thread *td)
{

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	do {
		if (((td->td_proc->p_flag & P_SYSTEM) == 0 &&
		    (td->td_flags & TDF_INPANIC) == 0)) {
			/* note that it is no longer on the run queue */
			TD_SET_CAN_RUN(td);
			td = sched_choose();
		} else
			break;
	} while (1);

	return (td);
}

struct thread *
choosethread(void)
{
	struct thread *td;

	td = sched_choose();
	if (KERNEL_PANICKED())
		return (choosethread_panic(td));

	TD_SET_RUNNING(td);
	return (td);
}
/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemption is not allowed.
 *
 * It might seem a good idea to inline critical_enter() but, in order
 * to prevent instruction reordering by the compiler, a __compiler_membar()
 * would have to be used here (the same as sched_pin()).  The performance
 * penalty imposed by the membar could, then, produce slower code than
 * the function call itself, for most cases.
 */
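/*
 * The canonical usage pattern, per critical(9), brackets access to
 * per-CPU state so that an owed preemption is deferred until the
 * matching exit (illustrative sketch):
 *
 *	critical_enter();
 *	... touch per-CPU data without being switched out ...
 *	critical_exit();	(an owed preemption may fire here)
 */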
void
critical_enter_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif
	critical_enter();
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}
void __noinline
critical_exit_preempt(void)
{
	struct thread *td;
	int flags;

	/*
	 * If td_critnest is 0, it is possible that we are going to get
	 * preempted again before reaching the code below.  This happens
	 * rarely and is harmless.  However, this means td_owepreempt may
	 * now be unset.
	 */
	td = curthread;
	if (td->td_critnest != 0)
		return;
	if (kdb_active)
		return;

	/*
	 * Microoptimization: we committed to switch,
	 * disable preemption in interrupt handlers
	 * while spinning for the thread lock.
	 */
	td->td_critnest = 1;
	thread_lock(td);
	td->td_critnest--;
	flags = SW_INVOL | SW_PREEMPT;
	if (TD_IS_IDLETHREAD(td))
		flags |= SWT_IDLE;
	else
		flags |= SWT_OWEPREEMPT;
	mi_switch(flags);
}
void
critical_exit_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif
	critical_exit();
	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}
/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}
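/*
 * Layout note (from <sys/runq.h>): the kernel priority space is divided
 * into RQ_NQS queues of RQ_PPQ adjacent priority levels each, and
 * rq_status keeps one bit per queue so that the lowest-numbered (highest
 * priority) non-empty queue can be located with a find-first-set scan
 * rather than by walking every queue.
 */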
/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}
/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
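/*
 * Worked example, assuming 32-bit status words (RQB_BPW == 32, so
 * RQB_L2BPW == 5): queue index 37 lives in word RQB_WORD(37) == 1 as bit
 * RQB_BIT(37) == 1 << 5.  A scan finding that bit set recovers the index
 * as RQB_FFS(bits) + (1 << RQB_L2BPW) == 5 + 32 == 37.
 */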
static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities
	 * before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}
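/*
 * Design note: bounding the search to a single wrap keeps the scan to at
 * most two passes over the status words while still honoring the
 * caller's preferred starting index; this is what lets a scheduler treat
 * the run queue as circular (e.g. ULE's timeshare queue).
 */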
/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}
/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct thread *td, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = td->td_priority / RQ_PPQ;
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}
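/*
 * For example, with RQ_PPQ == 4 a thread at td_priority 100 is placed on
 * queue 100 / 4 == 25.  A thread re-queued because it was preempted
 * (SRQ_PREEMPTED) goes to the head of its queue so that it resumes ahead
 * of threads that were already waiting there.
 */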
void
runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}
/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects and does not modify the
 * run queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}
/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose_fuzz(struct runq *rq, int fuzz)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		/* fuzz == 1 is normal; 0 or less are ignored. */
		if (fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = fuzz;
			int cpu = PCPU_GET(cpuid);
			struct thread *td2;

			td2 = td = TAILQ_FIRST(rqh);
			while (count-- && td2) {
				if (td2->td_lastcpu == cpu) {
					td = td2;
					break;
				}
				td2 = TAILQ_NEXT(td2, td_runq);
			}
		} else
			td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);

	return (NULL);
}
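/*
 * The fuzz is a small cache-affinity optimization: rather than always
 * taking the head of the highest-priority queue, the first 'fuzz'
 * entries are searched for a thread that last ran on the current CPU,
 * trading strict FIFO order within the queue for warmer caches.
 */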
/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);

	return (NULL);
}
struct thread *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL,
		    ("runq_choose_from: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
		    pri, td, td->td_rqindex, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);

	return (NULL);
}
/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * The caller must set the thread's state afterwards.
 */
void
runq_remove(struct runq *rq, struct thread *td)
{

	runq_remove_idx(rq, td, NULL);
}
void
runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(td->td_flags & TDF_INMEM,
	    ("runq_remove_idx: thread swapped out"));
	pri = td->td_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, td, td_runq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}
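/*
 * Note on the idx update above: when a removal empties the queue that a
 * caller-maintained circular scan index points at, advancing the index to
 * (pri + 1) % RQ_NQS keeps a subsequent runq_findbit_from() from starting
 * its scan at a queue that is known to be empty.
 */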