/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
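
/*
 * Illustrative sizing note (an assumption based on the stock <sys/runq.h>
 * parameters, not text from this file): with RQ_NQS == 64 run queues and
 * 64-bit status words (RQB_BPW == 64), RQB_LEN == 1 and the assertion
 * above checks 64 * 1 == 64; with 32-bit status words, RQB_LEN == 2.
 */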

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
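
/*
 * Example (an illustrative sketch, not part of the original file): user
 * space can query the read-only flag above with sysctlbyname(3):
 *
 *	int v;
 *	size_t len = sizeof(v);
 *
 *	if (sysctlbyname("kern.sched.preemption", &v, &len, NULL, 0) == 0)
 *		printf("kernel preemption %s\n", v ? "enabled" : "disabled");
 */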

/*
 * Support for scheduler stats exported via kern.sched.stats.  All stats may
 * be reset with kern.sched.stats.reset = 1.  Stats may be defined elsewhere
 * with SCHED_STAT_DEFINE().
 */
#ifdef SCHED_STATS
SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "switch stats");

/* Switch reasons from mi_switch(9). */
DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]);
SCHED_STAT_DEFINE_VAR(owepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(turnstile,
    &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
SCHED_STAT_DEFINE_VAR(sleepq,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
SCHED_STAT_DEFINE_VAR(relinquish,
    &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
SCHED_STAT_DEFINE_VAR(needresched,
    &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), "");
SCHED_STAT_DEFINE_VAR(idle,
    &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), "");
SCHED_STAT_DEFINE_VAR(iwait,
    &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), "");
SCHED_STAT_DEFINE_VAR(suspend,
    &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), "");
SCHED_STAT_DEFINE_VAR(remotepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(remotewakeidle,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), "");
SCHED_STAT_DEFINE_VAR(bind,
    &DPCPU_NAME(sched_switch_stats[SWT_BIND]), "");

static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *p;
	uintptr_t counter;
	int error;
	int val;
	int i;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	/*
	 * Traverse the list of children of _kern_sched_stats and reset each
	 * to 0.  Skip the reset entry.
	 */
	RB_FOREACH(p, sysctl_oid_list, oidp->oid_parent) {
		if (p == oidp || p->oid_arg1 == NULL)
			continue;
		counter = (uintptr_t)p->oid_arg1;
		CPU_FOREACH(i) {
			*(long *)(dpcpu_off[i] + counter) = 0;
		}
	}
	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_stats_reset, "I",
    "Reset scheduler statistics");
#endif

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */

static __noinline struct thread *
choosethread_panic(struct thread *td)
{

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
retry:
	if (((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		td = sched_choose();
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

struct thread *
choosethread(void)
{
	struct thread *td;

	td = sched_choose();

	if (KERNEL_PANICKED())
		return (choosethread_panic(td));

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 *
 * It might seem a good idea to inline critical_enter() but, in order
 * to prevent instruction reordering by the compiler, a __compiler_membar()
 * would have to be used here (the same as sched_pin()).  The performance
 * penalty imposed by the membar could, then, produce slower code than
 * the function call itself, for most cases.
 */
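
/*
 * Typical use of the critical section API implemented below (an
 * illustrative sketch, not code from this file):
 *
 *	critical_enter();
 *	... touch per-CPU state; the thread can neither be preempted
 *	    nor migrated to another CPU here ...
 *	critical_exit();
 */
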
void
critical_enter_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif

	critical_enter();
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

void __noinline
critical_exit_preempt(void)
{
	struct thread *td;
	int flags;

	/*
	 * If td_critnest is 0, it is possible that we are going to get
	 * preempted again before reaching the code below.  This happens
	 * rarely and is harmless.  However, this means td_owepreempt may
	 * now be unset.
	 */
	td = curthread;
	if (td->td_critnest != 0)
		return;
	if (kdb_active)
		return;

	/*
	 * Microoptimization: we committed to switch,
	 * disable preemption in interrupt handlers
	 * while spinning for the thread lock.
	 */
	td->td_critnest = 1;
	thread_lock(td);
	td->td_critnest--;
	flags = SW_INVOL | SW_PREEMPT;
	if (TD_IS_IDLETHREAD(td))
		flags |= SWT_IDLE;
	else
		flags |= SWT_OWEPREEMPT;
	mi_switch(flags);
}

void
critical_exit_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif

	critical_exit();
	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}
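
/*
 * Worked example (illustrative, assuming 32-bit status words, i.e.
 * RQB_BPW == 32 and RQB_L2BPW == 5): for pri == 37, RQB_WORD(37) is
 * 37 >> 5 == 1 and RQB_BIT(37) is 1 << (37 & 31) == 1 << 5, so queue 37
 * is tracked by bit 5 of rqb_bits[1].
 */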

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
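
/*
 * Illustrative trace (same 32-bit word assumption as above): if
 * rqb_bits[0] == 0 and rqb_bits[1] == 0x8, the loop skips word 0, finds
 * the lowest set bit of word 1 at position 3, and returns
 * pri == 3 + (1 << 5) == 35.
 */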

static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities
	 * before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct thread *td, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = td->td_priority / RQ_PPQ;
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}
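
/*
 * Worked example (illustrative, assuming the stock RQ_PPQ == 4 from
 * <sys/runq.h>): a thread with td_priority == 100 lands on queue
 * 100 / 4 == 25, so each queue serves four adjacent priority levels.
 * SRQ_PREEMPTED threads are queued at the head so that a preempted
 * thread resumes ahead of its peers.
 */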

void
runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose_fuzz(struct runq *rq, int fuzz)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = fuzz;
			int cpu = PCPU_GET(cpuid);
			struct thread *td2;
			td2 = td = TAILQ_FIRST(rqh);

			while (count-- && td2) {
				if (td2->td_lastcpu == cpu) {
					td = td2;
					break;
				}
				td2 = TAILQ_NEXT(td2, td_runq);
			}
		} else
			td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);

	return (NULL);
}
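
/*
 * Illustrative behaviour of the fuzz (an example, not text from this
 * file): with fuzz == 2, the first two threads of the highest-priority
 * non-empty queue are examined and one whose td_lastcpu matches the
 * current CPU is preferred, trading strict FIFO order within the queue
 * for cache affinity.
 */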

/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);

	return (NULL);
}

struct thread *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
		    pri, td, td->td_rqindex, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * The caller must set the thread's state afterwards.
 */
void
runq_remove(struct runq *rq, struct thread *td)
{

	runq_remove_idx(rq, td, NULL);
}

void
runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(td->td_flags & TDF_INMEM,
	    ("runq_remove_idx: thread swapped out"));
	pri = td->td_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, td, td_runq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}
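
/*
 * Note on the idx parameter (an illustrative reading of the interface):
 * a caller that consumes the queues in circular order, as the ULE
 * scheduler does for its timeshare queue, passes the index at which it
 * is currently dequeueing; when the queue at *idx drains, the index
 * advances to the following queue, wrapping modulo RQ_NQS.
 */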