/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"
#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif
/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif
#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
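/*
 * Illustrative sketch (not part of this file): the run queue status bitmap
 * carries one bit per queue.  The real definitions live in <runq.h> and the
 * machine-dependent <machine/runq.h>; on a machine with 32-bit status words
 * they are roughly of this shape:
 *
 *	#define	RQ_NQS		64			(number of queues)
 *	#define	RQ_PPQ		4			(priorities per queue)
 *	#define	RQB_L2BPW	5			(log2 of bits per word)
 *	#define	RQB_BPW		(1 << RQB_L2BPW)
 *	#define	RQB_LEN		(RQ_NQS / RQB_BPW)
 *	#define	RQB_WORD(pri)	((pri) >> RQB_L2BPW)
 *	#define	RQB_BIT(pri)	(1 << ((pri) & (RQB_BPW - 1)))
 *
 * The CTASSERT above checks that the bitmap covers every queue exactly.
 */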
/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
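/*
 * Illustrative example (not part of this file): because the OID is
 * CTLFLAG_RD, user space can only inspect the flag, e.g.
 *
 *	$ sysctl kern.sched.preemption
 *	kern.sched.preemption: 1
 *
 * or from C with sysctlbyname(3):
 *
 *	int on;
 *	size_t len = sizeof(on);
 *	if (sysctlbyname("kern.sched.preemption", &on, &len, NULL, 0) == 0)
 *		printf("preemption %s\n", on ? "enabled" : "disabled");
 */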
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs. */
		td = PCPU_GET(idlethread);
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ts->ts_flags |= TSF_DIDRUN;
	CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
	    td, td->td_priority);
	/* Simulate runq_choose() having returned the idle thread. */
	td = PCPU_GET(idlethread);
	CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	ts->ts_flags |= TSF_DIDRUN;
	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
/*
 * Currently not used; threads remove themselves from the
 * run queue by running.
 */
remrunqueue(struct thread *td)
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	CTR1(KTR_RUNQ, "remrunqueue: td=%p", td);
	/* remove from sys run queue */
/*
 * Change the priority of a thread that is on the run queue.
 */
adjustrunqueue(struct thread *td, int newpri)
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
	CTR1(KTR_RUNQ, "adjustrunqueue: td=%p", td);
	/* We only care about the td_sched in the run queue. */
	td->td_priority = newpri;
	if (ts->ts_rqindex != (newpri / RQ_PPQ))
	if (ts->ts_rqindex != newpri)
	sched_add(td, SRQ_BORING);
setrunqueue(struct thread *td, int flags)
	CTR2(KTR_RUNQ, "setrunqueue: td:%p pid:%d",
	    td, td->td_proc->p_pid);
	CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	sched_add(td, flags);
/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);

	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_owepreempt) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
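/*
 * Usage sketch (illustrative, not part of this file): code that must not
 * be preempted brackets the region with a critical section:
 *
 *	critical_enter();
 *	... touch per-CPU state that must not be interrupted by a switch ...
 *	critical_exit();
 *
 * Sections nest; td_critnest counts the depth, and a preemption that was
 * deferred inside the section (td_owepreempt) is only performed when the
 * outermost critical_exit() brings td_critnest back to zero.
 */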
/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the current thread should be preempted immediately in
 * favor of the new thread.  If so, it switches to the new thread and
 * eventually returns true.  If not, it returns false so that the caller may
 * place the thread on an appropriate run queue.
 */
maybe_preempt(struct thread *td)
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled (see the illustrative example
	 *    below this comment).
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
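	/*
	 * Illustrative example (not part of the original text): with
	 * FULL_PREEMPTION disabled, the checks below mean preemption only
	 * happens when the new thread runs at an interrupt-thread priority
	 * (pri <= PRI_MAX_ITHD) or the current thread runs at an idle-class
	 * priority (cpri >= PRI_MIN_IDLE); one timeshare thread waking
	 * another only queues it for the next voluntary switch.
	 */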
	KASSERT((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd) || td->td_sched->ts_state != TSS_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ts_state != TSS_ONRUNQ);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
/* XXX: There should be a non-static version of this. */
printf_caddr_t(void *data)
	printf("%s", (char *)data);
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
runq_init(struct runq *rq)
	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
runq_clrbit(struct runq *rq, int pri)
	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
runq_findbit(struct runq *rq)
	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
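/*
 * Worked example (illustrative; assumes 32-bit status words, i.e.
 * RQB_L2BPW == 5, and RQB_FFS() returning a zero-based bit index): if
 * rqb_bits[0] == 0 and rqb_bits[1] == 0x10, the first set bit is bit 4 of
 * word 1, so runq_findbit() reports pri = 4 + (1 << 5) = 36, i.e. queue 36
 * is the highest-priority non-empty queue.
 */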
/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
runq_setbit(struct runq *rq, int pri)
	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
runq_add(struct runq *rq, struct td_sched *ts, int flags)
	pri = ts->ts_thread->td_priority / RQ_PPQ;
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
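/*
 * Worked example (illustrative; assumes RQ_PPQ == 4): a thread with
 * td_priority 130 lands in queue 130 / 4 = 32, so priorities 128 through
 * 131 all share that queue.  A thread re-queued with SRQ_PREEMPTED is put
 * at the head of its queue so that it runs again before equal-priority
 * threads that have not run yet.
 */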
/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
runq_check(struct runq *rq)
	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
	CTR0(KTR_RUNQ, "runq_check: empty");
#if defined(SMP) && defined(SCHED_4BSD)
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif
/*
 * Find the highest priority process on the run queue.
 */
runq_choose(struct runq *rq)
	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; 0 or less is ignored. */
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct td_sched *ts2;
			ts2 = ts = TAILQ_FIRST(rqh);

			while (count-- && ts2) {
				if (ts2->ts_thread->td_lastcpu == cpu) {
				ts2 = TAILQ_NEXT(ts2, ts_procq);
		ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
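/*
 * Illustrative note (not part of this file): runq_fuzz is a read-write
 * sysctl, so the affinity window can be tuned at run time, e.g.
 *
 *	# sysctl kern.sched.runq_fuzz=4
 *
 * which lets runq_choose() scan up to the first four entries of the
 * highest-priority queue for a thread that last ran on the current CPU,
 * trading strict FIFO order within the queue for some cache affinity.
 */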
/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ts->ts_state afterwards.
 */
runq_remove(struct runq *rq, struct td_sched *ts)
	KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ts->ts_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	KASSERT(ts != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ts, ts_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
/****** Functions that are temporarily here ***********/
extern struct mtx kse_zombie_lock;
/*
 * Allocate scheduler-specific per-process resources.
 * The thread and proc have already been linked in.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
sched_newproc(struct proc *p, struct thread *td)
/*
 * A thread is either being created or recycled.
 * Fix up the per-scheduler resources associated with it.
 *
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
sched_newthread(struct thread *td)
	ts = (struct td_sched *) (td + 1);
	bzero(ts, sizeof(*ts));
	ts->ts_state = TSS_THREAD;
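/*
 * Illustrative note (an assumption, not verified here): the cast above
 * relies on the thread allocator sizing its zone as sizeof(struct thread)
 * plus sizeof(struct td_sched), so the scheduler-private data sits
 * immediately after the struct thread and "td + 1" points at it.
 */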
/*
 * Called from:
 *  proc_init() (UMA) via sched_newproc()
 */
sched_init_concurrency(struct proc *p)
/*
 * Change the concurrency of an existing proc to N.
 */
sched_set_concurrency(struct proc *p, int concurrency)
/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which thread_exit() calls
 * only for threads exiting without the rest of the process, because
 * sched_exit() also calls it and we would not want to call it twice.
 * XXX This can probably be fixed.
 */
sched_thread_exit(struct thread *td)
#endif /* KERN_SWITCH_INCLUDE */