/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/***
Here is the logic..

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg).  If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running.  The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned.  If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become running, we know
it was associated with the highest priority thread in the queue (at the head
of the queue).  If it is also the last assigned, we know M was 1 and must
now be 0.  Since the thread is no longer queued, that pointer must be
removed from it.  Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not, i.e. whether it is 'earlier'
on the list than that thread or later.  If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...              (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread  (queued in priority order)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP.  If this situation changes, the KSEs are
reassigned to keep this true.
*/
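
/*
 * Illustrative sketch (not part of the original file): the invariant
 * described above can be spelled out as a check.  The helper name is
 * hypothetical; the structures and fields are the ones used below.
 *
 *	static void
 *	kg_check_last_assigned(struct ksegrp *kg)
 *	{
 *		struct thread *td;
 *		int found = 0;
 *
 *		TAILQ_FOREACH(td, &kg->kg_runq, td_runq)
 *			if (td == kg->kg_last_assigned)
 *				found = 1;
 *		KASSERT(kg->kg_last_assigned == NULL || found,
 *		    ("kg_last_assigned not on the ksegrp run queue"));
 *	}
 */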

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define	td_kse	td_sched

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
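
/*
 * Example (illustrative, from userland):
 *
 *	$ sysctl kern.sched.preemption
 *	kern.sched.preemption: 1
 *
 * A value of 1 means the kernel was built with "options PREEMPTION";
 * the node is read-only, so it cannot be toggled at runtime.
 */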

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/

/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on AP's */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_YIELDING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* no threads to use up the slots. quit now */
			break;
		}
	}
}
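
/*
 * Worked example (illustrative): a ksegrp has threads t1 > t2 > t3 queued
 * by priority with kg_last_assigned == t1.  When a slot is released,
 * slot_fill() advances to TAILQ_NEXT(t1) == t2, hands t2 to sched_add(),
 * and moves kg_last_assigned to t2, preserving the "assigned prefix"
 * invariant described at the top of the file.
 */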

/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* remove from sys run queue and free up a slot */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ
#ifdef SCHED_ULE
	    || ((ke->ke_flags & KEF_ASSIGNED) != 0 &&
	    (ke->ke_flags & KEF_REMOVED) == 0)
#endif
	   ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}

/*
 * This function is called when a thread is about to be put on a
 * ksegrp run queue because it has been made runnable or its
 * priority has been adjusted and the ksegrp does not have a
 * free kse slot.  It determines if a thread from the same ksegrp
 * should be preempted.  If so, it tries to switch threads
 * if the thread is on the same cpu, or notifies another cpu that
 * it should switch threads.
 */
#ifndef SMP
static void
maybe_preempt_in_ksegrp(struct thread *td)
{
	struct thread *running_thread;

	mtx_assert(&sched_lock, MA_OWNED);
	running_thread = curthread;
	if (running_thread->td_ksegrp != td->td_ksegrp)
		return;

	if (td->td_priority >= running_thread->td_priority)
		return;

#ifndef FULL_PREEMPTION
	if (td->td_priority > PRI_MAX_ITHD) {
		running_thread->td_flags |= TDF_NEEDRESCHED;
		return;
	}
#endif /* FULL_PREEMPTION */

#ifdef PREEMPTION
	if (running_thread->td_critnest > 1)
		running_thread->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL, NULL);
#else /* PREEMPTION */
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif /* PREEMPTION */
}
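
/*
 * Illustrative numbers: within one ksegrp, a newly runnable thread at
 * priority 80 preempts (or flags for reschedule) a current thread running
 * at priority 120, since lower numeric values mean higher priority; with
 * the values reversed, the "td_priority >= running_thread->td_priority"
 * check above simply returns.
 */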

#else /* SMP */
static void
maybe_preempt_in_ksegrp(struct thread *td)
{
	struct thread *running_thread;
	int worst_pri;
	struct ksegrp *kg;
	cpumask_t cpumask, dontuse;
	struct pcpu *pc;
	struct pcpu *best_pcpu;
	struct thread *cputhread;

	mtx_assert(&sched_lock, MA_OWNED);
	running_thread = curthread;

#if !defined(KSEG_PEEMPT_BEST_CPU)
	if (running_thread->td_ksegrp != td->td_ksegrp) {
#endif
	kg = td->td_ksegrp;

	/* if someone is ahead of this thread, wait our turn */
	if (td != TAILQ_FIRST(&kg->kg_runq))
		return;

	worst_pri = td->td_priority;
	best_pcpu = NULL;
	dontuse = stopped_cpus | idle_cpus_mask;

	/*
	 * Find the cpu with the worst priority that runs a thread from
	 * the same ksegrp - if multiple exist, prefer first the thread's
	 * last-run cpu and then the current cpu.
	 */
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpumask = pc->pc_cpumask;
		cputhread = pc->pc_curthread;

		if ((cpumask & dontuse) ||
		    cputhread->td_ksegrp != kg)
			continue;

		if (cputhread->td_priority > worst_pri) {
			worst_pri = cputhread->td_priority;
			best_pcpu = pc;
			continue;
		}

		if (cputhread->td_priority == worst_pri &&
		    best_pcpu != NULL &&
		    (td->td_lastcpu == pc->pc_cpuid ||
		    (PCPU_GET(cpumask) == cpumask &&
		    td->td_lastcpu != best_pcpu->pc_cpuid)))
			best_pcpu = pc;
	}

	/* Check if we need to preempt someone */
	if (best_pcpu == NULL)
		return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
	if (td->td_priority <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
	{
		ipi_selected(best_pcpu->pc_cpumask, IPI_PREEMPT);
		return;
	}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

	if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
		best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
		return;
	}
#if !defined(KSEG_PEEMPT_BEST_CPU)
	}
#endif

	if (td->td_priority >= running_thread->td_priority)
		return;
#ifdef PREEMPTION

#if !defined(FULL_PREEMPTION)
	if (td->td_priority > PRI_MAX_ITHD) {
		running_thread->td_flags |= TDF_NEEDRESCHED;
		return;
	}
#endif /* ! FULL_PREEMPTION */

	if (running_thread->td_critnest > 1)
		running_thread->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL, NULL);

#else /* PREEMPTION */
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif /* PREEMPTION */
}
#endif /* !SMP */

static int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));

	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);
			}
			kg->kg_avail_opennings = 1;
		}
		sched_add(td, flags);
		return;
	}

	/*
	 * If the concurrency has been reduced, and we would go in the
	 * assigned section, then keep removing entries from the
	 * system run queue, until we are not in that section
	 * or there is room for us to be put in that section.
	 * What we MUST avoid is the case where there are threads of less
	 * priority than the new one scheduled, but it can not
	 * be scheduled itself.  That would lead to a non-contiguous set
	 * of scheduled threads, and everything would break.
	 */
	tda = kg->kg_last_assigned;
	while ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and if needed, readjust the last_assigned pointer.
	 * It may be that we need to schedule something anyhow,
	 * even if the available slots are negative, so that
	 * all the items before last_assigned are scheduled.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the slot.. (maybe us)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
		    td, td->td_ksegrp, td->td_proc->p_pid);
		if ((flags & SRQ_YIELDING) == 0)
			maybe_preempt_in_ksegrp(td);
	}
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
#ifdef PREEMPTION
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_owepreempt) {
			td->td_critnest = 1;
			mtx_lock_spin(&sched_lock);
			td->td_critnest--;
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
	} else
#endif
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}
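
/*
 * Illustrative usage (hypothetical caller): critical sections nest, and a
 * preemption requested while one is held is deferred until the outermost
 * exit, where critical_exit() performs the owed mi_switch().
 *
 *	critical_enter();
 *	... touch per-CPU state; no involuntary context switch here ...
 *	critical_exit();	<-- any deferred preemption happens here
 */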

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether we should immediately switch (preempt) to the new
 * thread.  If so, it switches to it and eventually returns true.  If not,
 * it returns false so that the caller may place the thread on an
 * appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a realtime priority while
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue so take it off that first.
		 * Also undo any damage done to the last_assigned pointer.
		 * XXX Fix setrunqueue so this isn't needed.
		 */
		struct ksegrp *kg;

		kg = td->td_ksegrp;
		if (kg->kg_last_assigned == td)
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}
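
/*
 * Worked example (illustrative, assuming the FULL_PREEMPTION checks pass):
 * with td at priority 40 and curthread at priority 130, pri < cpri, so
 * maybe_preempt() switches immediately and returns 1; with the values
 * swapped, the early "pri >= cpri" test rejects the preemption and
 * returns 0, so the caller just queues the thread.
 */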

#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif /* !PREEMPTION */

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/

/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}
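
/*
 * Worked example (assuming RQB_BPW == 32 status bits per word): queue
 * index pri = 37 lives in word RQB_WORD(37) == 1 and is tracked by bit
 * RQB_BIT(37) == (1 << 5), so clearing it leaves every other queue's
 * status bit untouched.
 */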

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	}
}
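
/*
 * Worked example (assuming RQ_PPQ == 4 priority levels per queue): a
 * thread whose td_priority is 100 lands on rq_queues[100 / 4] ==
 * rq_queues[25], sharing that queue with priorities 100 through 103.
 * SRQ_PREEMPTED inserts at the head, so a preempted thread resumes before
 * its equal-priority peers.
 */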

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif
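
/*
 * Tuning example (illustrative): "sysctl kern.sched.runq_fuzz=3" makes
 * runq_choose() scan up to the first three KSEs of the head queue for one
 * whose thread last ran on the current CPU; per the comment below, 1 is
 * the normal setting and values of 0 or less are ignored.
 */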

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;

			ke2 = ke = TAILQ_FIRST(rqh);
			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** Functions that are temporarily here ***********/

extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor() (*may go away)
 *  thread_init() (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	ke = (struct td_sched *) (td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched = ke;
	ke->ke_thread = td;
	ke->ke_state = KES_THREAD;
}

/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
 * concurrency slot.
 * May be used "offline"... before the ksegrp is attached to the world
 * and thus wouldn't need schedlock in that case.
 * Called from:
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	CTR1(KTR_RUNQ, "kg %p init slots and concurrency to 1", kg);
	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N.
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	CTR4(KTR_RUNQ, "kg %p set concurrency to %d, slots %d -> %d",
	    kg,
	    concurrency,
	    kg->kg_avail_opennings,
	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}
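
/*
 * Worked example: raising a ksegrp's concurrency from 1 to 3 while no
 * slot is currently free (kg_avail_opennings == 0) adds the delta of 2,
 * so two more runnable threads may be handed to the system scheduler by
 * slot_fill().
 */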

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which is only called from
 * thread_exit() for threads exiting without the rest of the process,
 * because it is also called from sched_exit() and we wouldn't want to
 * call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	SLOT_RELEASE(td->td_ksegrp);
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */