2 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 If there are N processors, then there are at most N KSEs (kernel
31 schedulable entities) working to process threads that belong to a
32 KSEGROUP (kg). If there are X of these KSEs actually running at the
33 moment in question, then there are at most M = (N - X) of these KSEs on
34 the run queue, as running KSEs are not on the queue.
36 Runnable threads are queued off the KSEGROUP in priority order.
37 If there are M or more threads runnable, the top M threads
38 (by priority) are 'preassigned' to the M KSEs not running. The KSEs take
39 their priority from those threads and are put on the run queue.
41 The last thread that had a priority high enough to have a KSE associated
42 with it, AND IS ON THE RUN QUEUE, is pointed to by
43 kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
44 assigned, either because all the available KSEs are actively running or because
45 there are no threads queued, that pointer is NULL.
47 When a KSE is removed from the run queue to become runnable, we know
48 it was associated with the highest priority thread in the queue (at the head
49 of the queue). If it was also the last assigned, we know M was 1 and must
50 now be 0. Since the thread is no longer queued, the pointer must no longer
51 refer to it. Since we know there were no more KSEs available
52 (M was 1 and is now 0), and since we are not FREEING our KSE
53 but using it, we know there are STILL no more KSEs available, so we can prove
54 that the next thread in the ksegrp list will not have a KSE to assign to
55 it; hence the pointer must be made 'invalid' (NULL).
57 The pointer exists so that when a new thread is made runnable, it can
58 have its priority compared with that of the last assigned thread to see if
59 it should 'steal' that thread's KSE, i.e. whether it is 'earlier'
60 on the list than that thread or later. If it is earlier, then the KSE is
61 removed from the last assigned thread (which is now not assigned a KSE)
62 and reassigned to the new thread, which is placed earlier in the list (see
63 the sketch at the end of this comment). The pointer is then backed up to the previous thread (which may or may not
66 When a thread sleeps or is removed, the KSE becomes available and if there
67 are queued threads that are not assigned KSEs, the highest priority one of
68 them is assigned the KSE, which is then placed back on the run queue at
69 the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
72 The following diagram shows 2 KSEs and 3 threads from a single process.
74 RUNQ: --->KSE---KSE--... (KSEs queued at priorities from threads)
77 KSEGROUP---thread--thread--thread (queued in priority order)
82 The result of this scheme is that the M available KSEs are always
83 queued at the priorities they have inherited from the M highest priority
84 threads for that KSEGROUP. If this situation changes, the KSEs are
85 reassigned to keep this true.
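
For example, on a 4-cpu system (N = 4) with 3 of this ksegrp's KSEs actually
running (X = 3), at most M = 1 KSE sits on the run queue, and
kg->kg_last_assigned points at the single queued thread that owns it.

The 'steal' decision mentioned above can be sketched in simplified pseudo-C
(illustration only, not the literal code; see setrunqueue() below for the
real thing):

	struct thread *tda = kg->kg_last_assigned;

	if (kg->kg_avail_opennings <= 0 &&
	    tda != NULL && tda->td_priority > td->td_priority) {
		... the new thread td sorts earlier than tda, so td
		    commandeers tda's slot: tda comes off the system run
		    queue and kg_last_assigned backs up one entry ...
		kg->kg_last_assigned = TAILQ_PREV(tda, threadqueue, td_runq);
	}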
88 #include <sys/cdefs.h>
89 __FBSDID("$FreeBSD$");
91 #include "opt_sched.h"
93 #ifndef KERN_SWITCH_INCLUDE
94 #include <sys/param.h>
95 #include <sys/systm.h>
97 #include <sys/kernel.h>
100 #include <sys/mutex.h>
101 #include <sys/proc.h>
102 #include <sys/queue.h>
103 #include <sys/sched.h>
104 #else /* KERN_SWITCH_INCLUDE */
105 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
108 #if defined(SMP) && defined(SCHED_4BSD)
109 #include <sys/sysctl.h>
112 #ifdef FULL_PREEMPTION
114 #error "The FULL_PREEMPTION option requires the PREEMPTION option"
118 CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
120 #define td_kse td_sched
123 * kern.sched.preemption allows user space to determine whether preemption
124 * support is compiled in. It is not currently a boot or runtime flag that
128 static int kern_sched_preemption = 1;
130 static int kern_sched_preemption = 0;
132 SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
133 &kern_sched_preemption, 0, "Kernel preemption enabled");
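
/*
 * From user space the flag can be read with sysctl(8), for example
 * (output shown is only illustrative; the value depends on whether the
 * kernel was built with "options PREEMPTION"):
 *
 *	$ sysctl kern.sched.preemption
 *	kern.sched.preemption: 1
 *
 * or programmatically via sysctlbyname(3); a minimal sketch:
 *
 *	int preempt;
 *	size_t len = sizeof(preempt);
 *
 *	if (sysctlbyname("kern.sched.preemption", &preempt, &len, NULL, 0) == 0)
 *		printf("kernel preemption: %s\n", preempt ? "yes" : "no");
 */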
135 /************************************************************************
136 * Functions that manipulate runnability from a thread perspective. *
137 ************************************************************************/
139 * Select the KSE that will be run next. From that find the thread, and
140 * remove it from the KSEGRP's run queue. If there is thread clustering,
141 * this will be what does it.
150 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
151 if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
152 /* Shutting down, run idlethread on APs */
153 td = PCPU_GET(idlethread);
155 CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
156 ke->ke_flags |= KEF_DIDRUN;
166 KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
168 if (td->td_proc->p_flag & P_HADTHREADS) {
169 if (kg->kg_last_assigned == td) {
170 kg->kg_last_assigned = TAILQ_PREV(td,
171 threadqueue, td_runq);
173 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
175 CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
176 td, td->td_priority);
178 /* Simulate runq_choose() having returned the idle thread */
179 td = PCPU_GET(idlethread);
181 CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
183 ke->ke_flags |= KEF_DIDRUN;
186 * If we are in a panic, only allow system threads,
187 * plus the one we are running in, to be run.
189 if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
190 (td->td_flags & TDF_INPANIC) == 0)) {
191 /* note that it is no longer on the run queue */
201 * Given a surplus system slot, try to assign a new runnable thread to it.
203 * sched_thread_exit() (local)
204 * sched_switch() (local)
206 * remrunqueue() (local) (not at the moment)
209 slot_fill(struct ksegrp *kg)
213 mtx_assert(&sched_lock, MA_OWNED);
214 while (kg->kg_avail_opennings > 0) {
216 * Find the first unassigned thread
218 if ((td = kg->kg_last_assigned) != NULL)
219 td = TAILQ_NEXT(td, td_runq);
221 td = TAILQ_FIRST(&kg->kg_runq);
224 * If we found one, send it to the system scheduler.
227 kg->kg_last_assigned = td;
228 sched_add(td, SRQ_YIELDING);
229 CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
231 /* no threads to use up the slots. quit now */
239 * Remove a thread from its KSEGRP's run queue.
240 * This in turn may remove it from a KSE if it was already assigned
241 * to one, possibly causing a new thread to be assigned to the KSE
242 * and the KSE getting a new priority.
245 remrunqueue(struct thread *td)
247 struct thread *td2, *td3;
251 mtx_assert(&sched_lock, MA_OWNED);
252 KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
255 CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
258 * If it is not a threaded process, take the shortcut.
260 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
261 /* remove from sys run queue and free up a slot */
263 ke->ke_state = KES_THREAD;
266 td3 = TAILQ_PREV(td, threadqueue, td_runq);
267 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
268 if (ke->ke_state == KES_ONRUNQ) {
270 * This thread has been assigned to the system run queue.
271 * We need to dissociate it and try to assign the
272 * KSE to the next available thread. Then, we should
273 * see if we need to move the KSE in the run queues.
276 ke->ke_state = KES_THREAD;
277 td2 = kg->kg_last_assigned;
278 KASSERT((td2 != NULL), ("last assigned has wrong value"));
280 kg->kg_last_assigned = td3;
281 /* slot_fill(kg); */ /* will replace it with another */
287 * Change the priority of a thread that is on the run queue.
290 adjustrunqueue(struct thread *td, int newpri)
295 mtx_assert(&sched_lock, MA_OWNED);
296 KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
299 CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
301 * If it is not a threaded process, take the shortcut.
303 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
304 /* We only care about the kse in the run queue. */
305 td->td_priority = newpri;
306 if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
308 sched_add(td, SRQ_BORING);
313 /* It is a threaded process */
315 if (ke->ke_state == KES_ONRUNQ
317 || ((ke->ke_flags & KEF_ASSIGNED) != 0 &&
318 (ke->ke_flags & KEF_REMOVED) == 0)
321 if (kg->kg_last_assigned == td) {
322 kg->kg_last_assigned =
323 TAILQ_PREV(td, threadqueue, td_runq);
327 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
329 td->td_priority = newpri;
330 setrunqueue(td, SRQ_BORING);
334 * This function is called when a thread is about to be put on a
335 * ksegrp run queue because it has been made runnable or its
336 * priority has been adjusted and the ksegrp does not have a
337 * free kse slot. It determines if a thread from the same ksegrp
338 * should be preempted. If so, it tries to switch threads
339 * if the thread is on the same cpu or notifies another cpu that
340 * it should switch threads.
344 maybe_preempt_in_ksegrp(struct thread *td)
347 struct thread *running_thread;
349 mtx_assert(&sched_lock, MA_OWNED);
350 running_thread = curthread;
352 if (running_thread->td_ksegrp != td->td_ksegrp)
355 if (td->td_priority >= running_thread->td_priority)
358 #ifndef FULL_PREEMPTION
359 if (td->td_priority > PRI_MAX_ITHD) {
360 running_thread->td_flags |= TDF_NEEDRESCHED;
363 #endif /* FULL_PREEMPTION */
365 if (running_thread->td_critnest > 1)
366 running_thread->td_owepreempt = 1;
368 mi_switch(SW_INVOL, NULL);
370 #else /* PREEMPTION */
371 running_thread->td_flags |= TDF_NEEDRESCHED;
372 #endif /* PREEMPTION */
378 struct thread *running_thread;
381 cpumask_t cpumask, dontuse;
383 struct pcpu *best_pcpu;
384 struct thread *cputhread;
386 mtx_assert(&sched_lock, MA_OWNED);
388 running_thread = curthread;
390 #if !defined(KSEG_PEEMPT_BEST_CPU)
391 if (running_thread->td_ksegrp != td->td_ksegrp) {
395 /* if someone is ahead of this thread, wait our turn */
396 if (td != TAILQ_FIRST(&kg->kg_runq))
399 worst_pri = td->td_priority;
401 dontuse = stopped_cpus | idle_cpus_mask;
404 * Find the cpu with the worst (numerically highest) priority that is
405 * running a thread from the same ksegrp. If several qualify, prefer
406 * first the cpu this thread last ran on and then the current cpu.
409 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
410 cpumask = pc->pc_cpumask;
411 cputhread = pc->pc_curthread;
413 if ((cpumask & dontuse) ||
414 cputhread->td_ksegrp != kg)
417 if (cputhread->td_priority > worst_pri) {
418 worst_pri = cputhread->td_priority;
423 if (cputhread->td_priority == worst_pri &&
425 (td->td_lastcpu == pc->pc_cpuid ||
426 (PCPU_GET(cpumask) == cpumask &&
427 td->td_lastcpu != best_pcpu->pc_cpuid)))
431 /* Check if we need to preempt someone */
432 if (best_pcpu == NULL)
435 #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
436 #if !defined(FULL_PREEMPTION)
437 if (td->td_priority <= PRI_MAX_ITHD)
438 #endif /* ! FULL_PREEMPTION */
440 ipi_selected(best_pcpu->pc_cpumask, IPI_PREEMPT);
443 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
445 if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
446 best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
447 ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
450 #if !defined(KSEG_PEEMPT_BEST_CPU)
454 if (td->td_priority >= running_thread->td_priority)
458 #if !defined(FULL_PREEMPTION)
459 if (td->td_priority > PRI_MAX_ITHD) {
460 running_thread->td_flags |= TDF_NEEDRESCHED;
462 #endif /* ! FULL_PREEMPTION */
464 if (running_thread->td_critnest > 1)
465 running_thread->td_owepreempt = 1;
467 mi_switch(SW_INVOL, NULL);
469 #else /* PREEMPTION */
470 running_thread->td_flags |= TDF_NEEDRESCHED;
471 #endif /* PREEMPTION */
479 setrunqueue(struct thread *td, int flags)
485 CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
486 td, td->td_ksegrp, td->td_proc->p_pid);
487 CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
488 td, td->td_proc->p_comm, td->td_priority, curthread,
489 curthread->td_proc->p_comm);
490 mtx_assert(&sched_lock, MA_OWNED);
491 KASSERT((td->td_inhibitors == 0),
492 ("setrunqueue: trying to run inhibitted thread"));
493 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
494 ("setrunqueue: bad thread state"));
497 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
499 * Common path optimisation: Only one of everything
500 * and the KSE is always already attached.
501 * Totally ignore the ksegrp run queue.
503 if (kg->kg_avail_opennings != 1) {
504 if (limitcount < 1) {
506 printf("pid %d: corrected slot count (%d->1)\n",
507 td->td_proc->p_pid, kg->kg_avail_opennings);
510 kg->kg_avail_opennings = 1;
512 sched_add(td, flags);
517 * If the concurrency has been reduced, and we would land in the
518 * assigned section, then keep removing entries from the
519 * system run queue, until we are not in that section
520 * or there is room for us to be put in that section.
521 * What we MUST avoid is the case where there are threads of lower
522 * priority than the new one scheduled, but it cannot
523 * be scheduled itself. That would lead to a non-contiguous set
524 * of scheduled threads, and everything would break.
526 tda = kg->kg_last_assigned;
527 while ((kg->kg_avail_opennings <= 0) &&
528 (tda && (tda->td_priority > td->td_priority))) {
530 * None free, but there is one we can commandeer.
533 "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
535 tda = kg->kg_last_assigned =
536 TAILQ_PREV(tda, threadqueue, td_runq);
540 * Add the thread to the ksegrp's run queue at
541 * the appropriate place.
543 TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
544 if (td2->td_priority > td->td_priority) {
545 TAILQ_INSERT_BEFORE(td2, td, td_runq);
550 /* We ran off the end of the TAILQ or it was empty. */
551 TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
555 * If we have a slot to use, then put the thread on the system
556 * run queue and if needed, readjust the last_assigned pointer.
557 * It may be that we need to schedule something anyhow
558 * even if the available slots are negative, so that
559 * all the items < last_assigned are scheduled.
561 if (kg->kg_avail_opennings > 0) {
564 * No pre-existing last assigned, so whoever is first
565 * gets the slot (maybe us).
567 td2 = TAILQ_FIRST(&kg->kg_runq);
568 kg->kg_last_assigned = td2;
569 } else if (tda->td_priority > td->td_priority) {
573 * We are past last_assigned, so
574 * give the next slot to whatever is next,
575 * which may or may not be us.
577 td2 = TAILQ_NEXT(tda, td_runq);
578 kg->kg_last_assigned = td2;
580 sched_add(td2, flags);
582 CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
583 td, td->td_ksegrp, td->td_proc->p_pid);
584 if ((flags & SRQ_YIELDING) == 0)
585 maybe_preempt_in_ksegrp(td);
590 * Kernel thread preemption implementation. Critical sections mark
591 * regions of code in which preemption is not allowed.
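 *
 * A typical use brackets a short per-CPU update so the thread cannot be
 * preempted in the middle of it. Minimal sketch only; "foo_busy" is a
 * hypothetical per-CPU field:
 *
 *	critical_enter();
 *	PCPU_SET(foo_busy, 1);
 *	... do the per-CPU work ...
 *	critical_exit();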
600 CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
601 (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
610 KASSERT(td->td_critnest != 0,
611 ("critical_exit: td_critnest == 0"));
613 if (td->td_critnest == 1) {
615 mtx_assert(&sched_lock, MA_NOTOWNED);
616 if (td->td_owepreempt) {
618 mtx_lock_spin(&sched_lock);
620 mi_switch(SW_INVOL, NULL);
621 mtx_unlock_spin(&sched_lock);
628 CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
629 (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
633 * This function is called when a thread is about to be put on a run queue
634 * because it has been made runnable or its priority has been adjusted. It
635 * determines whether the current thread should immediately be preempted in
636 * favor of the new one. If so, it switches to it and eventually returns true. If not, it returns false
637 * so that the caller may place the thread on an appropriate run queue.
640 maybe_preempt(struct thread *td)
647 mtx_assert(&sched_lock, MA_OWNED);
650 * The new thread should not preempt the current thread if any of the
651 * following conditions are true:
653 * - The kernel is in the throes of crashing (panicstr).
654 * - The current thread has a higher (numerically lower) or
655 * equivalent priority. Note that this prevents curthread from
656 * trying to preempt to itself.
657 * - It is too early in the boot for context switches (cold is set).
658 * - The current thread has an inhibitor set or is in the process of
659 * exiting. In this case, the current thread is about to switch
660 * out anyway, so there's no point in preempting. If we did,
661 * the current thread would not be properly resumed as well, so
662 * just avoid that whole landmine.
663 * - If the new thread's priority is not a realtime priority and
664 * the current thread's priority is not an idle priority and
665 * FULL_PREEMPTION is disabled.
667 * If all of these conditions are false, but the current thread is in
668 * a nested critical section, then we have to defer the preemption
669 * until we exit the critical section. Otherwise, switch immediately
673 KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
674 ("thread has no (or wrong) sched-private part."));
675 KASSERT((td->td_inhibitors == 0),
676 ("maybe_preempt: trying to run inhibitted thread"));
677 pri = td->td_priority;
678 cpri = ctd->td_priority;
679 if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
680 TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
682 #ifndef FULL_PREEMPTION
683 if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
687 if (ctd->td_critnest > 1) {
688 CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
690 ctd->td_owepreempt = 1;
695 * Thread is runnable but not yet put on system run queue.
697 MPASS(TD_ON_RUNQ(td));
698 MPASS(td->td_sched->ke_state != KES_ONRUNQ);
699 if (td->td_proc->p_flag & P_HADTHREADS) {
701 * If this is a threaded process we actually ARE on the
702 * ksegrp run queue, so take it off that first.
703 * Also undo any damage done to the last_assigned pointer.
704 * XXX Fix setrunqueue so this isn't needed
709 if (kg->kg_last_assigned == td)
710 kg->kg_last_assigned =
711 TAILQ_PREV(td, threadqueue, td_runq);
712 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
716 CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
717 td->td_proc->p_pid, td->td_proc->p_comm);
718 mi_switch(SW_INVOL|SW_PREEMPT, td);
727 /* XXX: There should be a non-static version of this. */
729 printf_caddr_t(void *data)
731 printf("%s", (char *)data);
733 static char preempt_warning[] =
734 "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
735 SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
740 /************************************************************************
741 * SYSTEM RUN QUEUE manipulations and tests *
742 ************************************************************************/
744 * Initialize a run structure.
747 runq_init(struct runq *rq)
751 bzero(rq, sizeof *rq);
752 for (i = 0; i < RQ_NQS; i++)
753 TAILQ_INIT(&rq->rq_queues[i]);
757 * Clear the status bit of the queue corresponding to priority level pri,
758 * indicating that it is empty.
761 runq_clrbit(struct runq *rq, int pri)
765 rqb = &rq->rq_status;
766 CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
767 rqb->rqb_bits[RQB_WORD(pri)],
768 rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
769 RQB_BIT(pri), RQB_WORD(pri));
770 rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
774 * Find the index of the first non-empty run queue. This is done by
775 * scanning the status bits; a set bit indicates a non-empty queue.
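 * As a worked example, assuming 32-bit status words (RQB_BPW == 32,
 * RQB_L2BPW == 5): a lone non-empty queue at index 37 appears as bit 5
 * of word 1, so RQB_FFS() reports 5 and 5 + (1 << 5) recovers index 37.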
778 runq_findbit(struct runq *rq)
784 rqb = &rq->rq_status;
785 for (i = 0; i < RQB_LEN; i++)
786 if (rqb->rqb_bits[i]) {
787 pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
788 CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
789 rqb->rqb_bits[i], i, pri);
797 * Set the status bit of the queue corresponding to priority level pri,
798 * indicating that it is non-empty.
801 runq_setbit(struct runq *rq, int pri)
805 rqb = &rq->rq_status;
806 CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
807 rqb->rqb_bits[RQB_WORD(pri)],
808 rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
809 RQB_BIT(pri), RQB_WORD(pri));
810 rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
814 * Add the KSE to the queue specified by its priority, and set the
815 * corresponding status bit.
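 * (For instance, with the usual RQ_PPQ of 4, a thread at priority 100
 * lands on queue index 25; the exact divisor is whatever RQ_PPQ is
 * defined to on the platform.)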
818 runq_add(struct runq *rq, struct kse *ke, int flags)
823 pri = ke->ke_thread->td_priority / RQ_PPQ;
824 ke->ke_rqindex = pri;
825 runq_setbit(rq, pri);
826 rqh = &rq->rq_queues[pri];
827 CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
828 ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
829 if (flags & SRQ_PREEMPTED) {
830 TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
832 TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
837 * Return true if there are runnable processes of any priority on the run
838 * queue, false otherwise. Has no side effects and does not modify the run
842 runq_check(struct runq *rq)
847 rqb = &rq->rq_status;
848 for (i = 0; i < RQB_LEN; i++)
849 if (rqb->rqb_bits[i]) {
850 CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
851 rqb->rqb_bits[i], i);
854 CTR0(KTR_RUNQ, "runq_check: empty");
859 #if defined(SMP) && defined(SCHED_4BSD)
861 SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
865 * Find the highest priority process on the run queue.
868 runq_choose(struct runq *rq)
874 mtx_assert(&sched_lock, MA_OWNED);
875 while ((pri = runq_findbit(rq)) != -1) {
876 rqh = &rq->rq_queues[pri];
877 #if defined(SMP) && defined(SCHED_4BSD)
878 /* fuzz == 1 is normal; 0 or less are ignored */
881 * In the first couple of entries, check if
882 * there is one for our CPU as a preference.
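 * (With runq_fuzz set to 3, say, only the first three queued KSEs are
 * considered before falling back to the head of the queue.)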
884 int count = runq_fuzz;
885 int cpu = PCPU_GET(cpuid);
887 ke2 = ke = TAILQ_FIRST(rqh);
889 while (count-- && ke2) {
890 if (ke->ke_thread->td_lastcpu == cpu) {
894 ke2 = TAILQ_NEXT(ke2, ke_procq);
898 ke = TAILQ_FIRST(rqh);
899 KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
901 "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
904 CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
910 * Remove the KSE from the queue specified by its priority, and clear the
911 * corresponding status bit if the queue becomes empty.
912 * Caller must set ke->ke_state afterwards.
915 runq_remove(struct runq *rq, struct kse *ke)
920 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
921 ("runq_remove: process swapped out"));
922 pri = ke->ke_rqindex;
923 rqh = &rq->rq_queues[pri];
924 CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
925 ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
926 KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
927 TAILQ_REMOVE(rqh, ke, ke_procq);
928 if (TAILQ_EMPTY(rqh)) {
929 CTR0(KTR_RUNQ, "runq_remove: empty");
930 runq_clrbit(rq, pri);
934 /****** functions that are temporarily here ***********/
936 extern struct mtx kse_zombie_lock;
939 * Allocate scheduler specific per-process resources.
940 * The thread and ksegrp have already been linked in.
941 * In this case just set the default concurrency value.
944 * proc_init() (UMA init method)
947 sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
950 /* This can go in sched_fork */
951 sched_init_concurrency(kg);
955 * A thread is being either created or recycled.
956 * Fix up the per-scheduler resources associated with it.
958 * sched_fork_thread()
959 * thread_dtor() (*may go away)
960 * thread_init() (*may go away)
963 sched_newthread(struct thread *td)
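	/*
	 * The scheduler-private td_sched area lives in the memory
	 * immediately following the thread structure itself, hence
	 * the (td + 1) arithmetic below.
	 */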
967 ke = (struct td_sched *) (td + 1);
968 bzero(ke, sizeof(*ke));
971 ke->ke_state = KES_THREAD;
975 * Set up an initial concurrency of 1
976 * and set the given thread (if given) to be using that
978 * May be used "offline", i.e. before the ksegrp is attached to the world
979 * and thus wouldn't need schedlock in that case.
982 * proc_init() (UMA) via sched_newproc()
985 sched_init_concurrency(struct ksegrp *kg)
988 CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
989 kg->kg_concurrency = 1;
990 kg->kg_avail_opennings = 1;
994 * Change the concurrency of an existing ksegrp to N
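 * The count of available slots moves by the same delta: raising a
 * ksegrp from a concurrency of 1 to 3, for example, adds two openings,
 * while lowering it may leave kg_avail_opennings negative for a while
 * (see the note in setrunqueue() above).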
1002 sched_set_concurrency(struct ksegrp *kg, int concurrency)
1005 CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d",
1008 kg->kg_avail_opennings,
1009 kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
1010 kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
1011 kg->kg_concurrency = concurrency;
1015 * Called from thread_exit() for all exiting threads
1017 * Not to be confused with sched_exit_thread(), which is called from
1018 * thread_exit() only for threads exiting without the rest of the process
1019 * exiting; sched_exit_thread() is also called from sched_exit() and we
1020 * wouldn't want to call it twice.
1021 * XXX This can probably be fixed.
1024 sched_thread_exit(struct thread *td)
1027 SLOT_RELEASE(td->td_ksegrp);
1028 slot_fill(td->td_ksegrp);
1031 #endif /* KERN_SWITCH_INCLUDE */