2 * Copyright (c) 2002-2005, Jeffrey Roberson <jeff@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include "opt_hwpmc_hooks.h"
31 #include "opt_sched.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
38 #include <sys/kernel.h>
41 #include <sys/mutex.h>
43 #include <sys/resource.h>
44 #include <sys/resourcevar.h>
45 #include <sys/sched.h>
48 #include <sys/sysctl.h>
49 #include <sys/sysproto.h>
50 #include <sys/turnstile.h>
51 #include <sys/vmmeter.h>
54 #include <sys/ktrace.h>
58 #include <sys/pmckern.h>
61 #include <machine/cpu.h>
62 #include <machine/smp.h>
64 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
65 /* XXX This is bogus compatibility crap for ps */
66 static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
67 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
69 static void sched_setup(void *dummy);
70 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
72 static void sched_initticks(void *dummy);
73 SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
75 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
77 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
80 static int slice_min = 1;
81 SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
83 static int slice_max = 10;
84 SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
87 int tickincr = 1 << 10;
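/*
 * tickincr converts one stathz tick into the <<10 fixed-point units used
 * for the sleep/run time accounting below.  The 1 << 10 default assumes
 * stathz == hz; sched_initticks() recomputes it as (hz << 10) / realstathz
 * once the real stathz is known.
 */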
90 * The following data structures are allocated within their parent structure
91 * but are scheduler specific.
94 * The schedulable entity that can be given a context to run. A process may
95 * have several of these.
98 TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */
99 int ke_flags; /* (j) KEF_* flags. */
100 struct thread *ke_thread; /* (*) Active associated thread. */
101 fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
102 char ke_rqindex; /* (j) Run queue index. */
104 KES_THREAD = 0x0, /* slaved to thread state */
106 } ke_state; /* (j) thread sched specific status. */
109 struct runq *ke_runq;
110 u_char ke_cpu; /* CPU that we have affinity for. */
111 /* The following variables are only used for pctcpu calculation */
112 int ke_ltick; /* Last tick that we were running on */
113 int ke_ftick; /* First tick that we were running on */
114 int ke_ticks; /* Tick count */
117 #define td_kse td_sched
118 #define td_slptime td_kse->ke_slptime
119 #define ke_proc ke_thread->td_proc
120 #define ke_ksegrp ke_thread->td_ksegrp
121 #define ke_assign ke_procq.tqe_next
122 /* flags kept in ke_flags */
123 #define KEF_ASSIGNED 0x0001 /* Thread is being migrated. */
124 #define KEF_BOUND 0x0002 /* Thread can not migrate. */
125 #define KEF_XFERABLE 0x0004 /* Thread was added as transferable. */
126 #define KEF_HOLD 0x0008 /* Thread is temporarily bound. */
127 #define KEF_REMOVED 0x0010 /* Thread was removed while ASSIGNED */
128 #define KEF_INTERNAL 0x0020 /* Thread added due to migration. */
129 #define KEF_PREEMPTED 0x0040 /* Thread was preempted */
130 #define KEF_DIDRUN 0x02000 /* Thread actually ran. */
131 #define KEF_EXIT 0x04000 /* Thread is being killed. */
134 struct thread *skg_last_assigned; /* (j) Last thread assigned to */
135 /* the system scheduler */
136 int skg_slptime; /* Number of ticks we vol. slept */
137 int skg_runtime; /* Number of ticks we were running */
138 int skg_avail_opennings; /* (j) Num unfilled slots in group.*/
139 int skg_concurrency; /* (j) Num threads requested in group.*/
141 #define kg_last_assigned kg_sched->skg_last_assigned
142 #define kg_avail_opennings kg_sched->skg_avail_opennings
143 #define kg_concurrency kg_sched->skg_concurrency
144 #define kg_runtime kg_sched->skg_runtime
145 #define kg_slptime kg_sched->skg_slptime
147 #define SLOT_RELEASE(kg) (kg)->kg_avail_opennings++
148 #define SLOT_USE(kg) (kg)->kg_avail_opennings--
150 static struct kse kse0;
151 static struct kg_sched kg_sched0;
154 * The priority is primarily determined by the interactivity score. Thus, we
155 * give lower(better) priorities to kse groups that use less CPU. The nice
156 * value is then directly added to this to allow nice to have some effect
159 * PRI_RANGE: Total priority range for timeshare threads.
160 * PRI_NRESV: Number of nice values.
161 * PRI_BASE: The start of the dynamic range.
163 #define SCHED_PRI_RANGE (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
164 #define SCHED_PRI_NRESV ((PRIO_MAX - PRIO_MIN) + 1)
165 #define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2)
166 #define SCHED_PRI_BASE (PRI_MIN_TIMESHARE)
167 #define SCHED_PRI_INTERACT(score) \
168 ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
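/*
 * A worked example, assuming the traditional 64-entry timeshare range for
 * PRI_MIN_TIMESHARE..PRI_MAX_TIMESHARE: an interactivity score of 50 maps
 * to SCHED_PRI_INTERACT(50) = 50 * 64 / 100 = 32 priority slots above
 * SCHED_PRI_BASE, before the nice value is added on in sched_priority().
 */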
171 * These determine the interactivity of a process.
173 * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate
174 * before throttling back.
175 * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time.
176 * INTERACT_MAX: Maximum interactivity value. Smaller is better.
177 * INTERACT_THRESH: Threshold for placement on the current runq.
179 #define SCHED_SLP_RUN_MAX ((hz * 5) << 10)
180 #define SCHED_SLP_RUN_FORK ((hz / 2) << 10)
181 #define SCHED_INTERACT_MAX (100)
182 #define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2)
183 #define SCHED_INTERACT_THRESH (30)
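/*
 * For a feel of the scale produced by sched_interact_score() below: a
 * ksegrp that has run for 100 units and slept for 50 (in arbitrary but
 * equal units) scores 50 + (50 - 50 / 2) = 75, while one that has slept
 * for 100 and run for 50 scores 50 / 2 = 25, which is below
 * SCHED_INTERACT_THRESH and therefore interactive.
 */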
186 * These parameters and macros determine the size of the time slice that is
187 * granted to each thread.
189 * SLICE_MIN: Minimum time slice granted, in units of ticks.
190 * SLICE_MAX: Maximum time slice granted.
191 * SLICE_RANGE: Range of available time slices scaled by hz.
192 * SLICE_SCALE: The number of slices granted per val in the range of [0, max].
193 * SLICE_NICE: Determine the amount of slice granted to a scaled nice.
194 * SLICE_NTHRESH: The nice cutoff point for slice assignment.
196 #define SCHED_SLICE_MIN (slice_min)
197 #define SCHED_SLICE_MAX (slice_max)
198 #define SCHED_SLICE_INTERACTIVE (slice_max)
199 #define SCHED_SLICE_NTHRESH (SCHED_PRI_NHALF - 1)
200 #define SCHED_SLICE_RANGE (SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
201 #define SCHED_SLICE_SCALE(val, max) (((val) * SCHED_SLICE_RANGE) / (max))
202 #define SCHED_SLICE_NICE(nice) \
203 (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
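/*
 * A rough example with the boot-time defaults slice_min = 1 and
 * slice_max = 10 (sched_initticks() later rescales these to roughly 10ms
 * and 140ms worth of stathz ticks): a kse whose nice value equals
 * ksq_nicemin gets SCHED_SLICE_NICE(0) = 10 ticks, and the slice shrinks
 * linearly to zero as the distance from the least nice kse approaches
 * SCHED_SLICE_NTHRESH.
 */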
206 * This macro determines whether or not the thread belongs on the current or
209 #define SCHED_INTERACTIVE(kg) \
210 (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
211 #define SCHED_CURR(kg, ke) \
212 ((ke->ke_thread->td_flags & TDF_BORROWING) || \
213 (ke->ke_flags & KEF_PREEMPTED) || SCHED_INTERACTIVE(kg))
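/*
 * In practice this means a thread lands on ksq_curr when it is running on
 * a borrowed (propagated) priority, was just preempted, or belongs to a
 * ksegrp whose interactivity score is under SCHED_INTERACT_THRESH; other
 * timeshare threads wait on ksq_next until kseq_choose() swaps the two
 * queues.
 */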
216 * Cpu percentage computation macros and defines.
218 * SCHED_CPU_TIME: Number of seconds to average the cpu usage across.
219 * SCHED_CPU_TICKS: Number of hz ticks to average the cpu usage across.
222 #define SCHED_CPU_TIME 10
223 #define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME)
226 * kseq - per processor runqs and statistics.
229 struct runq ksq_idle; /* Queue of IDLE threads. */
230 struct runq ksq_timeshare[2]; /* Run queues for !IDLE. */
231 struct runq *ksq_next; /* Next timeshare queue. */
232 struct runq *ksq_curr; /* Current queue. */
233 int ksq_load_timeshare; /* Load for timeshare. */
234 int ksq_load; /* Aggregate load. */
235 short ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
236 short ksq_nicemin; /* Least nice. */
238 int ksq_transferable;
239 LIST_ENTRY(kseq) ksq_siblings; /* Next in kseq group. */
240 struct kseq_group *ksq_group; /* Our processor group. */
241 volatile struct kse *ksq_assigned; /* assigned by another CPU. */
243 int ksq_sysload; /* For loadavg, !ITHD load. */
249 * kseq groups are groups of processors which can cheaply share threads. When
250 * one processor in the group goes idle it will check the runqs of the other
251 * processors in its group prior to halting and waiting for an interrupt.
252 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
253 * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
257 int ksg_cpus; /* Count of CPUs in this kseq group. */
258 cpumask_t ksg_cpumask; /* Mask of cpus in this group. */
259 cpumask_t ksg_idlemask; /* Idle cpus in this group. */
260 cpumask_t ksg_mask; /* Bit mask for first cpu. */
261 int ksg_load; /* Total load of this group. */
262 int ksg_transferable; /* Transferable load of this group. */
263 LIST_HEAD(, kseq) ksg_members; /* Linked list of all members. */
268 * One kse queue per processor.
271 static cpumask_t kseq_idle;
272 static int ksg_maxid;
273 static struct kseq kseq_cpu[MAXCPU];
274 static struct kseq_group kseq_groups[MAXCPU];
276 static int gbal_tick;
277 static int balance_groups;
279 #define KSEQ_SELF() (&kseq_cpu[PCPU_GET(cpuid)])
280 #define KSEQ_CPU(x) (&kseq_cpu[(x)])
281 #define KSEQ_ID(x) ((x) - kseq_cpu)
282 #define KSEQ_GROUP(x) (&kseq_groups[(x)])
284 static struct kseq kseq_cpu;
286 #define KSEQ_SELF() (&kseq_cpu)
287 #define KSEQ_CPU(x) (&kseq_cpu)
290 static void slot_fill(struct ksegrp *);
291 static struct kse *sched_choose(void); /* XXX Should be thread * */
292 static void sched_slice(struct kse *);
293 static void sched_priority(struct ksegrp *);
294 static void sched_thread_priority(struct thread *, u_char);
295 static int sched_interact_score(struct ksegrp *);
296 static void sched_interact_update(struct ksegrp *);
297 static void sched_interact_fork(struct ksegrp *);
298 static void sched_pctcpu_update(struct kse *);
300 /* Operations on per processor queues */
301 static struct kse * kseq_choose(struct kseq *);
302 static void kseq_setup(struct kseq *);
303 static void kseq_load_add(struct kseq *, struct kse *);
304 static void kseq_load_rem(struct kseq *, struct kse *);
305 static __inline void kseq_runq_add(struct kseq *, struct kse *, int);
306 static __inline void kseq_runq_rem(struct kseq *, struct kse *);
307 static void kseq_nice_add(struct kseq *, int);
308 static void kseq_nice_rem(struct kseq *, int);
309 void kseq_print(int cpu);
311 static int kseq_transfer(struct kseq *, struct kse *, int);
312 static struct kse *runq_steal(struct runq *);
313 static void sched_balance(void);
314 static void sched_balance_groups(void);
315 static void sched_balance_group(struct kseq_group *);
316 static void sched_balance_pair(struct kseq *, struct kseq *);
317 static void kseq_move(struct kseq *, int);
318 static int kseq_idled(struct kseq *);
319 static void kseq_notify(struct kse *, int);
320 static void kseq_assign(struct kseq *);
321 static struct kse *kseq_steal(struct kseq *, int);
322 #define KSE_CAN_MIGRATE(ke) \
323 ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
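/*
 * A kse may be moved to another CPU only if its thread is neither
 * temporarily pinned (td_pinned != 0) nor bound to a CPU via sched_bind()
 * (KEF_BOUND).
 */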
332 kseq = KSEQ_CPU(cpu);
335 printf("\tload: %d\n", kseq->ksq_load);
336 printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
338 printf("\tload transferable: %d\n", kseq->ksq_transferable);
340 printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
341 printf("\tnice counts:\n");
342 for (i = 0; i < SCHED_PRI_NRESV; i++)
343 if (kseq->ksq_nice[i])
344 printf("\t\t%d = %d\n",
345 i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
349 kseq_runq_add(struct kseq *kseq, struct kse *ke, int flags)
352 if (KSE_CAN_MIGRATE(ke)) {
353 kseq->ksq_transferable++;
354 kseq->ksq_group->ksg_transferable++;
355 ke->ke_flags |= KEF_XFERABLE;
358 if (ke->ke_flags & KEF_PREEMPTED)
359 flags |= SRQ_PREEMPTED;
360 runq_add(ke->ke_runq, ke, flags);
364 kseq_runq_rem(struct kseq *kseq, struct kse *ke)
367 if (ke->ke_flags & KEF_XFERABLE) {
368 kseq->ksq_transferable--;
369 kseq->ksq_group->ksg_transferable--;
370 ke->ke_flags &= ~KEF_XFERABLE;
373 runq_remove(ke->ke_runq, ke);
377 kseq_load_add(struct kseq *kseq, struct kse *ke)
380 mtx_assert(&sched_lock, MA_OWNED);
381 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
382 if (class == PRI_TIMESHARE)
383 kseq->ksq_load_timeshare++;
385 CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
386 if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
388 kseq->ksq_group->ksg_load++;
392 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
393 kseq_nice_add(kseq, ke->ke_proc->p_nice);
397 kseq_load_rem(struct kseq *kseq, struct kse *ke)
400 mtx_assert(&sched_lock, MA_OWNED);
401 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
402 if (class == PRI_TIMESHARE)
403 kseq->ksq_load_timeshare--;
404 if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
406 kseq->ksq_group->ksg_load--;
411 CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
413 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
414 kseq_nice_rem(kseq, ke->ke_proc->p_nice);
418 kseq_nice_add(struct kseq *kseq, int nice)
420 mtx_assert(&sched_lock, MA_OWNED);
421 /* Normalize to zero. */
422 kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
423 if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
424 kseq->ksq_nicemin = nice;
428 kseq_nice_rem(struct kseq *kseq, int nice)
432 mtx_assert(&sched_lock, MA_OWNED);
433 /* Normalize to zero. */
434 n = nice + SCHED_PRI_NHALF;
436 KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
439 * If this wasn't the smallest nice value or there are more in
440 * this bucket we can just return. Otherwise we have to recalculate
443 if (nice != kseq->ksq_nicemin ||
444 kseq->ksq_nice[n] != 0 ||
445 kseq->ksq_load_timeshare == 0)
448 for (; n < SCHED_PRI_NRESV; n++)
449 if (kseq->ksq_nice[n]) {
450 kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
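/*
 * Together, kseq_nice_add() and kseq_nice_rem() keep a histogram of the
 * timeshare kses in each nice bucket (index 0 corresponds to the most
 * negative nice value, hence the SCHED_PRI_NHALF offset) and cache the
 * smallest nice value present in ksq_nicemin, so sched_slice() can size
 * slices relative to the least nice kse without rescanning the array.
 */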
457 * sched_balance is a simple CPU load balancing algorithm. It operates by
458 * finding the least loaded and most loaded cpu and equalizing their load
459 * by migrating some processes.
461 * Dealing only with two CPUs at a time has two advantages. Firstly, most
462 * installations will only have 2 cpus. Secondly, load balancing too much at
463 * once can have an unpleasant effect on the system. The scheduler rarely has
464 * enough information to make perfect decisions. So this algorithm chooses
465 * simplicity and more gradual effects on load in larger systems.
467 * It could be improved by considering the priorities and slices assigned to
468 * each task prior to balancing them. There are many pathological cases with
469 * any approach and so the semi random algorithm below may work as well as any.
475 struct kseq_group *high;
476 struct kseq_group *low;
477 struct kseq_group *ksg;
481 bal_tick = ticks + (random() % (hz * 2));
482 if (smp_started == 0)
485 i = random() % (ksg_maxid + 1);
486 for (cnt = 0; cnt <= ksg_maxid; cnt++) {
489 * Find the CPU with the highest load that has some
490 * threads to transfer.
492 if ((high == NULL || ksg->ksg_load > high->ksg_load)
493 && ksg->ksg_transferable)
495 if (low == NULL || ksg->ksg_load < low->ksg_load)
500 if (low != NULL && high != NULL && high != low)
501 sched_balance_pair(LIST_FIRST(&high->ksg_members),
502 LIST_FIRST(&low->ksg_members));
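/*
 * Both balancers re-arm themselves for a random point within the next two
 * seconds (ticks + random() % (hz * 2)), and sched_setup() staggers their
 * initial deadlines, so the pair-wise and group-wide passes drift apart
 * rather than firing in lock step.
 */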
506 sched_balance_groups(void)
510 gbal_tick = ticks + (random() % (hz * 2));
511 mtx_assert(&sched_lock, MA_OWNED);
513 for (i = 0; i <= ksg_maxid; i++)
514 sched_balance_group(KSEQ_GROUP(i));
518 sched_balance_group(struct kseq_group *ksg)
525 if (ksg->ksg_transferable == 0)
529 LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
530 load = kseq->ksq_load;
531 if (high == NULL || load > high->ksq_load)
533 if (low == NULL || load < low->ksq_load)
536 if (high != NULL && low != NULL && high != low)
537 sched_balance_pair(high, low);
541 sched_balance_pair(struct kseq *high, struct kseq *low)
551 * If we're transferring within a group we have to use this specific
552 * kseq's transferable count, otherwise we can steal from other members
555 if (high->ksq_group == low->ksq_group) {
556 transferable = high->ksq_transferable;
557 high_load = high->ksq_load;
558 low_load = low->ksq_load;
560 transferable = high->ksq_group->ksg_transferable;
561 high_load = high->ksq_group->ksg_load;
562 low_load = low->ksq_group->ksg_load;
564 if (transferable == 0)
567 * Determine what the imbalance is and then adjust that to how many
568 * kses we actually have to give up (transferable).
570 diff = high_load - low_load;
574 move = min(move, transferable);
575 for (i = 0; i < move; i++)
576 kseq_move(high, KSEQ_ID(low));
581 kseq_move(struct kseq *from, int cpu)
589 ke = kseq_steal(kseq, 1);
591 struct kseq_group *ksg;
593 ksg = kseq->ksq_group;
594 LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
595 if (kseq == from || kseq->ksq_transferable == 0)
597 ke = kseq_steal(kseq, 1);
601 panic("kseq_move: No KSEs available with a "
602 "transferable count of %d\n",
603 ksg->ksg_transferable);
607 ke->ke_state = KES_THREAD;
608 kseq_runq_rem(kseq, ke);
609 kseq_load_rem(kseq, ke);
610 kseq_notify(ke, cpu);
614 kseq_idled(struct kseq *kseq)
616 struct kseq_group *ksg;
620 ksg = kseq->ksq_group;
622 * If we're in a cpu group, try to steal kses from another cpu in
623 * the group before idling.
625 if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
626 LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
627 if (steal == kseq || steal->ksq_transferable == 0)
629 ke = kseq_steal(steal, 0);
632 ke->ke_state = KES_THREAD;
633 kseq_runq_rem(steal, ke);
634 kseq_load_rem(steal, ke);
635 ke->ke_cpu = PCPU_GET(cpuid);
636 ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
637 sched_add(ke->ke_thread, SRQ_YIELDING);
642 * We only set the idled bit when all of the cpus in the group are
643 * idle. Otherwise we could get into a situation where a KSE bounces
644 * back and forth between two idle cores on separate physical CPUs.
646 ksg->ksg_idlemask |= PCPU_GET(cpumask);
647 if (ksg->ksg_idlemask != ksg->ksg_cpumask)
649 atomic_set_int(&kseq_idle, ksg->ksg_mask);
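/*
 * ksg_idlemask and the global kseq_idle mask form a two-level
 * advertisement of idle CPUs: sched_add(), kseq_transfer() and
 * kseq_notify() consult them when deciding whether a newly runnable
 * thread should be pushed to an idle CPU rather than queued locally, and
 * clear the bits again as CPUs pick up work.
 */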
654 kseq_assign(struct kseq *kseq)
660 *(volatile struct kse **)&ke = kseq->ksq_assigned;
661 } while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
662 (uintptr_t)ke, (uintptr_t)NULL));
663 for (; ke != NULL; ke = nke) {
665 kseq->ksq_group->ksg_load--;
667 ke->ke_flags &= ~KEF_ASSIGNED;
668 if (ke->ke_flags & KEF_REMOVED) {
669 ke->ke_flags &= ~KEF_REMOVED;
672 ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
673 sched_add(ke->ke_thread, SRQ_YIELDING);
678 kseq_notify(struct kse *ke, int cpu)
686 kseq = KSEQ_CPU(cpu);
688 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
689 if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
690 (kseq_idle & kseq->ksq_group->ksg_mask))
691 atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
692 kseq->ksq_group->ksg_load++;
695 ke->ke_flags |= KEF_ASSIGNED;
696 prio = ke->ke_thread->td_priority;
699 * Place a KSE on another cpu's queue and force a resched.
702 *(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
703 } while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
704 (uintptr_t)ke->ke_assign, (uintptr_t)ke));
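/*
 * The loop above pushes the kse onto the remote kseq's ksq_assigned list
 * with a compare-and-swap, reusing the run-queue linkage (ke_assign is an
 * alias for ke_procq.tqe_next) as the next pointer.  kseq_assign() on the
 * remote CPU later detaches the whole list atomically and re-adds each
 * kse there.
 */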
706 * Without sched_lock we could lose a race where we set NEEDRESCHED
707 * on a thread that is switched out before the IPI is delivered. This
708 * would lead us to miss the resched. This will be a problem once
709 * sched_lock is pushed down.
711 pcpu = pcpu_find(cpu);
712 td = pcpu->pc_curthread;
713 if (ke->ke_thread->td_priority < td->td_priority ||
714 td == pcpu->pc_idlethread) {
715 td->td_flags |= TDF_NEEDRESCHED;
716 ipi_selected(1 << cpu, IPI_AST);
721 runq_steal(struct runq *rq)
729 mtx_assert(&sched_lock, MA_OWNED);
730 rqb = &rq->rq_status;
731 for (word = 0; word < RQB_LEN; word++) {
732 if (rqb->rqb_bits[word] == 0)
734 for (bit = 0; bit < RQB_BPW; bit++) {
735 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
737 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
738 TAILQ_FOREACH(ke, rqh, ke_procq) {
739 if (KSE_CAN_MIGRATE(ke))
748 kseq_steal(struct kseq *kseq, int stealidle)
753 * Steal from next first to try to get a non-interactive task that
754 * may not have run for a while.
756 if ((ke = runq_steal(kseq->ksq_next)) != NULL)
758 if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
761 return (runq_steal(&kseq->ksq_idle));
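/*
 * Steal order: ksq_next first (non-interactive work that has waited
 * longest), then ksq_curr, and the idle-class queue only when the caller
 * passed stealidle.
 */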
766 kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
768 struct kseq_group *nksg;
769 struct kseq_group *ksg;
774 if (smp_started == 0)
778 * If our load exceeds a certain threshold we should attempt to
779 * reassign this thread. The first candidate is the cpu that
780 * originally ran the thread. If it is idle, assign it there,
781 * otherwise, pick an idle cpu.
783 * The threshold at which we start to reassign kses has a large impact
784 * on the overall performance of the system. Tuned too high and
785 * some CPUs may idle. Too low and there will be excess migration
786 * and context switches.
788 old = KSEQ_CPU(ke->ke_cpu);
789 nksg = old->ksq_group;
790 ksg = kseq->ksq_group;
792 if (kseq_idle & nksg->ksg_mask) {
793 cpu = ffs(nksg->ksg_idlemask);
796 "kseq_transfer: %p found old cpu %X "
797 "in idlemask.", ke, cpu);
802 * Multiple cpus could find this bit simultaneously
803 * but the race shouldn't be terrible.
805 cpu = ffs(kseq_idle);
807 CTR2(KTR_SCHED, "kseq_transfer: %p found %X "
808 "in idlemask.", ke, cpu);
814 if (old->ksq_load < kseq->ksq_load) {
815 cpu = ke->ke_cpu + 1;
816 CTR2(KTR_SCHED, "kseq_transfer: %p old cpu %X "
817 "load less than ours.", ke, cpu);
821 * No new CPU was found; look for one with less load.
823 for (idx = 0; idx <= ksg_maxid; idx++) {
824 nksg = KSEQ_GROUP(idx);
825 if (nksg->ksg_load /*+ (nksg->ksg_cpus * 2)*/ < ksg->ksg_load) {
826 cpu = ffs(nksg->ksg_cpumask);
827 CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X load less "
828 "than ours.", ke, cpu);
834 * If another cpu in this group has idled, assign a thread over
835 * to it after checking to see if there are idled groups.
837 if (ksg->ksg_idlemask) {
838 cpu = ffs(ksg->ksg_idlemask);
840 CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X idle in "
848 * Now that we've found an idle CPU, migrate the thread.
852 kseq_notify(ke, cpu);
860 * Pick the highest priority task we have and return it.
864 kseq_choose(struct kseq *kseq)
870 mtx_assert(&sched_lock, MA_OWNED);
874 ke = runq_choose(kseq->ksq_curr);
877 * We already swapped once and didn't get anywhere.
881 swap = kseq->ksq_curr;
882 kseq->ksq_curr = kseq->ksq_next;
883 kseq->ksq_next = swap;
887 * If we encounter a slice of 0 the kse is in a
888 * TIMESHARE kse group and its nice was too far out
889 * of the range that receives slices.
891 nice = ke->ke_proc->p_nice + (0 - kseq->ksq_nicemin);
893 if (ke->ke_slice == 0 || (nice > SCHED_SLICE_NTHRESH &&
894 ke->ke_proc->p_nice != 0)) {
895 runq_remove(ke->ke_runq, ke);
897 ke->ke_runq = kseq->ksq_next;
898 runq_add(ke->ke_runq, ke, 0);
905 return (runq_choose(&kseq->ksq_idle));
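/*
 * kseq_choose() thus implements the two-queue rotation at the heart of
 * ULE: threads are taken from ksq_curr until it is empty, then the curr
 * and next pointers are swapped; a kse that reaches the head with a zero
 * slice (its nice value drifted outside the slice window) is pushed onto
 * the next queue instead of being run, and the idle queue is consulted
 * only when both timeshare queues are empty.
 */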
909 kseq_setup(struct kseq *kseq)
911 runq_init(&kseq->ksq_timeshare[0]);
912 runq_init(&kseq->ksq_timeshare[1]);
913 runq_init(&kseq->ksq_idle);
914 kseq->ksq_curr = &kseq->ksq_timeshare[0];
915 kseq->ksq_next = &kseq->ksq_timeshare[1];
917 kseq->ksq_load_timeshare = 0;
921 sched_setup(void *dummy)
928 * To avoid a divide-by-zero, set realstathz to a dummy value
929 * in case sched_clock() is called before sched_initticks().
932 slice_min = (hz/100); /* 10ms */
933 slice_max = (hz/7); /* ~140ms */
938 * Initialize the kseqs.
940 for (i = 0; i < MAXCPU; i++) {
944 ksq->ksq_assigned = NULL;
945 kseq_setup(&kseq_cpu[i]);
947 if (smp_topology == NULL) {
948 struct kseq_group *ksg;
952 for (cpus = 0, i = 0; i < MAXCPU; i++) {
955 ksq = &kseq_cpu[cpus];
956 ksg = &kseq_groups[cpus];
958 * Setup a kseq group with one member.
960 ksq->ksq_transferable = 0;
961 ksq->ksq_group = ksg;
963 ksg->ksg_idlemask = 0;
964 ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
966 ksg->ksg_transferable = 0;
967 LIST_INIT(&ksg->ksg_members);
968 LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
971 ksg_maxid = cpus - 1;
973 struct kseq_group *ksg;
974 struct cpu_group *cg;
977 for (i = 0; i < smp_topology->ct_count; i++) {
978 cg = &smp_topology->ct_group[i];
979 ksg = &kseq_groups[i];
981 * Initialize the group.
983 ksg->ksg_idlemask = 0;
985 ksg->ksg_transferable = 0;
986 ksg->ksg_cpus = cg->cg_count;
987 ksg->ksg_cpumask = cg->cg_mask;
988 LIST_INIT(&ksg->ksg_members);
990 * Find all of the group members and add them.
992 for (j = 0; j < MAXCPU; j++) {
993 if ((cg->cg_mask & (1 << j)) != 0) {
994 if (ksg->ksg_mask == 0)
995 ksg->ksg_mask = 1 << j;
996 kseq_cpu[j].ksq_transferable = 0;
997 kseq_cpu[j].ksq_group = ksg;
998 LIST_INSERT_HEAD(&ksg->ksg_members,
999 &kseq_cpu[j], ksq_siblings);
1002 if (ksg->ksg_cpus > 1)
1005 ksg_maxid = smp_topology->ct_count - 1;
1008 * Stagger the group and global load balancer so they do not
1009 * interfere with each other.
1011 bal_tick = ticks + hz;
1013 gbal_tick = ticks + (hz / 2);
1015 kseq_setup(KSEQ_SELF());
1017 mtx_lock_spin(&sched_lock);
1018 kseq_load_add(KSEQ_SELF(), &kse0);
1019 mtx_unlock_spin(&sched_lock);
1024 sched_initticks(void *dummy)
1026 mtx_lock_spin(&sched_lock);
1027 realstathz = stathz ? stathz : hz;
1028 slice_min = (realstathz/100); /* 10ms */
1029 slice_max = (realstathz/7); /* ~140ms */
1031 tickincr = (hz << 10) / realstathz;
1033 * XXX This does not work for values of stathz that are much
1038 mtx_unlock_spin(&sched_lock);
1043 * Scale the scheduling priority according to the "interactivity" of this
1047 sched_priority(struct ksegrp *kg)
1051 if (kg->kg_pri_class != PRI_TIMESHARE)
1054 pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
1055 pri += SCHED_PRI_BASE;
1056 pri += kg->kg_proc->p_nice;
1058 if (pri > PRI_MAX_TIMESHARE)
1059 pri = PRI_MAX_TIMESHARE;
1060 else if (pri < PRI_MIN_TIMESHARE)
1061 pri = PRI_MIN_TIMESHARE;
1063 kg->kg_user_pri = pri;
1069 * Calculate a time slice based on the properties of the kseg and the runq
1070 * that we're on. This is only for PRI_TIMESHARE ksegrps.
1073 sched_slice(struct kse *ke)
1079 kseq = KSEQ_CPU(ke->ke_cpu);
1081 if (ke->ke_thread->td_flags & TDF_BORROWING) {
1082 ke->ke_slice = SCHED_SLICE_MIN;
1088 * KSEs in interactive ksegs get a minimal slice so that we
1089 * quickly notice if one abuses its advantage.
1091 * KSEs in non-interactive ksegs are assigned a slice that is
1092 * based on the ksegs nice value relative to the least nice kseg
1093 * on the run queue for this cpu.
1095 * If the KSE is less nice than all others it gets the maximum
1096 * slice and other KSEs will adjust their slice relative to
1097 * this when they first expire.
1099 * There is a 20 point window that starts relative to the least
1100 * nice kse on the run queue. Slice size is determined by
1101 * the kse's distance from the least nice ksegrp.
1103 * If the kse is outside of the window it will get no slice
1104 * and will be reevaluated each time it is selected on the
1105 * run queue. The exception to this is nice 0 ksegs when
1106 * a nice -20 is running. They are always granted a minimum
1109 if (!SCHED_INTERACTIVE(kg)) {
1112 nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
1113 if (kseq->ksq_load_timeshare == 0 ||
1114 kg->kg_proc->p_nice < kseq->ksq_nicemin)
1115 ke->ke_slice = SCHED_SLICE_MAX;
1116 else if (nice <= SCHED_SLICE_NTHRESH)
1117 ke->ke_slice = SCHED_SLICE_NICE(nice);
1118 else if (kg->kg_proc->p_nice == 0)
1119 ke->ke_slice = SCHED_SLICE_MIN;
1121 ke->ke_slice = SCHED_SLICE_MIN; /* 0 */
1123 ke->ke_slice = SCHED_SLICE_INTERACTIVE;
1129 * This routine enforces a maximum limit on the amount of scheduling history
1130 * kept. It is called after either the slptime or runtime is adjusted.
1131 * This routine will not operate correctly when slp or run times have been
1132 * adjusted to more than double their maximum.
1135 sched_interact_update(struct ksegrp *kg)
1139 sum = kg->kg_runtime + kg->kg_slptime;
1140 if (sum < SCHED_SLP_RUN_MAX)
1143 * If we have exceeded by more than 1/5th then the algorithm below
1144 * will not bring us back into range. Dividing by two here forces
1145 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1147 if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1148 kg->kg_runtime /= 2;
1149 kg->kg_slptime /= 2;
1152 kg->kg_runtime = (kg->kg_runtime / 5) * 4;
1153 kg->kg_slptime = (kg->kg_slptime / 5) * 4;
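/*
 * Example, assuming hz = 1000: SCHED_SLP_RUN_MAX is (hz * 5) << 10, i.e.
 * five seconds of combined sleep + run history in <<10 tick units.  Once
 * the sum crosses that limit both components are scaled back by 4/5 (or
 * halved if the limit was overshot by more than a fifth), so the
 * interactivity score reflects only the last few seconds of behaviour.
 */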
1157 sched_interact_fork(struct ksegrp *kg)
1162 sum = kg->kg_runtime + kg->kg_slptime;
1163 if (sum > SCHED_SLP_RUN_FORK) {
1164 ratio = sum / SCHED_SLP_RUN_FORK;
1165 kg->kg_runtime /= ratio;
1166 kg->kg_slptime /= ratio;
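/*
 * At fork time the inherited history is divided down by the integer ratio
 * sum / SCHED_SLP_RUN_FORK, which brings large histories back to roughly
 * half a second's worth; using the same divisor for both components
 * preserves the parent's sleep/run ratio while leaving the child only a
 * small amount of accumulated weight.
 */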
1171 sched_interact_score(struct ksegrp *kg)
1175 if (kg->kg_runtime > kg->kg_slptime) {
1176 div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
1177 return (SCHED_INTERACT_HALF +
1178 (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
1179 } if (kg->kg_slptime > kg->kg_runtime) {
1180 div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
1181 return (kg->kg_runtime / div);
1185 * This can happen if slptime and runtime are 0.
1192 * Very early in the boot some setup of scheduler-specific
1193 * parts of proc0 and of some scheduler resources needs to be done.
1201 * Set up the scheduler specific parts of proc0.
1203 proc0.p_sched = NULL; /* XXX */
1204 ksegrp0.kg_sched = &kg_sched0;
1205 thread0.td_sched = &kse0;
1206 kse0.ke_thread = &thread0;
1207 kse0.ke_state = KES_THREAD;
1208 kg_sched0.skg_concurrency = 1;
1209 kg_sched0.skg_avail_opennings = 0; /* we are already running */
1213 * This is only somewhat accurate since, given many processes of the same
1214 * priority, they will switch when their slices run out, which will be
1215 * at most SCHED_SLICE_MAX.
1218 sched_rr_interval(void)
1220 return (SCHED_SLICE_MAX);
1224 sched_pctcpu_update(struct kse *ke)
1227 * Adjust counters and watermark for pctcpu calc.
1229 if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
1231 * Shift the tick count out so that the divide doesn't
1232 * round away our results.
1234 ke->ke_ticks <<= 10;
1235 ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
1237 ke->ke_ticks >>= 10;
1240 ke->ke_ltick = ticks;
1241 ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
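/*
 * ke_ftick and ke_ltick bracket the window over which ke_ticks was
 * collected.  Once the window has grown past SCHED_CPU_TICKS (ten seconds
 * of hz ticks) the count is rescaled to the nominal window length and the
 * watermarks are slid forward, so sched_pctcpu() reports an average over
 * roughly the last ten seconds.
 */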
1245 sched_thread_priority(struct thread *td, u_char prio)
1249 CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1250 td, td->td_proc->p_comm, td->td_priority, prio, curthread,
1251 curthread->td_proc->p_comm);
1253 mtx_assert(&sched_lock, MA_OWNED);
1254 if (td->td_priority == prio)
1256 if (TD_ON_RUNQ(td)) {
1258 * If the priority has been elevated due to priority
1259 * propagation, we may have to move ourselves to a new
1260 * queue. We still call adjustrunqueue below in case the kse
1261 * needs to fix things up.
1263 if (prio < td->td_priority && ke->ke_runq != NULL &&
1264 (ke->ke_flags & KEF_ASSIGNED) == 0 &&
1265 ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
1266 runq_remove(ke->ke_runq, ke);
1267 ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
1268 runq_add(ke->ke_runq, ke, 0);
1271 * Hold this kse on this cpu so that sched_prio() doesn't
1272 * cause excessive migration. We only want migration to
1273 * happen as the result of a wakeup.
1275 ke->ke_flags |= KEF_HOLD;
1276 adjustrunqueue(td, prio);
1277 ke->ke_flags &= ~KEF_HOLD;
1279 td->td_priority = prio;
1283 * Update a thread's priority when it is lent another thread's
1287 sched_lend_prio(struct thread *td, u_char prio)
1290 td->td_flags |= TDF_BORROWING;
1291 sched_thread_priority(td, prio);
1295 * Restore a thread's priority when priority propagation is
1296 * over. The prio argument is the minimum priority the thread
1297 * needs to have to satisfy other possible priority lending
1298 * requests. If the thread's regular priority is less
1299 * important than prio, the thread will keep a priority boost
1303 sched_unlend_prio(struct thread *td, u_char prio)
1307 if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1308 td->td_base_pri <= PRI_MAX_TIMESHARE)
1309 base_pri = td->td_ksegrp->kg_user_pri;
1311 base_pri = td->td_base_pri;
1312 if (prio >= base_pri) {
1313 td->td_flags &= ~TDF_BORROWING;
1314 sched_thread_priority(td, base_pri);
1316 sched_lend_prio(td, prio);
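/*
 * sched_lend_prio() and sched_unlend_prio() implement priority
 * propagation: TDF_BORROWING marks a thread running on a lender's
 * priority, sched_prio() refuses to lower a borrowed priority, and
 * sched_userret() asserts the flag has been cleared before the thread
 * returns to userland.
 */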
1320 sched_prio(struct thread *td, u_char prio)
1324 /* First, update the base priority. */
1325 td->td_base_pri = prio;
1328 * If the thread is borrowing another thread's priority, don't
1329 * ever lower the priority.
1331 if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1334 /* Change the real priority. */
1335 oldprio = td->td_priority;
1336 sched_thread_priority(td, prio);
1339 * If the thread is on a turnstile, then let the turnstile update
1342 if (TD_ON_LOCK(td) && oldprio != prio)
1343 turnstile_adjust(td, oldprio);
1347 sched_switch(struct thread *td, struct thread *newtd, int flags)
1352 mtx_assert(&sched_lock, MA_OWNED);
1357 td->td_lastcpu = td->td_oncpu;
1358 td->td_oncpu = NOCPU;
1359 td->td_flags &= ~TDF_NEEDRESCHED;
1360 td->td_owepreempt = 0;
1363 * If the KSE has been assigned it may be in the process of switching
1364 * to the new cpu. This is the case in sched_bind().
1366 if (td == PCPU_GET(idlethread)) {
1368 } else if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
1369 /* We are ending our run so make our slot available again */
1370 SLOT_RELEASE(td->td_ksegrp);
1371 kseq_load_rem(ksq, ke);
1372 if (TD_IS_RUNNING(td)) {
1374 * Don't allow the thread to migrate
1375 * from a preemption.
1377 ke->ke_flags |= KEF_HOLD;
1378 setrunqueue(td, (flags & SW_PREEMPT) ?
1379 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1380 SRQ_OURSELF|SRQ_YIELDING);
1381 ke->ke_flags &= ~KEF_HOLD;
1382 } else if ((td->td_proc->p_flag & P_HADTHREADS) &&
1383 (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp))
1385 * We will not be on the run queue.
1386 * So we must be sleeping or similar.
1387 * Don't use the slot if we will need it
1390 slot_fill(td->td_ksegrp);
1392 if (newtd != NULL) {
1394 * If we bring in a thread, account for it as if it had been
1395 * added to the run queue and then chosen.
1397 newtd->td_kse->ke_flags |= KEF_DIDRUN;
1398 newtd->td_kse->ke_runq = ksq->ksq_curr;
1399 TD_SET_RUNNING(newtd);
1400 kseq_load_add(KSEQ_SELF(), newtd->td_kse);
1402 * XXX When we preempt, we've already consumed a slot because
1403 * we got here through sched_add(). However, newtd can come
1404 * from thread_switchout(), which can't SLOT_USE() because
1405 * the SLOT code is scheduler dependent. Otherwise, we must
1406 * use the slot here.
1408 if ((flags & SW_PREEMPT) == 0)
1409 SLOT_USE(newtd->td_ksegrp);
1411 newtd = choosethread();
1414 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1415 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1417 cpu_switch(td, newtd);
1419 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1420 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1424 sched_lock.mtx_lock = (uintptr_t)td;
1426 td->td_oncpu = PCPU_GET(cpuid);
1430 sched_nice(struct proc *p, int nice)
1437 PROC_LOCK_ASSERT(p, MA_OWNED);
1438 mtx_assert(&sched_lock, MA_OWNED);
1440 * We need to adjust the nice counts for running KSEs.
1442 FOREACH_KSEGRP_IN_PROC(p, kg) {
1443 if (kg->kg_pri_class == PRI_TIMESHARE) {
1444 FOREACH_THREAD_IN_GROUP(kg, td) {
1446 if (ke->ke_runq == NULL)
1448 kseq = KSEQ_CPU(ke->ke_cpu);
1449 kseq_nice_rem(kseq, p->p_nice);
1450 kseq_nice_add(kseq, nice);
1455 FOREACH_KSEGRP_IN_PROC(p, kg) {
1457 FOREACH_THREAD_IN_GROUP(kg, td)
1458 td->td_flags |= TDF_NEEDRESCHED;
1463 sched_sleep(struct thread *td)
1465 mtx_assert(&sched_lock, MA_OWNED);
1467 td->td_slptime = ticks;
1471 sched_wakeup(struct thread *td)
1473 mtx_assert(&sched_lock, MA_OWNED);
1476 * Let the kseg know how long we slept for. This is because process
1477 * interactivity behavior is modeled in the kseg.
1479 if (td->td_slptime) {
1484 hzticks = (ticks - td->td_slptime) << 10;
1485 if (hzticks >= SCHED_SLP_RUN_MAX) {
1486 kg->kg_slptime = SCHED_SLP_RUN_MAX;
1489 kg->kg_slptime += hzticks;
1490 sched_interact_update(kg);
1493 sched_slice(td->td_kse);
1496 setrunqueue(td, SRQ_BORING);
1500 * Penalize the parent for creating a new child and initialize the child's
1504 sched_fork(struct thread *td, struct thread *childtd)
1507 mtx_assert(&sched_lock, MA_OWNED);
1509 sched_fork_ksegrp(td, childtd->td_ksegrp);
1510 sched_fork_thread(td, childtd);
1514 sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
1516 struct ksegrp *kg = td->td_ksegrp;
1517 mtx_assert(&sched_lock, MA_OWNED);
1519 child->kg_slptime = kg->kg_slptime;
1520 child->kg_runtime = kg->kg_runtime;
1521 child->kg_user_pri = kg->kg_user_pri;
1522 sched_interact_fork(child);
1523 kg->kg_runtime += tickincr;
1524 sched_interact_update(kg);
1528 sched_fork_thread(struct thread *td, struct thread *child)
1533 sched_newthread(child);
1535 ke2 = child->td_kse;
1536 ke2->ke_slice = 1; /* Attempt to quickly learn interactivity. */
1537 ke2->ke_cpu = ke->ke_cpu;
1538 ke2->ke_runq = NULL;
1540 /* Grab our parent's cpu estimation information. */
1541 ke2->ke_ticks = ke->ke_ticks;
1542 ke2->ke_ltick = ke->ke_ltick;
1543 ke2->ke_ftick = ke->ke_ftick;
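/*
 * The child starts with a single-tick slice so that its own interactive
 * behaviour is sampled almost immediately, while the pctcpu window
 * (ke_ticks, ke_ftick, ke_ltick) is simply copied from the parent as an
 * initial estimate.
 */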
1547 sched_class(struct ksegrp *kg, int class)
1555 mtx_assert(&sched_lock, MA_OWNED);
1556 if (kg->kg_pri_class == class)
1559 nclass = PRI_BASE(class);
1560 oclass = PRI_BASE(kg->kg_pri_class);
1561 FOREACH_THREAD_IN_GROUP(kg, td) {
1563 if ((ke->ke_state != KES_ONRUNQ &&
1564 ke->ke_state != KES_THREAD) || ke->ke_runq == NULL)
1566 kseq = KSEQ_CPU(ke->ke_cpu);
1570 * On SMP if we're on the RUNQ we must adjust the transferable
1571 * count because we could be changing to or from an interrupt
1574 if (ke->ke_state == KES_ONRUNQ) {
1575 if (KSE_CAN_MIGRATE(ke)) {
1576 kseq->ksq_transferable--;
1577 kseq->ksq_group->ksg_transferable--;
1579 if (KSE_CAN_MIGRATE(ke)) {
1580 kseq->ksq_transferable++;
1581 kseq->ksq_group->ksg_transferable++;
1585 if (oclass == PRI_TIMESHARE) {
1586 kseq->ksq_load_timeshare--;
1587 kseq_nice_rem(kseq, kg->kg_proc->p_nice);
1589 if (nclass == PRI_TIMESHARE) {
1590 kseq->ksq_load_timeshare++;
1591 kseq_nice_add(kseq, kg->kg_proc->p_nice);
1595 kg->kg_pri_class = class;
1599 * Return some of the child's priority and interactivity to the parent.
1602 sched_exit(struct proc *p, struct thread *childtd)
1604 mtx_assert(&sched_lock, MA_OWNED);
1605 sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
1606 sched_exit_thread(NULL, childtd);
1610 sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
1612 /* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
1613 kg->kg_runtime += td->td_ksegrp->kg_runtime;
1614 sched_interact_update(kg);
1618 sched_exit_thread(struct thread *td, struct thread *childtd)
1620 CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
1621 childtd, childtd->td_proc->p_comm, childtd->td_priority);
1622 kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
1626 sched_clock(struct thread *td)
1632 mtx_assert(&sched_lock, MA_OWNED);
1635 if (ticks >= bal_tick)
1637 if (ticks >= gbal_tick && balance_groups)
1638 sched_balance_groups();
1640 * We could have been assigned a non real-time thread without an
1643 if (kseq->ksq_assigned)
1644 kseq_assign(kseq); /* Potentially sets NEEDRESCHED */
1649 /* Adjust ticks for pctcpu */
1651 ke->ke_ltick = ticks;
1653 /* Go up to one second beyond our max and then trim back down */
1654 if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1655 sched_pctcpu_update(ke);
1657 if (td->td_flags & TDF_IDLETD)
1660 * We only run the slicing code for TIMESHARE ksegrps.
1662 if (kg->kg_pri_class != PRI_TIMESHARE)
1665 * We used a tick; charge it to the ksegrp so that we can compute our
1668 kg->kg_runtime += tickincr;
1669 sched_interact_update(kg);
1672 * We used up one time slice.
1674 if (--ke->ke_slice > 0)
1677 * We're out of time; recompute priorities and requeue.
1679 kseq_load_rem(kseq, ke);
1682 if (SCHED_CURR(kg, ke))
1683 ke->ke_runq = kseq->ksq_curr;
1685 ke->ke_runq = kseq->ksq_next;
1686 kseq_load_add(kseq, ke);
1687 td->td_flags |= TDF_NEEDRESCHED;
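/*
 * In short, each scheduler clock tick charges tickincr of run time to the
 * ksegrp, and once the kse's slice is exhausted it is pulled off, requeued
 * on ksq_curr or ksq_next according to SCHED_CURR(), and the CPU is asked
 * to reschedule.
 */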
1691 sched_runnable(void)
1700 if (kseq->ksq_assigned) {
1701 mtx_lock_spin(&sched_lock);
1703 mtx_unlock_spin(&sched_lock);
1706 if ((curthread->td_flags & TDF_IDLETD) != 0) {
1707 if (kseq->ksq_load > 0)
1710 if (kseq->ksq_load - 1 > 0)
1718 sched_userret(struct thread *td)
1722 KASSERT((td->td_flags & TDF_BORROWING) == 0,
1723 ("thread with borrowed priority returning to userland"));
1725 if (td->td_priority != kg->kg_user_pri) {
1726 mtx_lock_spin(&sched_lock);
1727 td->td_priority = kg->kg_user_pri;
1728 td->td_base_pri = kg->kg_user_pri;
1729 mtx_unlock_spin(&sched_lock);
1739 mtx_assert(&sched_lock, MA_OWNED);
1743 if (kseq->ksq_assigned)
1746 ke = kseq_choose(kseq);
1749 if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1750 if (kseq_idled(kseq) == 0)
1753 kseq_runq_rem(kseq, ke);
1754 ke->ke_state = KES_THREAD;
1755 ke->ke_flags &= ~KEF_PREEMPTED;
1759 if (kseq_idled(kseq) == 0)
1766 sched_add(struct thread *td, int flags)
1775 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1776 td, td->td_proc->p_comm, td->td_priority, curthread,
1777 curthread->td_proc->p_comm);
1778 mtx_assert(&sched_lock, MA_OWNED);
1782 preemptive = !(flags & SRQ_YIELDING);
1783 class = PRI_BASE(kg->kg_pri_class);
1785 if ((ke->ke_flags & KEF_INTERNAL) == 0)
1786 SLOT_USE(td->td_ksegrp);
1787 ke->ke_flags &= ~KEF_INTERNAL;
1789 if (ke->ke_flags & KEF_ASSIGNED) {
1790 if (ke->ke_flags & KEF_REMOVED)
1791 ke->ke_flags &= ~KEF_REMOVED;
1794 canmigrate = KSE_CAN_MIGRATE(ke);
1796 * Don't migrate running threads here. Force the long term balancer
1799 if (ke->ke_flags & KEF_HOLD) {
1800 ke->ke_flags &= ~KEF_HOLD;
1804 KASSERT(ke->ke_state != KES_ONRUNQ,
1805 ("sched_add: kse %p (%s) already in run queue", ke,
1806 ke->ke_proc->p_comm));
1807 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1808 ("sched_add: process swapped out"));
1809 KASSERT(ke->ke_runq == NULL,
1810 ("sched_add: KSE %p is still assigned to a run queue", ke));
1811 if (flags & SRQ_PREEMPTED)
1812 ke->ke_flags |= KEF_PREEMPTED;
1816 ke->ke_runq = kseq->ksq_curr;
1817 ke->ke_slice = SCHED_SLICE_MAX;
1819 ke->ke_cpu = PCPU_GET(cpuid);
1822 if (SCHED_CURR(kg, ke))
1823 ke->ke_runq = kseq->ksq_curr;
1825 ke->ke_runq = kseq->ksq_next;
1829 * This is for priority prop.
1831 if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1832 ke->ke_runq = kseq->ksq_curr;
1834 ke->ke_runq = &kseq->ksq_idle;
1835 ke->ke_slice = SCHED_SLICE_MIN;
1838 panic("Unknown pri class.");
1843 * If this thread is pinned or bound, notify the target cpu.
1845 if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid)) {
1847 kseq_notify(ke, ke->ke_cpu);
1851 * If we had been idle, clear our bit in the group and potentially
1852 * the global bitmap. If not, see if we should transfer this thread.
1854 if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
1855 (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
1857 * Check to see if our group is unidling, and if so, remove it
1858 * from the global idle mask.
1860 if (kseq->ksq_group->ksg_idlemask ==
1861 kseq->ksq_group->ksg_cpumask)
1862 atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
1864 * Now remove ourselves from the group specific idle mask.
1866 kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
1867 } else if (canmigrate && kseq->ksq_load > 1 && class != PRI_ITHD)
1868 if (kseq_transfer(kseq, ke, class))
1870 ke->ke_cpu = PCPU_GET(cpuid);
1872 if (td->td_priority < curthread->td_priority &&
1873 ke->ke_runq == kseq->ksq_curr)
1874 curthread->td_flags |= TDF_NEEDRESCHED;
1875 if (preemptive && maybe_preempt(td))
1877 ke->ke_state = KES_ONRUNQ;
1879 kseq_runq_add(kseq, ke, flags);
1880 kseq_load_add(kseq, ke);
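/*
 * To summarize sched_add(): pick a run queue based on the priority class,
 * hand the kse to another CPU instead if it is pinned/bound elsewhere or
 * if kseq_transfer() finds an idle or less loaded target, and otherwise
 * queue it locally, setting NEEDRESCHED or preempting when it should run
 * ahead of curthread.
 */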
1884 sched_rem(struct thread *td)
1889 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1890 td, td->td_proc->p_comm, td->td_priority, curthread,
1891 curthread->td_proc->p_comm);
1892 mtx_assert(&sched_lock, MA_OWNED);
1894 SLOT_RELEASE(td->td_ksegrp);
1895 ke->ke_flags &= ~KEF_PREEMPTED;
1896 if (ke->ke_flags & KEF_ASSIGNED) {
1897 ke->ke_flags |= KEF_REMOVED;
1900 KASSERT((ke->ke_state == KES_ONRUNQ),
1901 ("sched_rem: KSE not on run queue"));
1903 ke->ke_state = KES_THREAD;
1904 kseq = KSEQ_CPU(ke->ke_cpu);
1905 kseq_runq_rem(kseq, ke);
1906 kseq_load_rem(kseq, ke);
1910 sched_pctcpu(struct thread *td)
1920 mtx_lock_spin(&sched_lock);
1925 * Don't update more frequently than twice a second. Allowing
1926 * this causes the cpu usage to decay away too quickly due to
1929 if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
1930 ke->ke_ltick < (ticks - (hz / 2)))
1931 sched_pctcpu_update(ke);
1932 /* How many rticks per second? */
1933 rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1934 pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1937 ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1938 mtx_unlock_spin(&sched_lock);
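/*
 * Example, assuming realstathz = 128: a kse that ran for 64 stathz ticks
 * per second of its window gives rtick = 64 and
 * pctcpu = (FSCALE * ((FSCALE * 64) / 128)) >> FSHIFT = FSCALE / 2,
 * i.e. 50% in the fixed-point form the rest of the kernel expects.
 */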
1944 sched_bind(struct thread *td, int cpu)
1948 mtx_assert(&sched_lock, MA_OWNED);
1950 ke->ke_flags |= KEF_BOUND;
1952 if (PCPU_GET(cpuid) == cpu)
1954 /* sched_rem without the runq_remove */
1955 ke->ke_state = KES_THREAD;
1956 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1957 kseq_notify(ke, cpu);
1958 /* When we return from mi_switch we'll be on the correct cpu. */
1959 mi_switch(SW_VOL, NULL);
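/*
 * Binding to a remote CPU reuses the migration machinery: the kse is
 * handed to the target with kseq_notify() and the current thread switches
 * away; by the time mi_switch() returns it is running on the requested
 * CPU, and KEF_BOUND keeps it there until sched_unbind() clears the flag.
 */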
1964 sched_unbind(struct thread *td)
1966 mtx_assert(&sched_lock, MA_OWNED);
1967 td->td_kse->ke_flags &= ~KEF_BOUND;
1971 sched_is_bound(struct thread *td)
1973 mtx_assert(&sched_lock, MA_OWNED);
1974 return (td->td_kse->ke_flags & KEF_BOUND);
1985 for (i = 0; i <= ksg_maxid; i++)
1986 total += KSEQ_GROUP(i)->ksg_load;
1989 return (KSEQ_SELF()->ksq_sysload);
1994 sched_sizeof_ksegrp(void)
1996 return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
2000 sched_sizeof_proc(void)
2002 return (sizeof(struct proc));
2006 sched_sizeof_thread(void)
2008 return (sizeof(struct thread) + sizeof(struct td_sched));
2010 #define KERN_SWITCH_INCLUDE 1
2011 #include "kern/kern_switch.c"