2 * Copyright (c) 2005-2006, David Xu <yfxu@corp.netease.com>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include "opt_hwpmc_hooks.h"
31 #include "opt_sched.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
42 #include <sys/mutex.h>
44 #include <sys/resource.h>
45 #include <sys/resourcevar.h>
46 #include <sys/sched.h>
49 #include <sys/sysctl.h>
50 #include <sys/sysproto.h>
51 #include <sys/turnstile.h>
53 #include <sys/unistd.h>
54 #include <sys/vmmeter.h>
57 #include <sys/ktrace.h>
61 #include <sys/pmckern.h>
64 #include <machine/cpu.h>
65 #include <machine/smp.h>
67 /* get process's nice value, clamping at 19 since nice 20 is not supported */
68 #define PROC_NICE(p) MIN((p)->p_nice, 19)
70 /* convert nice to kernel thread priority */
71 #define NICE_TO_PRI(nice) (PUSER + 20 + (nice))
73 /* get process's static priority */
74 #define PROC_PRI(p) NICE_TO_PRI(PROC_NICE(p))
76 /* convert kernel thread priority to user priority */
77 #define USER_PRI(pri) MIN((pri) - PUSER, 39)
79 /* convert nice value to user priority */
80 #define PROC_USER_PRI(p) (PROC_NICE(p) + 20)
82 /* one more than the highest user priority, i.e. the number of user priorities */
83 #define MAX_USER_PRI 40
85 /* maximum kernel thread priority, corresponding to nice 19 */
86 #define PUSER_MAX (PUSER + 39)
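/*
 * Worked example for the macros above (illustrative only; it assumes
 * PUSER is 160, the usual value on FreeBSD of this vintage): nice 0
 * gives NICE_TO_PRI(0) = PUSER + 20 = 180, the worst supported nice
 * (19) gives PUSER_MAX = 199, USER_PRI(180) = 20, and PROC_USER_PRI()
 * spreads nice -20..19 onto user priorities 0..39.
 */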
88 /* ticks and nanosecond converters */
89 #define NS_TO_HZ(n) ((n) / (1000000000 / hz))
90 #define HZ_TO_NS(h) ((h) * (1000000000 / hz))
92 /* ticks and microsecond converters */
93 #define MS_TO_HZ(m) ((m) / (1000000 / hz))
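/*
 * Example, assuming hz = 1000 (hz is configurable, so this is only
 * illustrative): one tick is 1,000,000 ns, so NS_TO_HZ(5000000) = 5
 * ticks and HZ_TO_NS(100) = 100,000,000 ns.  MS_TO_HZ() divides by
 * 1000000 / hz and therefore takes its argument in microseconds, as
 * the comment above says.
 */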
95 #define PRI_SCORE_RATIO 25
96 #define MAX_SCORE (MAX_USER_PRI * PRI_SCORE_RATIO / 100)
97 #define MAX_SLEEP_TIME (def_timeslice * MAX_SCORE)
98 #define NS_MAX_SLEEP_TIME (HZ_TO_NS(MAX_SLEEP_TIME))
99 #define STARVATION_TIME (MAX_SLEEP_TIME)
101 #define CURRENT_SCORE(kg) \
102 (MAX_SCORE * NS_TO_HZ((kg)->kg_slptime) / MAX_SLEEP_TIME)
104 #define SCALE_USER_PRI(x, upri) \
105 MAX(x * (upri + 1) / (MAX_USER_PRI/2), min_timeslice)
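/*
 * With the values above (MAX_USER_PRI = 40, PRI_SCORE_RATIO = 25),
 * MAX_SCORE works out to 10.  Assuming hz = 1000 and the default
 * def_timeslice of 100 ticks, MAX_SLEEP_TIME is 1000 ticks and
 * NS_MAX_SLEEP_TIME is one second, so a ksegrp that has accumulated
 * half a second of kg_slptime scores CURRENT_SCORE = 10 * 500 / 1000,
 * i.e. half of MAX_SCORE.
 */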
108 * For a thread whose nice is zero, the score is used to determine
109 * if it is an interactive thread.
111 #define INTERACTIVE_BASE_SCORE (MAX_SCORE * 20)/100
114 * Calculate the score a thread must have to prove that it is
115 * an interactive thread.
117 #define INTERACTIVE_SCORE(ke) \
118 (PROC_NICE((ke)->ke_proc) * MAX_SCORE / 40 + INTERACTIVE_BASE_SCORE)
120 /* Test if a thread is an interactive thread */
121 #define THREAD_IS_INTERACTIVE(ke) \
122 ((ke)->ke_ksegrp->kg_user_pri <= \
123 PROC_PRI((ke)->ke_proc) - INTERACTIVE_SCORE(ke))
126 * Calculate how long a thread must sleep to prove that it is an interactive thread.
129 #define INTERACTIVE_SLEEP_TIME(ke) \
130 (HZ_TO_NS(MAX_SLEEP_TIME * \
131 (MAX_SCORE / 2 + INTERACTIVE_SCORE((ke)) + 1) / MAX_SCORE - 1))
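/*
 * Rough numbers, continuing the MAX_SCORE = 10 example with hz = 1000
 * and def_timeslice = 100 ticks: INTERACTIVE_BASE_SCORE is 2, so a
 * nice-0 thread counts as interactive once its dynamic user priority
 * is at least two steps better than its static priority, and its
 * INTERACTIVE_SLEEP_TIME() is HZ_TO_NS(1000 * 8 / 10 - 1), about 0.8
 * seconds.  A nice-19 thread needs a score of 6 and roughly 1.2
 * seconds of sleep, so it is much harder for it to qualify.
 */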
133 #define CHILD_WEIGHT 90
134 #define PARENT_WEIGHT 90
135 #define EXIT_WEIGHT 3
137 #define SCHED_LOAD_SCALE 128UL
143 #define KQB_LEN (8) /* Number of priority status words. */
144 #define KQB_L2BPW (5) /* Log2(sizeof(rqb_word_t) * NBBY). */
145 #define KQB_BPW (1<<KQB_L2BPW) /* Bits in an rqb_word_t. */
147 #define KQB_BIT(pri) (1 << ((pri) & (KQB_BPW - 1)))
148 #define KQB_WORD(pri) ((pri) >> KQB_L2BPW)
149 #define KQB_FFS(word) (ffs(word) - 1)
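/*
 * Indexing example: KQB_LEN words of 32 bits cover 256 queue indices.
 * Priority 37 lives in word KQB_WORD(37) = 37 >> 5 = 1 at bit
 * KQB_BIT(37) = 1 << (37 & 31) = 1 << 5, and KQB_FFS() uses ffs() to
 * return the index of the lowest set bit, so the best non-empty queue
 * is found with at most KQB_LEN word tests.
 */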
154 * Type of run queue status word.
156 typedef u_int32_t kqb_word_t;
159 * Head of run queues.
161 TAILQ_HEAD(krqhead, kse);
164 * Bit array which maintains the status of a run queue. When a queue is
165 * non-empty the bit corresponding to the queue number will be set.
168 kqb_word_t rqb_bits[KQB_LEN];
172 * Run queue structure. Contains an array of run queues on which processes
173 * are placed, and a structure to maintain the status of each queue.
176 struct krqbits rq_status;
177 struct krqhead rq_queues[KQ_NQS];
181 * The following data structures are allocated within their parent structure
182 * but are scheduler specific.
185 * The schedulable entity that can be given a context to run. A process may
186 * have several of these.
189 struct thread *ke_thread; /* (*) Active associated thread. */
190 TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */
191 int ke_flags; /* (j) KEF_* flags. */
192 fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
193 u_char ke_rqindex; /* (j) Run queue index. */
195 KES_THREAD = 0x0, /* slaved to thread state */
197 } ke_state; /* (j) thread sched specific status. */
198 int ke_slice; /* Time slice in ticks */
199 struct kseq *ke_kseq; /* Kseq the thread belongs to */
200 struct krunq *ke_runq; /* Associated runqueue */
202 int ke_cpu; /* CPU that we have affinity for. */
203 int ke_wakeup_cpu; /* CPU that has activated us. */
205 int ke_activated; /* How the thread was activated. */
206 uint64_t ke_timestamp; /* Last timestamp dependent on state.*/
207 unsigned ke_lastran; /* Last timestamp the thread ran. */
209 /* The following variables are only used for pctcpu calculation */
210 int ke_ltick; /* Last tick that we were running on */
211 int ke_ftick; /* First tick that we were running on */
212 int ke_ticks; /* Tick count */
215 #define td_kse td_sched
216 #define ke_proc ke_thread->td_proc
217 #define ke_ksegrp ke_thread->td_ksegrp
219 /* flags kept in ke_flags */
220 #define KEF_BOUND 0x0001 /* Thread can not migrate. */
221 #define KEF_PREEMPTED 0x0002 /* Thread was preempted. */
222 #define KEF_MIGRATING 0x0004 /* Thread is migrating. */
223 #define KEF_SLEEP 0x0008 /* Thread did sleep. */
224 #define KEF_DIDRUN 0x0010 /* Thread actually ran. */
225 #define KEF_EXIT 0x0020 /* Thread is being killed. */
226 #define KEF_NEXTRQ 0x0400 /* Thread should be in next queue. */
227 #define KEF_FIRST_SLICE 0x0800 /* Thread has first time slice left. */
230 struct thread *skg_last_assigned; /* (j) Last thread assigned to */
231 /* the system scheduler */
232 u_long skg_slptime; /* (j) Number of ticks we vol. slept */
233 u_long skg_runtime; /* (j) Temp total run time. */
234 int skg_avail_opennings; /* (j) Num unfilled slots in group.*/
235 int skg_concurrency; /* (j) Num threads requested in group.*/
237 #define kg_last_assigned kg_sched->skg_last_assigned
238 #define kg_avail_opennings kg_sched->skg_avail_opennings
239 #define kg_concurrency kg_sched->skg_concurrency
240 #define kg_slptime kg_sched->skg_slptime
241 #define kg_runtime kg_sched->skg_runtime
243 #define SLOT_RELEASE(kg) (kg)->kg_avail_opennings++
244 #define SLOT_USE(kg) (kg)->kg_avail_opennings--
247 * Cpu percentage computation macros and defines.
249 * SCHED_CPU_TIME: Number of seconds to average the cpu usage across.
250 * SCHED_CPU_TICKS: Number of hz ticks to average the cpu usage across.
253 #define SCHED_CPU_TIME 10
254 #define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME)
257 * kseq - per processor runqs and statistics.
260 struct krunq *ksq_curr; /* Current queue. */
261 struct krunq *ksq_next; /* Next timeshare queue. */
262 struct krunq ksq_timeshare[2]; /* Run queues for !IDLE. */
263 struct krunq ksq_idle; /* Queue of IDLE threads. */
265 uint64_t ksq_last_timestamp; /* Per-cpu last clock tick */
266 unsigned ksq_expired_tick; /* First expired tick */
267 signed char ksq_expired_nice; /* Lowest nice in nextq */
270 static struct kse kse0;
271 static struct kg_sched kg_sched0;
273 static int min_timeslice = 5;
274 static int def_timeslice = 100;
275 static int granularity = 10;
276 static int realstathz;
277 static int sched_tdcnt;
278 static struct kseq kseq_global;
281 * One kse queue per processor.
284 static struct kseq kseq_cpu[MAXCPU];
286 #define KSEQ_SELF() (&kseq_cpu[PCPU_GET(cpuid)])
287 #define KSEQ_CPU(x) (&kseq_cpu[(x)])
288 #define KSEQ_ID(x) ((x) - kseq_cpu)
290 static cpumask_t cpu_sibling[MAXCPU];
294 #define KSEQ_SELF() (&kseq_global)
295 #define KSEQ_CPU(x) (&kseq_global)
298 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
299 static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
300 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
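/*
 * Sanity check on the constant: ccpu is exp(-1/20) in fixed point, so
 * after 60 one-second decays the remaining fraction is exp(-1/20)^60 =
 * exp(-3), roughly 0.05, which is the "decay 95% in 60 seconds"
 * behaviour described above.
 */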
302 static void sched_setup(void *dummy);
303 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
305 static void sched_initticks(void *dummy);
306 SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL);
308 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
310 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "CORE", 0,
314 /* Enable forwarding of wakeups to all other cpus */
315 SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");
317 static int runq_fuzz = 0;
318 SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
320 static int forward_wakeup_enabled = 1;
321 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
322 &forward_wakeup_enabled, 0,
323 "Forwarding of wakeup to idle CPUs");
325 static int forward_wakeups_requested = 0;
326 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
327 &forward_wakeups_requested, 0,
328 "Requests for Forwarding of wakeup to idle CPUs");
330 static int forward_wakeups_delivered = 0;
331 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
332 &forward_wakeups_delivered, 0,
333 "Completed Forwarding of wakeup to idle CPUs");
335 static int forward_wakeup_use_mask = 1;
336 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
337 &forward_wakeup_use_mask, 0,
338 "Use the mask of idle cpus");
340 static int forward_wakeup_use_loop = 0;
341 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
342 &forward_wakeup_use_loop, 0,
343 "Use a loop to find idle cpus");
345 static int forward_wakeup_use_single = 0;
346 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
347 &forward_wakeup_use_single, 0,
348 "Only signal one idle cpu");
350 static int forward_wakeup_use_htt = 0;
351 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
352 &forward_wakeup_use_htt, 0,
356 static void slot_fill(struct ksegrp *);
358 static void krunq_add(struct krunq *, struct kse *);
359 static struct kse *krunq_choose(struct krunq *);
360 static void krunq_clrbit(struct krunq *rq, int pri);
361 static int krunq_findbit(struct krunq *rq);
362 static void krunq_init(struct krunq *);
363 static void krunq_remove(struct krunq *, struct kse *);
365 static struct kse * kseq_choose(struct kseq *);
366 static void kseq_load_add(struct kseq *, struct kse *);
367 static void kseq_load_rem(struct kseq *, struct kse *);
368 static void kseq_runq_add(struct kseq *, struct kse *);
369 static void kseq_runq_rem(struct kseq *, struct kse *);
370 static void kseq_setup(struct kseq *);
372 static int sched_is_timeshare(struct ksegrp *kg);
373 static struct kse *sched_choose(void);
374 static int sched_calc_pri(struct ksegrp *kg);
375 static int sched_starving(struct kseq *, unsigned, struct kse *);
376 static void sched_pctcpu_update(struct kse *);
377 static void sched_thread_priority(struct thread *, u_char);
378 static uint64_t sched_timestamp(void);
379 static int sched_recalc_pri(struct kse *ke, uint64_t now);
380 static int sched_timeslice(struct kse *ke);
381 static void sched_update_runtime(struct kse *ke, uint64_t now);
382 static void sched_commit_runtime(struct kse *ke);
385 * Initialize a run structure.
388 krunq_init(struct krunq *rq)
392 bzero(rq, sizeof *rq);
393 for (i = 0; i < KQ_NQS; i++)
394 TAILQ_INIT(&rq->rq_queues[i]);
398 * Clear the status bit of the queue corresponding to priority level pri,
399 * indicating that it is empty.
402 krunq_clrbit(struct krunq *rq, int pri)
406 rqb = &rq->rq_status;
407 rqb->rqb_bits[KQB_WORD(pri)] &= ~KQB_BIT(pri);
411 * Find the index of the first non-empty run queue. This is done by
412 * scanning the status bits, a set bit indicates a non-empty queue.
415 krunq_findbit(struct krunq *rq)
421 rqb = &rq->rq_status;
422 for (i = 0; i < KQB_LEN; i++) {
423 if (rqb->rqb_bits[i]) {
424 pri = KQB_FFS(rqb->rqb_bits[i]) + (i << KQB_L2BPW);
432 krunq_check(struct krunq *rq)
437 rqb = &rq->rq_status;
438 for (i = 0; i < KQB_LEN; i++) {
439 if (rqb->rqb_bits[i])
446 * Set the status bit of the queue corresponding to priority level pri,
447 * indicating that it is non-empty.
450 krunq_setbit(struct krunq *rq, int pri)
454 rqb = &rq->rq_status;
455 rqb->rqb_bits[KQB_WORD(pri)] |= KQB_BIT(pri);
459 * Add the KSE to the queue specified by its priority, and set the
460 * corresponding status bit.
463 krunq_add(struct krunq *rq, struct kse *ke)
468 pri = ke->ke_thread->td_priority;
469 ke->ke_rqindex = pri;
470 krunq_setbit(rq, pri);
471 rqh = &rq->rq_queues[pri];
472 if (ke->ke_flags & KEF_PREEMPTED)
473 TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
475 TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
479 * Find the highest priority process on the run queue.
482 krunq_choose(struct krunq *rq)
488 mtx_assert(&sched_lock, MA_OWNED);
489 if ((pri = krunq_findbit(rq)) != -1) {
490 rqh = &rq->rq_queues[pri];
491 ke = TAILQ_FIRST(rqh);
492 KASSERT(ke != NULL, ("krunq_choose: no thread on busy queue"));
494 if (pri <= PRI_MAX_ITHD || runq_fuzz <= 0)
498 * In the first couple of entries, check if
499 * there is one for our CPU as a preference.
501 struct kse *ke2 = ke;
502 const int mycpu = PCPU_GET(cpuid);
503 const int mymask = 1 << mycpu;
504 int count = runq_fuzz;
506 while (count-- && ke2) {
507 const int cpu = ke2->ke_wakeup_cpu;
508 if (cpu_sibling[cpu] & mymask) {
512 ke2 = TAILQ_NEXT(ke2, ke_procq);
522 * Remove the KSE from the queue specified by its priority, and clear the
523 * corresponding status bit if the queue becomes empty.
524 * Caller must set ke->ke_state afterwards.
527 krunq_remove(struct krunq *rq, struct kse *ke)
532 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
533 ("runq_remove: process swapped out"));
534 pri = ke->ke_rqindex;
535 rqh = &rq->rq_queues[pri];
536 KASSERT(ke != NULL, ("krunq_remove: no proc on busy queue"));
537 TAILQ_REMOVE(rqh, ke, ke_procq);
538 if (TAILQ_EMPTY(rqh))
539 krunq_clrbit(rq, pri);
543 kseq_runq_add(struct kseq *kseq, struct kse *ke)
545 krunq_add(ke->ke_runq, ke);
550 kseq_runq_rem(struct kseq *kseq, struct kse *ke)
552 krunq_remove(ke->ke_runq, ke);
558 kseq_load_add(struct kseq *kseq, struct kse *ke)
561 if ((ke->ke_proc->p_flag & P_NOLOAD) == 0)
566 kseq_load_rem(struct kseq *kseq, struct kse *ke)
569 if ((ke->ke_proc->p_flag & P_NOLOAD) == 0)
574 * Pick the highest priority task we have and return it.
577 kseq_choose(struct kseq *kseq)
582 mtx_assert(&sched_lock, MA_OWNED);
583 ke = krunq_choose(kseq->ksq_curr);
587 kseq->ksq_expired_nice = PRIO_MAX + 1;
588 kseq->ksq_expired_tick = 0;
589 swap = kseq->ksq_curr;
590 kseq->ksq_curr = kseq->ksq_next;
591 kseq->ksq_next = swap;
592 ke = krunq_choose(kseq->ksq_curr);
596 return krunq_choose(&kseq->ksq_idle);
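/*
 * Note on the swap above: expired timeshare threads are queued on
 * ksq_next (KEF_NEXTRQ in sched_add()), so they are only reconsidered
 * once ksq_curr drains and the two arrays are exchanged.  A cpu hog
 * whose slice expires in sched_tick(), for instance, waits on the next
 * queue while freshly woken interactive threads keep being picked from
 * the current one.
 */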
599 static inline uint64_t
600 sched_timestamp(void)
602 uint64_t now = cputick2usec(cpu_ticks()) * 1000;
607 sched_timeslice(struct kse *ke)
609 struct proc *p = ke->ke_proc;
611 if (ke->ke_proc->p_nice < 0)
612 return SCALE_USER_PRI(def_timeslice*4, PROC_USER_PRI(p));
614 return SCALE_USER_PRI(def_timeslice, PROC_USER_PRI(p));
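/*
 * Illustrative slice values (assuming hz = 1000, so def_timeslice is
 * 100 ticks and min_timeslice is 5): a nice-0 process has user
 * priority 20 and gets SCALE_USER_PRI(100, 20) = 100 * 21 / 20 = 105
 * ticks, while a process with a negative nice is scaled from the
 * larger 4 * def_timeslice base instead.
 */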
618 sched_is_timeshare(struct ksegrp *kg)
620 return (kg->kg_pri_class == PRI_TIMESHARE);
624 sched_calc_pri(struct ksegrp *kg)
628 if (sched_is_timeshare(kg)) {
629 score = CURRENT_SCORE(kg) - MAX_SCORE / 2;
630 pri = PROC_PRI(kg->kg_proc) - score;
633 else if (pri > PUSER_MAX)
637 return (kg->kg_base_user_pri);
641 sched_recalc_pri(struct kse *ke, uint64_t now)
644 unsigned int sleep_time;
648 delta = now - ke->ke_timestamp;
649 if (__predict_false(!sched_is_timeshare(kg)))
650 return (kg->kg_base_user_pri);
652 if (delta > NS_MAX_SLEEP_TIME)
653 sleep_time = NS_MAX_SLEEP_TIME;
655 sleep_time = (unsigned int)delta;
656 if (__predict_false(sleep_time == 0))
659 if (ke->ke_activated != -1 &&
660 sleep_time > INTERACTIVE_SLEEP_TIME(ke)) {
661 kg->kg_slptime = HZ_TO_NS(MAX_SLEEP_TIME - def_timeslice);
663 sleep_time *= (MAX_SCORE - CURRENT_SCORE(kg)) ? : 1;
666 * If the thread is waking from an uninterruptible sleep, it is
667 * unlikely to be an interactive sleep; limit its sleep time to
668 * prevent it from being treated as an interactive thread.
670 if (ke->ke_activated == -1) {
671 if (kg->kg_slptime >= INTERACTIVE_SLEEP_TIME(ke))
673 else if (kg->kg_slptime + sleep_time >=
674 INTERACTIVE_SLEEP_TIME(ke)) {
675 kg->kg_slptime = INTERACTIVE_SLEEP_TIME(ke);
681 * Thread gets priority boost here.
683 kg->kg_slptime += sleep_time;
685 /* Sleep time should never be larger than maximum */
686 if (kg->kg_slptime > NS_MAX_SLEEP_TIME)
687 kg->kg_slptime = NS_MAX_SLEEP_TIME;
691 return (sched_calc_pri(kg));
695 sched_update_runtime(struct kse *ke, uint64_t now)
698 struct ksegrp *kg = ke->ke_ksegrp;
700 if (sched_is_timeshare(kg)) {
701 if ((int64_t)(now - ke->ke_timestamp) < NS_MAX_SLEEP_TIME) {
702 runtime = now - ke->ke_timestamp;
703 if ((int64_t)(now - ke->ke_timestamp) < 0)
706 runtime = NS_MAX_SLEEP_TIME;
708 runtime /= (CURRENT_SCORE(kg) ? : 1);
709 kg->kg_runtime += runtime;
710 ke->ke_timestamp = now;
715 sched_commit_runtime(struct kse *ke)
717 struct ksegrp *kg = ke->ke_ksegrp;
719 if (kg->kg_runtime > kg->kg_slptime)
722 kg->kg_slptime -= kg->kg_runtime;
727 kseq_setup(struct kseq *kseq)
729 krunq_init(&kseq->ksq_timeshare[0]);
730 krunq_init(&kseq->ksq_timeshare[1]);
731 krunq_init(&kseq->ksq_idle);
732 kseq->ksq_curr = &kseq->ksq_timeshare[0];
733 kseq->ksq_next = &kseq->ksq_timeshare[1];
734 kseq->ksq_expired_nice = PRIO_MAX + 1;
735 kseq->ksq_expired_tick = 0;
739 sched_setup(void *dummy)
746 * To avoid divide-by-zero, set realstathz to a dummy value
747 * in case sched_clock() is called before sched_initticks().
750 min_timeslice = MAX(5 * hz / 1000, 1);
751 def_timeslice = MAX(100 * hz / 1000, 1);
752 granularity = MAX(10 * hz / 1000, 1);
754 kseq_setup(&kseq_global);
756 runq_fuzz = MIN(mp_ncpus * 2, 8);
758 * Initialize the kseqs.
760 for (i = 0; i < MAXCPU; i++) {
764 kseq_setup(&kseq_cpu[i]);
765 cpu_sibling[i] = 1 << i;
767 if (smp_topology != NULL) {
770 struct cpu_group *cg;
773 for (i = 0; i < smp_topology->ct_count; i++) {
774 cg = &smp_topology->ct_group[i];
775 if (cg->cg_mask & visited)
776 panic("duplicated cpumask in ct_group.");
777 if (cg->cg_mask == 0)
779 visited |= cg->cg_mask;
780 for (j = 0; j < MAXCPU; j++) {
781 if ((cg->cg_mask & (1 << j)) != 0)
782 cpu_sibling[j] |= cg->cg_mask;
788 mtx_lock_spin(&sched_lock);
789 kseq_load_add(KSEQ_SELF(), &kse0);
790 mtx_unlock_spin(&sched_lock);
795 sched_initticks(void *dummy)
797 mtx_lock_spin(&sched_lock);
798 realstathz = stathz ? stathz : hz;
799 mtx_unlock_spin(&sched_lock);
803 * Very early in the boot, some setup of scheduler-specific
804 * parts of proc0 and of some scheduler resources needs to be done.
812 * Set up the scheduler specific parts of proc0.
814 proc0.p_sched = NULL; /* XXX */
815 ksegrp0.kg_sched = &kg_sched0;
816 thread0.td_sched = &kse0;
817 kse0.ke_thread = &thread0;
818 kse0.ke_state = KES_THREAD;
820 kg_sched0.skg_concurrency = 1;
821 kg_sched0.skg_avail_opennings = 0; /* we are already running */
825 * This is only somewhat accurate since, given many processes of the same
826 * priority, they will switch when their slices run out, which will be
827 * at most SCHED_SLICE_MAX.
830 sched_rr_interval(void)
832 return (def_timeslice);
836 sched_pctcpu_update(struct kse *ke)
839 * Adjust counters and watermark for pctcpu calc.
841 if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
843 * Shift the tick count out so that the divide doesn't
844 * round away our results.
847 ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
852 ke->ke_ltick = ticks;
853 ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
857 sched_thread_priority(struct thread *td, u_char prio)
862 mtx_assert(&sched_lock, MA_OWNED);
863 if (__predict_false(td->td_priority == prio))
866 if (TD_ON_RUNQ(td)) {
868 * If the priority has been elevated due to priority
869 * propagation, we may have to move ourselves to a new
870 * queue. We still call adjustrunqueue below in case kse
871 * needs to fix things up.
873 if (prio < td->td_priority && ke->ke_runq != NULL &&
874 ke->ke_runq != ke->ke_kseq->ksq_curr) {
875 krunq_remove(ke->ke_runq, ke);
876 ke->ke_runq = ke->ke_kseq->ksq_curr;
877 krunq_add(ke->ke_runq, ke);
879 adjustrunqueue(td, prio);
881 td->td_priority = prio;
885 * Update a thread's priority when it is lent another thread's priority.
889 sched_lend_prio(struct thread *td, u_char prio)
892 td->td_flags |= TDF_BORROWING;
893 sched_thread_priority(td, prio);
897 * Restore a thread's priority when priority propagation is
898 * over. The prio argument is the minimum priority the thread
899 * needs to have to satisfy other possible priority lending
900 * requests. If the thread's regular priority is less
901 * important than prio, the thread will keep a priority boost
905 sched_unlend_prio(struct thread *td, u_char prio)
909 if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
910 td->td_base_pri <= PRI_MAX_TIMESHARE)
911 base_pri = td->td_ksegrp->kg_user_pri;
913 base_pri = td->td_base_pri;
914 if (prio >= base_pri) {
915 td->td_flags &= ~TDF_BORROWING;
916 sched_thread_priority(td, base_pri);
918 sched_lend_prio(td, prio);
922 sched_prio(struct thread *td, u_char prio)
926 if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE)
927 prio = MIN(prio, PUSER_MAX);
929 /* First, update the base priority. */
930 td->td_base_pri = prio;
933 * If the thread is borrowing another thread's priority, don't
934 * ever lower the priority.
936 if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
939 /* Change the real priority. */
940 oldprio = td->td_priority;
941 sched_thread_priority(td, prio);
944 * If the thread is on a turnstile, then let the turnstile update
947 if (TD_ON_LOCK(td) && oldprio != prio)
948 turnstile_adjust(td, oldprio);
952 sched_user_prio(struct ksegrp *kg, u_char prio)
957 kg->kg_base_user_pri = prio;
959 /* XXXKSE only for 1:1 */
961 td = TAILQ_FIRST(&kg->kg_threads);
963 kg->kg_user_pri = prio;
967 if (td->td_flags & TDF_UBORROWING && kg->kg_user_pri <= prio)
970 oldprio = kg->kg_user_pri;
971 kg->kg_user_pri = prio;
973 if (TD_ON_UPILOCK(td) && oldprio != prio)
974 umtx_pi_adjust(td, oldprio);
978 sched_lend_user_prio(struct thread *td, u_char prio)
982 td->td_flags |= TDF_UBORROWING;
984 oldprio = td->td_ksegrp->kg_user_pri;
985 td->td_ksegrp->kg_user_pri = prio;
987 if (TD_ON_UPILOCK(td) && oldprio != prio)
988 umtx_pi_adjust(td, oldprio);
992 sched_unlend_user_prio(struct thread *td, u_char prio)
994 struct ksegrp *kg = td->td_ksegrp;
997 base_pri = kg->kg_base_user_pri;
998 if (prio >= base_pri) {
999 td->td_flags &= ~TDF_UBORROWING;
1000 sched_user_prio(kg, base_pri);
1002 sched_lend_user_prio(td, prio);
1006 sched_switch(struct thread *td, struct thread *newtd, int flags)
1013 mtx_assert(&sched_lock, MA_OWNED);
1015 now = sched_timestamp();
1020 td->td_lastcpu = td->td_oncpu;
1021 td->td_oncpu = NOCPU;
1022 td->td_flags &= ~TDF_NEEDRESCHED;
1023 td->td_owepreempt = 0;
1025 if (td == PCPU_GET(idlethread)) {
1028 sched_update_runtime(ke, now);
1029 /* We are ending our run so make our slot available again */
1030 SLOT_RELEASE(td->td_ksegrp);
1031 kseq_load_rem(ksq, ke);
1032 if (TD_IS_RUNNING(td)) {
1033 setrunqueue(td, (flags & SW_PREEMPT) ?
1034 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1035 SRQ_OURSELF|SRQ_YIELDING);
1037 if ((td->td_proc->p_flag & P_HADTHREADS) &&
1039 newtd->td_ksegrp != td->td_ksegrp)) {
1041 * We will not be on the run queue.
1042 * So we must be sleeping or similar.
1043 * Don't use the slot if we will need it
1046 slot_fill(td->td_ksegrp);
1048 ke->ke_flags &= ~KEF_NEXTRQ;
1052 if (newtd != NULL) {
1054 * If we bring in a thread, account for it as if it had been
1055 * added to the run queue and then chosen.
1057 SLOT_USE(newtd->td_ksegrp);
1058 newtd->td_kse->ke_flags |= KEF_DIDRUN;
1059 newtd->td_kse->ke_timestamp = now;
1060 TD_SET_RUNNING(newtd);
1061 kseq_load_add(ksq, newtd->td_kse);
1063 newtd = choosethread();
1064 /* sched_choose sets ke_timestamp, just reuse it */
1067 ke->ke_lastran = tick;
1070 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1071 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1073 cpu_switch(td, newtd);
1075 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1076 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1080 sched_lock.mtx_lock = (uintptr_t)td;
1082 td->td_oncpu = PCPU_GET(cpuid);
1086 sched_nice(struct proc *p, int nice)
1091 PROC_LOCK_ASSERT(p, MA_OWNED);
1092 mtx_assert(&sched_lock, MA_OWNED);
1094 FOREACH_KSEGRP_IN_PROC(p, kg) {
1095 if (kg->kg_pri_class == PRI_TIMESHARE) {
1096 sched_user_prio(kg, sched_calc_pri(kg));
1097 FOREACH_THREAD_IN_GROUP(kg, td)
1098 td->td_flags |= TDF_NEEDRESCHED;
1104 sched_sleep(struct thread *td)
1108 mtx_assert(&sched_lock, MA_OWNED);
1110 if (td->td_flags & TDF_SINTR)
1111 ke->ke_activated = 0;
1113 ke->ke_activated = -1;
1114 ke->ke_flags |= KEF_SLEEP;
1118 sched_wakeup(struct thread *td)
1122 struct kseq *kseq, *mykseq;
1125 mtx_assert(&sched_lock, MA_OWNED);
1128 mykseq = KSEQ_SELF();
1129 if (ke->ke_flags & KEF_SLEEP) {
1130 ke->ke_flags &= ~KEF_SLEEP;
1131 if (sched_is_timeshare(kg)) {
1132 sched_commit_runtime(ke);
1133 now = sched_timestamp();
1134 kseq = KSEQ_CPU(td->td_lastcpu);
1137 now = now - mykseq->ksq_last_timestamp +
1138 kseq->ksq_last_timestamp;
1140 sched_user_prio(kg, sched_recalc_pri(ke, now));
1143 setrunqueue(td, SRQ_BORING);
1147 * Penalize the parent for creating a new child and initialize the child's priority.
1151 sched_fork(struct thread *td, struct thread *childtd)
1154 mtx_assert(&sched_lock, MA_OWNED);
1155 sched_fork_ksegrp(td, childtd->td_ksegrp);
1156 sched_fork_thread(td, childtd);
1160 sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
1162 struct ksegrp *kg = td->td_ksegrp;
1164 mtx_assert(&sched_lock, MA_OWNED);
1165 child->kg_slptime = kg->kg_slptime * CHILD_WEIGHT / 100;
1166 if (child->kg_pri_class == PRI_TIMESHARE)
1167 sched_user_prio(child, sched_calc_pri(child));
1168 kg->kg_slptime = kg->kg_slptime * PARENT_WEIGHT / 100;
1172 sched_fork_thread(struct thread *td, struct thread *child)
1177 sched_newthread(child);
1180 ke2 = child->td_kse;
1181 ke2->ke_slice = (ke->ke_slice + 1) >> 1;
1182 ke2->ke_flags |= KEF_FIRST_SLICE | (ke->ke_flags & KEF_NEXTRQ);
1183 ke2->ke_activated = 0;
1185 if (ke->ke_slice == 0) {
1190 /* Grab our parent's cpu estimation information. */
1191 ke2->ke_ticks = ke->ke_ticks;
1192 ke2->ke_ltick = ke->ke_ltick;
1193 ke2->ke_ftick = ke->ke_ftick;
1197 sched_class(struct ksegrp *kg, int class)
1199 mtx_assert(&sched_lock, MA_OWNED);
1200 kg->kg_pri_class = class;
1204 * Return some of the child's priority and interactivity to the parent.
1207 sched_exit(struct proc *p, struct thread *childtd)
1209 mtx_assert(&sched_lock, MA_OWNED);
1210 sched_exit_thread(FIRST_THREAD_IN_PROC(p), childtd);
1211 sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
1215 sched_exit_ksegrp(struct ksegrp *parentkg, struct thread *td)
1217 if (td->td_ksegrp->kg_slptime < parentkg->kg_slptime) {
1218 parentkg->kg_slptime = parentkg->kg_slptime /
1219 (EXIT_WEIGHT) * (EXIT_WEIGHT - 1) +
1220 td->td_ksegrp->kg_slptime / EXIT_WEIGHT;
1225 sched_exit_thread(struct thread *td, struct thread *childtd)
1227 struct kse *childke = childtd->td_kse;
1228 struct kse *parentke = td->td_kse;
1230 kseq_load_rem(KSEQ_SELF(), childke);
1231 sched_update_runtime(childke, sched_timestamp());
1232 sched_commit_runtime(childke);
1233 if ((childke->ke_flags & KEF_FIRST_SLICE) &&
1234 td->td_proc == childtd->td_proc->p_pptr) {
1235 parentke->ke_slice += childke->ke_slice;
1236 if (parentke->ke_slice > sched_timeslice(parentke))
1237 parentke->ke_slice = sched_timeslice(parentke);
1242 sched_starving(struct kseq *ksq, unsigned now, struct kse *ke)
1246 if (ke->ke_proc->p_nice > ksq->ksq_expired_nice)
1248 if (ksq->ksq_expired_tick == 0)
1250 delta = HZ_TO_NS((uint64_t)now - ksq->ksq_expired_tick);
1251 if (delta > STARVATION_TIME * ksq->ksq_load)
1257 * An interactive thread has a smaller time slice granularity;
1258 * a cpu hog can have a larger one.
1261 sched_timeslice_split(struct kse *ke)
1265 score = (int)(MAX_SCORE - CURRENT_SCORE(ke->ke_ksegrp));
1269 g = granularity * ((1 << score) - 1) * smp_cpus;
1271 g = granularity * ((1 << score) - 1);
1273 return (ke->ke_slice >= g && ke->ke_slice % g == 0);
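/*
 * Example with granularity = 10 ticks on a uniprocessor: a fairly
 * interactive thread with CURRENT_SCORE = 8 has score = 2 here, so
 * g = 10 * ((1 << 2) - 1) = 30 and the thread is asked to reschedule
 * whenever its remaining slice crosses a multiple of 30 ticks; a cpu
 * hog ends up with a larger score, hence a larger g, and is split up
 * less often.
 */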
1288 mtx_assert(&sched_lock, MA_OWNED);
1294 class = PRI_BASE(kg->kg_pri_class);
1295 now = sched_timestamp();
1296 cpuid = PCPU_GET(cpuid);
1297 kseq = KSEQ_CPU(cpuid);
1298 kseq->ksq_last_timestamp = now;
1300 if (class == PRI_IDLE) {
1302 * Processes of equal idle priority are run round-robin.
1304 if (td != PCPU_GET(idlethread) && --ke->ke_slice <= 0) {
1305 ke->ke_slice = def_timeslice;
1306 td->td_flags |= TDF_NEEDRESCHED;
1311 if (class == PRI_REALTIME) {
1313 * Realtime scheduling, do round robin for RR class, FIFO
1316 if (PRI_NEED_RR(kg->kg_pri_class) && --ke->ke_slice <= 0) {
1317 ke->ke_slice = def_timeslice;
1318 td->td_flags |= TDF_NEEDRESCHED;
1324 * We skip kernel threads, even though they may be classified as TIMESHARE.
1326 if (class != PRI_TIMESHARE || (p->p_flag & P_KTHREAD) != 0)
1329 if (--ke->ke_slice <= 0) {
1330 td->td_flags |= TDF_NEEDRESCHED;
1331 sched_update_runtime(ke, now);
1332 sched_commit_runtime(ke);
1333 sched_user_prio(kg, sched_calc_pri(kg));
1334 ke->ke_slice = sched_timeslice(ke);
1335 ke->ke_flags &= ~KEF_FIRST_SLICE;
1336 if (ke->ke_flags & KEF_BOUND || td->td_pinned) {
1337 if (kseq->ksq_expired_tick == 0)
1338 kseq->ksq_expired_tick = tick;
1340 if (kseq_global.ksq_expired_tick == 0)
1341 kseq_global.ksq_expired_tick = tick;
1343 if (!THREAD_IS_INTERACTIVE(ke) ||
1344 sched_starving(kseq, tick, ke) ||
1345 sched_starving(&kseq_global, tick, ke)) {
1346 /* The thread has become a cpu hog; schedule it off. */
1347 ke->ke_flags |= KEF_NEXTRQ;
1348 if (ke->ke_flags & KEF_BOUND || td->td_pinned) {
1349 if (p->p_nice < kseq->ksq_expired_nice)
1350 kseq->ksq_expired_nice = p->p_nice;
1352 if (p->p_nice < kseq_global.ksq_expired_nice)
1353 kseq_global.ksq_expired_nice =
1359 * Don't allow an interactive thread with a long timeslice
1360 * to monopolize the CPU; split the long timeslice into small
1361 * chunks. This essentially does round-robin between
1362 * interactive threads.
1364 if (THREAD_IS_INTERACTIVE(ke) && sched_timeslice_split(ke))
1365 td->td_flags |= TDF_NEEDRESCHED;
1370 sched_clock(struct thread *td)
1375 mtx_assert(&sched_lock, MA_OWNED);
1379 /* Adjust ticks for pctcpu */
1381 ke->ke_ltick = ticks;
1383 /* Go up to one second beyond our max and then trim back down */
1384 if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1385 sched_pctcpu_update(ke);
1389 kseq_runnable(struct kseq *kseq)
1391 return (krunq_check(kseq->ksq_curr) ||
1392 krunq_check(kseq->ksq_next) ||
1393 krunq_check(&kseq->ksq_idle));
1397 sched_runnable(void)
1400 return (kseq_runnable(&kseq_global) || kseq_runnable(KSEQ_SELF()));
1402 return (kseq_runnable(&kseq_global));
1407 sched_userret(struct thread *td)
1411 KASSERT((td->td_flags & TDF_BORROWING) == 0,
1412 ("thread with borrowed priority returning to userland"));
1414 if (td->td_priority != kg->kg_user_pri) {
1415 mtx_lock_spin(&sched_lock);
1416 td->td_priority = kg->kg_user_pri;
1417 td->td_base_pri = kg->kg_user_pri;
1418 mtx_unlock_spin(&sched_lock);
1431 mtx_assert(&sched_lock, MA_OWNED);
1432 kseq = &kseq_global;
1433 ke = kseq_choose(&kseq_global);
1434 kecpu = kseq_choose(KSEQ_SELF());
1438 kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
1443 kseq = &kseq_global;
1444 ke = kseq_choose(kseq);
1448 kseq_runq_rem(kseq, ke);
1449 ke->ke_state = KES_THREAD;
1450 ke->ke_flags &= ~KEF_PREEMPTED;
1451 ke->ke_timestamp = sched_timestamp();
1459 forward_wakeup(int cpunum, cpumask_t me)
1461 cpumask_t map, dontuse;
1466 mtx_assert(&sched_lock, MA_OWNED);
1468 CTR0(KTR_RUNQ, "forward_wakeup()");
1470 if ((!forward_wakeup_enabled) ||
1471 (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
1473 if (!smp_started || cold || panicstr)
1476 forward_wakeups_requested++;
1479 * check the idle mask we received against what we calculated before
1480 * in the old version.
1483 * don't bother if we should be doing it ourselves.
1485 if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
1488 dontuse = me | stopped_cpus | hlt_cpus_mask;
1490 if (forward_wakeup_use_loop) {
1491 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
1492 id = pc->pc_cpumask;
1493 if ( (id & dontuse) == 0 &&
1494 pc->pc_curthread == pc->pc_idlethread) {
1500 if (forward_wakeup_use_mask) {
1502 map = idle_cpus_mask & ~dontuse;
1504 /* If they are both on, compare and use loop if different */
1505 if (forward_wakeup_use_loop) {
1507 printf("map (%02X) != map3 (%02X)\n",
1515 /* If we only allow a specific CPU, then mask off all the others */
1516 if (cpunum != NOCPU) {
1517 KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
1518 map &= (1 << cpunum);
1520 /* Try to choose an idle die. */
1521 if (forward_wakeup_use_htt) {
1522 map2 = (map & (map >> 1)) & 0x5555;
1528 /* set only one bit */
1529 if (forward_wakeup_use_single) {
1530 map = map & ((~map) + 1);
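/*
 * Both map tricks above are plain bit manipulation: (map & (map >> 1))
 * & 0x5555 keeps a bit only when an even/odd pair of bits is set,
 * selecting packages whose two HTT siblings are both idle (assuming
 * siblings have adjacent cpu ids), and map & ((~map) + 1) is the
 * two's-complement way of isolating the lowest set bit, e.g. 0x6
 * becomes 0x2.
 */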
1534 forward_wakeups_delivered++;
1535 ipi_selected(map, IPI_AST);
1543 sched_add(struct thread *td, int flags)
1548 struct thread *mytd;
1551 int need_resched = 0;
1559 mtx_assert(&sched_lock, MA_OWNED);
1563 KASSERT(ke->ke_state != KES_ONRUNQ,
1564 ("sched_add: kse %p (%s) already in run queue", ke,
1565 ke->ke_proc->p_comm));
1566 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1567 ("sched_add: process swapped out"));
1568 KASSERT(ke->ke_runq == NULL,
1569 ("sched_add: KSE %p is still assigned to a run queue", ke));
1571 class = PRI_BASE(kg->kg_pri_class);
1573 mycpu = PCPU_GET(cpuid);
1574 myksq = KSEQ_CPU(mycpu);
1575 ke->ke_wakeup_cpu = mycpu;
1577 nextrq = (ke->ke_flags & KEF_NEXTRQ);
1578 ke->ke_flags &= ~KEF_NEXTRQ;
1579 if (flags & SRQ_PREEMPTED)
1580 ke->ke_flags |= KEF_PREEMPTED;
1583 if (td->td_pinned != 0) {
1584 cpu = td->td_lastcpu;
1585 ksq = KSEQ_CPU(cpu);
1587 } else if ((ke)->ke_flags & KEF_BOUND) {
1589 ksq = KSEQ_CPU(cpu);
1599 ke->ke_runq = ksq->ksq_curr;
1602 if ((td->td_flags & TDF_BORROWING) == 0 && nextrq)
1603 ke->ke_runq = ksq->ksq_next;
1605 ke->ke_runq = ksq->ksq_curr;
1609 * This is for priority prop.
1611 if (td->td_priority < PRI_MIN_IDLE)
1612 ke->ke_runq = ksq->ksq_curr;
1614 ke->ke_runq = &ksq->ksq_idle;
1617 panic("Unknown pri class.");
1622 if ((ke->ke_runq == kseq_global.ksq_curr ||
1623 ke->ke_runq == myksq->ksq_curr) &&
1624 td->td_priority < mytd->td_priority) {
1626 if (ke->ke_runq == kseq_global.ksq_curr &&
1627 td->td_priority < mytd->td_priority) {
1633 if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
1636 need_resched = TDF_NEEDRESCHED;
1640 ke->ke_state = KES_ONRUNQ;
1641 kseq_runq_add(ksq, ke);
1642 kseq_load_add(ksq, ke);
1647 struct thread *running = pcpu_find(cpu)->pc_curthread;
1648 if (ksq->ksq_curr == ke->ke_runq &&
1649 running->td_priority < td->td_priority) {
1650 if (td->td_priority <= PRI_MAX_ITHD)
1651 ipi_selected(1 << cpu, IPI_PREEMPT);
1653 running->td_flags |= TDF_NEEDRESCHED;
1654 ipi_selected(1 << cpu, IPI_AST);
1658 curthread->td_flags |= need_resched;
1660 cpumask_t me = 1 << mycpu;
1661 cpumask_t idle = idle_cpus_mask & me;
1664 if (!idle && ((flags & SRQ_INTR) == 0) &&
1665 (idle_cpus_mask & ~(hlt_cpus_mask | me)))
1666 forwarded = forward_wakeup(cpu, me);
1668 curthread->td_flags |= need_resched;
1671 mytd->td_flags |= need_resched;
1676 sched_rem(struct thread *td)
1681 mtx_assert(&sched_lock, MA_OWNED);
1683 KASSERT((ke->ke_state == KES_ONRUNQ),
1684 ("sched_rem: KSE not on run queue"));
1687 SLOT_RELEASE(td->td_ksegrp);
1688 kseq_runq_rem(kseq, ke);
1689 kseq_load_rem(kseq, ke);
1690 ke->ke_state = KES_THREAD;
1694 sched_pctcpu(struct thread *td)
1704 mtx_lock_spin(&sched_lock);
1709 * Don't update more frequently than twice a second. Allowing
1710 * this causes the cpu usage to decay away too quickly due to
1713 if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
1714 ke->ke_ltick < (ticks - (hz / 2)))
1715 sched_pctcpu_update(ke);
1716 /* How many rticks per second? */
1717 rtick = MIN(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1718 pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
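/*
 * Worked example for the fixed-point math above, assuming realstathz
 * is 128 for illustration: a thread that collected 640 stat ticks over
 * the 10-second SCHED_CPU_TIME window has rtick = 64, so pctcpu is
 * (FSCALE * ((FSCALE * 64) / 128)) >> FSHIFT = FSCALE / 2, which
 * user-level tools report as roughly 50% cpu.
 */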
1721 ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1722 mtx_unlock_spin(&sched_lock);
1728 sched_bind(struct thread *td, int cpu)
1732 mtx_assert(&sched_lock, MA_OWNED);
1734 ke->ke_flags |= KEF_BOUND;
1737 if (PCPU_GET(cpuid) == cpu)
1739 mi_switch(SW_VOL, NULL);
1744 sched_unbind(struct thread *td)
1746 mtx_assert(&sched_lock, MA_OWNED);
1747 td->td_kse->ke_flags &= ~KEF_BOUND;
1751 sched_is_bound(struct thread *td)
1753 mtx_assert(&sched_lock, MA_OWNED);
1754 return (td->td_kse->ke_flags & KEF_BOUND);
1760 return (sched_tdcnt);
1764 sched_relinquish(struct thread *td)
1769 mtx_lock_spin(&sched_lock);
1770 if (sched_is_timeshare(kg)) {
1771 sched_prio(td, PRI_MAX_TIMESHARE);
1772 td->td_kse->ke_flags |= KEF_NEXTRQ;
1774 mi_switch(SW_VOL, NULL);
1775 mtx_unlock_spin(&sched_lock);
1779 sched_sizeof_ksegrp(void)
1781 return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1785 sched_sizeof_proc(void)
1787 return (sizeof(struct proc));
1791 sched_sizeof_thread(void)
1793 return (sizeof(struct thread) + sizeof(struct td_sched));
1795 #define KERN_SWITCH_INCLUDE 1
1796 #include "kern/kern_switch.c"