2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
40 #include "opt_callout_profiling.h"
41 #include "opt_kdtrace.h"
43 #include "opt_timer.h"
46 #include <sys/param.h>
47 #include <sys/systm.h>
49 #include <sys/callout.h>
51 #include <sys/interrupt.h>
52 #include <sys/kernel.h>
55 #include <sys/malloc.h>
56 #include <sys/mutex.h>
59 #include <sys/sleepqueue.h>
60 #include <sys/sysctl.h>
64 #include <machine/cpu.h>
67 #ifndef NO_EVENTTIMERS
68 DPCPU_DECLARE(sbintime_t, hardclocktime);
69 #endif
71 SDT_PROVIDER_DEFINE(callout_execute);
72 SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
73 SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
74     "struct callout *");
75 SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
76 SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
77     "struct callout *");
79 #ifdef CALLOUT_PROFILING
80 static int avg_depth;
81 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
82 "Average number of items examined per softclock call. Units = 1/1000");
83 static int avg_gcalls;
84 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
85 "Average number of Giant callouts made per softclock call. Units = 1/1000");
86 static int avg_lockcalls;
87 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
88 "Average number of lock callouts made per softclock call. Units = 1/1000");
89 static int avg_mpcalls;
90 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
91 "Average number of MP callouts made per softclock call. Units = 1/1000");
92 static int avg_depth_dir;
93 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
94 "Average number of direct callouts examined per callout_process call. "
96 static int avg_lockcalls_dir;
97 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
98 &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
99 "callout_process call. Units = 1/1000");
100 static int avg_mpcalls_dir;
101 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
102 0, "Average number of MP direct callouts made per callout_process call. "
107 SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0,
108 "Number of entries in callwheel and size of timeout() preallocation");
112 * allocate more timeout table slots when table overflows.
114 u_int callwheelsize, callwheelmask;
117 * The callout cpu exec entities represent the information necessary to
118 * describe the state of callouts currently running on the CPU and the
119 * information needed to migrate callouts to a new callout cpu. In particular,
120 * the first entry of the cc_exec_entity array holds the information for a
121 * callout running in SWI thread context, while the second one holds the
122 * information for a callout running directly from hardware interrupt context.
123 * The cached information is very important for deferring migration when
124 * the migrating callout is already running.
127 struct callout *cc_next;
128 struct callout *cc_curr;
130 void (*ce_migration_func)(void *);
131 void *ce_migration_arg;
132 int ce_migration_cpu;
133 sbintime_t ce_migration_time;
134 sbintime_t ce_migration_prec;
141 * There is one struct callout_cpu per cpu, holding all relevant
142 * state for the callout processing thread on the individual CPU.
145 struct mtx_padalign cc_lock;
146 struct cc_exec cc_exec_entity[2];
147 struct callout *cc_callout;
148 struct callout_list *cc_callwheel;
149 struct callout_tailq cc_expireq;
150 struct callout_slist cc_callfree;
151 sbintime_t cc_firstevent;
152 sbintime_t cc_lastscan;
157 #define cc_exec_curr cc_exec_entity[0].cc_curr
158 #define cc_exec_next cc_exec_entity[0].cc_next
159 #define cc_exec_cancel cc_exec_entity[0].cc_cancel
160 #define cc_exec_waiting cc_exec_entity[0].cc_waiting
161 #define cc_exec_curr_dir cc_exec_entity[1].cc_curr
162 #define cc_exec_next_dir cc_exec_entity[1].cc_next
163 #define cc_exec_cancel_dir cc_exec_entity[1].cc_cancel
164 #define cc_exec_waiting_dir cc_exec_entity[1].cc_waiting
167 #define cc_migration_func cc_exec_entity[0].ce_migration_func
168 #define cc_migration_arg cc_exec_entity[0].ce_migration_arg
169 #define cc_migration_cpu cc_exec_entity[0].ce_migration_cpu
170 #define cc_migration_time cc_exec_entity[0].ce_migration_time
171 #define cc_migration_prec cc_exec_entity[0].ce_migration_prec
172 #define cc_migration_func_dir cc_exec_entity[1].ce_migration_func
173 #define cc_migration_arg_dir cc_exec_entity[1].ce_migration_arg
174 #define cc_migration_cpu_dir cc_exec_entity[1].ce_migration_cpu
175 #define cc_migration_time_dir cc_exec_entity[1].ce_migration_time
176 #define cc_migration_prec_dir cc_exec_entity[1].ce_migration_prec
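/*
 * Note (added): the "_dir" aliases above simply select entry 1 of
 * cc_exec_entity, i.e. the state for callouts run directly from hardware
 * interrupt context, while the plain names select entry 0, the state for
 * callouts run from the SWI thread.  The "direct" argument passed around
 * below is used as this array index.
 */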
177 #ifdef SMP
178 struct callout_cpu cc_cpu[MAXCPU];
179 #define CPUBLOCK MAXCPU
180 #define CC_CPU(cpu) (&cc_cpu[(cpu)])
181 #define CC_SELF() CC_CPU(PCPU_GET(cpuid))
182 #else
183 struct callout_cpu cc_cpu;
184 #define CC_CPU(cpu) &cc_cpu
185 #define CC_SELF() &cc_cpu
186 #endif
187 #define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock)
188 #define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock)
189 #define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED)
191 static int timeout_cpu;
193 static void callout_cpu_init(struct callout_cpu *cc);
194 static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
195 #ifdef CALLOUT_PROFILING
196 int *mpcalls, int *lockcalls, int *gcalls,
200 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
204 * cc_curr - If a callout is in progress, it is cc_curr.
205 * If cc_curr is non-NULL, threads waiting in
206 * callout_drain() will be woken up as soon as the
207 * relevant callout completes.
208 * cc_cancel - Changing to 1 with both cc_lock and c_lock held
209 * guarantees that the current callout will not run.
210 * The softclock() function sets this to 0 before it
211 * drops cc_lock to acquire c_lock, and it calls
212 * the handler only if cc_cancel is still 0 after
213 * c_lock is successfully acquired.
214 * cc_waiting - If a thread is waiting in callout_drain(), then
215 * cc_waiting is nonzero. Set only when
216 * cc_curr is non-NULL.
220 * Resets the execution entity tied to a specific callout cpu.
223 cc_cce_cleanup(struct callout_cpu *cc, int direct)
226 cc->cc_exec_entity[direct].cc_curr = NULL;
227 cc->cc_exec_entity[direct].cc_next = NULL;
228 cc->cc_exec_entity[direct].cc_cancel = false;
229 cc->cc_exec_entity[direct].cc_waiting = false;
231 cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK;
232 cc->cc_exec_entity[direct].ce_migration_time = 0;
233 cc->cc_exec_entity[direct].ce_migration_prec = 0;
234 cc->cc_exec_entity[direct].ce_migration_func = NULL;
235 cc->cc_exec_entity[direct].ce_migration_arg = NULL;
240 * Checks if migration is requested by a specific callout cpu.
243 cc_cce_migrating(struct callout_cpu *cc, int direct)
247 return (cc->cc_exec_entity[direct].ce_migration_cpu != CPUBLOCK);
254 * Kernel low level callwheel initialization
255 * called on cpu0 during kernel startup.
258 callout_callwheel_init(void *dummy)
260 struct callout_cpu *cc;
263 * Calculate the size of the callout wheel and the preallocated
264 * timeout() structures.
265 * XXX: Clip ncallout to the value the previous maxusers-based formula
266 * would yield at its maximum of 384. This is still huge, but acceptable.
268 ncallout = imin(16 + maxproc + maxfiles, 18508);
269 TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
272 * Calculate the callout wheel size; it should be the next power of two
273 * higher than 'ncallout'.
275 callwheelsize = 1 << fls(ncallout);
276 callwheelmask = callwheelsize - 1;
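/*
 * Illustrative note (added, hypothetical values): with the formula above,
 * maxproc = 1044 and maxfiles = 1064 would give ncallout = 16 + 1044 +
 * 1064 = 2124, fls(2124) = 12, so callwheelsize = 1 << 12 = 4096 buckets
 * and callwheelmask = 0xfff.  The point is only the power-of-two rounding
 * behaviour of "1 << fls(ncallout)".
 */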
279 * Only cpu0 handles timeout(9) and receives a preallocation.
281 * XXX: Once all timeout(9) consumers are converted this can
282 * be removed.
284 timeout_cpu = PCPU_GET(cpuid);
285 cc = CC_CPU(timeout_cpu);
286 cc->cc_callout = malloc(ncallout * sizeof(struct callout),
287 M_CALLOUT, M_WAITOK);
288 callout_cpu_init(cc);
290 SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
293 * Initialize the per-cpu callout structures.
296 callout_cpu_init(struct callout_cpu *cc)
301 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
302 SLIST_INIT(&cc->cc_callfree);
303 cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
304 M_CALLOUT, M_WAITOK);
305 for (i = 0; i < callwheelsize; i++)
306 LIST_INIT(&cc->cc_callwheel[i]);
307 TAILQ_INIT(&cc->cc_expireq);
308 cc->cc_firstevent = INT64_MAX;
309 for (i = 0; i < 2; i++)
310 cc_cce_cleanup(cc, i);
311 if (cc->cc_callout == NULL) /* Only cpu0 handles timeout(9) */
313 for (i = 0; i < ncallout; i++) {
314 c = &cc->cc_callout[i];
316 c->c_flags = CALLOUT_LOCAL_ALLOC;
317 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
323 * Switches the cpu tied to a specific callout.
324 * The function expects the incoming callout cpu to be locked and returns
325 * with the new callout cpu locked.
327 static struct callout_cpu *
328 callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
330 struct callout_cpu *new_cc;
332 MPASS(c != NULL && cc != NULL);
336 * Avoid interrupts and preemption firing after the callout cpu
337 * is blocked in order to avoid deadlocks, as the new thread
338 * may try to acquire the callout cpu lock.
343 new_cc = CC_CPU(new_cpu);
352 * Start standard softclock thread.
355 start_softclock(void *dummy)
357 struct callout_cpu *cc;
362 cc = CC_CPU(timeout_cpu);
363 if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
364 INTR_MPSAFE, &cc->cc_cookie))
365 panic("died while creating standard software ithreads");
368 if (cpu == timeout_cpu)
371 cc->cc_callout = NULL; /* Only cpu0 handles timeout(9). */
372 callout_cpu_init(cc);
373 if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
374 INTR_MPSAFE, &cc->cc_cookie))
375 panic("died while creating standard software ithreads");
379 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
381 #define CC_HASH_SHIFT 8
384 callout_hash(sbintime_t sbt)
387 return (sbt >> (32 - CC_HASH_SHIFT));
391 callout_get_bucket(sbintime_t sbt)
394 return (callout_hash(sbt) & callwheelmask);
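/*
 * Illustrative sketch (added): sbintime_t is a 32.32 fixed-point value, so
 * shifting right by (32 - CC_HASH_SHIFT) = 24 bits makes consecutive bucket
 * indices 2^24 fractional units, i.e. 1/256 of a second, apart.  The example
 * below assumes CC_HASH_SHIFT == 8 and callwheelsize == 4096; it is compiled
 * out and only shows how a time maps to a wheel bucket.
 */
#if 0
static void
callout_bucket_example(void)
{
	sbintime_t t = SBT_1S + SBT_1S / 2;	/* 1.5 s after boot */
	u_int hash, bucket;

	hash = callout_hash(t);			/* 1.5 * 256 = 384 */
	bucket = hash & callwheelmask;		/* 384 & 0xfff = 384 */
	printf("hash %u bucket %u\n", hash, bucket);
}
#endif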
398 callout_process(sbintime_t now)
400 struct callout *tmp, *tmpn;
401 struct callout_cpu *cc;
402 struct callout_list *sc;
403 sbintime_t first, last, max, tmp_max;
405 u_int firstb, lastb, nowb;
406 #ifdef CALLOUT_PROFILING
407 int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
408 #endif
410 cc = CC_SELF();
411 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
413 /* Compute the buckets of the last scan and present times. */
414 firstb = callout_hash(cc->cc_lastscan);
415 cc->cc_lastscan = now;
416 nowb = callout_hash(now);
418 /* Compute the last bucket and minimum time of the bucket after it. */
420 lookahead = (SBT_1S / 16);
421 else if (nowb - firstb == 1)
422 lookahead = (SBT_1S / 8);
424 lookahead = (SBT_1S / 2);
426 first += (lookahead / 2);
428 last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
429 lastb = callout_hash(last) - 1;
433 * Check if we wrapped around the entire wheel from the last scan.
434 * If so, we need to scan the entire wheel for pending callouts.
436 if (lastb - firstb >= callwheelsize) {
437 lastb = firstb + callwheelsize - 1;
438 if (nowb - firstb >= callwheelsize)
442 /* Iterate callwheel from firstb to nowb and then up to lastb. */
444 sc = &cc->cc_callwheel[firstb & callwheelmask];
445 tmp = LIST_FIRST(sc);
446 while (tmp != NULL) {
447 /* Run the callout if the present time is within its allowed window. */
448 if (tmp->c_time <= now) {
450 * Consumer told us the callout may be run
451 * directly from hardware interrupt context.
453 if (tmp->c_flags & CALLOUT_DIRECT) {
454 #ifdef CALLOUT_PROFILING
457 cc->cc_exec_next_dir =
458 LIST_NEXT(tmp, c_links.le);
459 cc->cc_bucket = firstb & callwheelmask;
460 LIST_REMOVE(tmp, c_links.le);
461 softclock_call_cc(tmp, cc,
462 #ifdef CALLOUT_PROFILING
463 &mpcalls_dir, &lockcalls_dir, NULL,
466 tmp = cc->cc_exec_next_dir;
468 tmpn = LIST_NEXT(tmp, c_links.le);
469 LIST_REMOVE(tmp, c_links.le);
470 TAILQ_INSERT_TAIL(&cc->cc_expireq,
472 tmp->c_flags |= CALLOUT_PROCESSED;
477 /* Skip events from the distant future. */
478 if (tmp->c_time >= max)
481 * The event's minimal time is later than the present maximal
482 * time, so it cannot be aggregated.
484 if (tmp->c_time > last) {
488 /* Update first and last time, respecting this event. */
489 if (tmp->c_time < first)
491 tmp_max = tmp->c_time + tmp->c_precision;
495 tmp = LIST_NEXT(tmp, c_links.le);
497 /* Proceed with the next bucket. */
500 * Stop if we have looked past the present time and found
501 * an event that we cannot execute now.
502 * Stop if we have looked far enough into the future.
504 } while (((int)(firstb - lastb)) <= 0);
505 cc->cc_firstevent = last;
506 #ifndef NO_EVENTTIMERS
507 cpu_new_callout(curcpu, last, first);
508 #endif
509 #ifdef CALLOUT_PROFILING
510 avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
511 avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
512 avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
514 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
516 * swi_sched acquires the thread lock, so we don't want to call it
517 * with cc_lock held; incorrect locking order.
519 if (!TAILQ_EMPTY(&cc->cc_expireq))
520 swi_sched(cc->cc_cookie, 0);
523 static struct callout_cpu *
524 callout_lock(struct callout *c)
526 struct callout_cpu *cc;
532 if (cpu == CPUBLOCK) {
533 while (c->c_cpu == CPUBLOCK)
548 callout_cc_add(struct callout *c, struct callout_cpu *cc,
549 sbintime_t sbt, sbintime_t precision, void (*func)(void *),
550 void *arg, int cpu, int flags)
555 if (sbt < cc->cc_lastscan)
556 sbt = cc->cc_lastscan;
558 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
559 if (flags & C_DIRECT_EXEC)
560 c->c_flags |= CALLOUT_DIRECT;
561 c->c_flags &= ~CALLOUT_PROCESSED;
564 c->c_precision = precision;
565 bucket = callout_get_bucket(c->c_time);
566 CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
567 c, (int)(c->c_precision >> 32),
568 (u_int)(c->c_precision & 0xffffffff));
569 LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
570 if (cc->cc_bucket == bucket)
571 cc->cc_exec_next_dir = c;
572 #ifndef NO_EVENTTIMERS
574 * Inform the eventtimers(4) subsystem that a new callout
575 * has been inserted, but only if really required.
577 sbt = c->c_time + c->c_precision;
578 if (sbt < cc->cc_firstevent) {
579 cc->cc_firstevent = sbt;
580 cpu_new_callout(cpu, sbt, c->c_time);
586 callout_cc_del(struct callout *c, struct callout_cpu *cc)
589 if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
592 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
596 softclock_call_cc(struct callout *c, struct callout_cpu *cc,
597 #ifdef CALLOUT_PROFILING
598 int *mpcalls, int *lockcalls, int *gcalls,
602 void (*c_func)(void *);
604 struct lock_class *class;
605 struct lock_object *c_lock;
606 int c_flags, sharedlock;
608 struct callout_cpu *new_cc;
609 void (*new_func)(void *);
612 sbintime_t new_prec, new_time;
614 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
615 sbintime_t sbt1, sbt2;
617 static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */
618 static timeout_t *lastfunc;
621 KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
622 (CALLOUT_PENDING | CALLOUT_ACTIVE),
623 ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
624 class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
625 sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
629 c_flags = c->c_flags;
630 if (c->c_flags & CALLOUT_LOCAL_ALLOC)
631 c->c_flags = CALLOUT_LOCAL_ALLOC;
633 c->c_flags &= ~CALLOUT_PENDING;
634 cc->cc_exec_entity[direct].cc_curr = c;
635 cc->cc_exec_entity[direct].cc_cancel = false;
637 if (c_lock != NULL) {
638 class->lc_lock(c_lock, sharedlock);
640 * The callout may have been cancelled
641 * while we switched locks.
643 if (cc->cc_exec_entity[direct].cc_cancel) {
644 class->lc_unlock(c_lock);
647 /* The callout cannot be stopped now. */
648 cc->cc_exec_entity[direct].cc_cancel = true;
649 if (c_lock == &Giant.lock_object) {
650 #ifdef CALLOUT_PROFILING
653 CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
656 #ifdef CALLOUT_PROFILING
659 CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
663 #ifdef CALLOUT_PROFILING
666 CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
669 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
672 THREAD_NO_SLEEPING();
673 SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
675 SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
676 THREAD_SLEEPING_OK();
677 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
681 if (lastfunc != c_func || sbt2 > maxdt * 2) {
684 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
685 c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
691 CTR1(KTR_CALLOUT, "callout %p finished", c);
692 if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
693 class->lc_unlock(c_lock);
696 KASSERT(cc->cc_exec_entity[direct].cc_curr == c, ("mishandled cc_curr"));
697 cc->cc_exec_entity[direct].cc_curr = NULL;
698 if (cc->cc_exec_entity[direct].cc_waiting) {
700 * There is someone waiting for the
701 * callout to complete.
702 * If the callout was scheduled for
703 * migration just cancel it.
705 if (cc_cce_migrating(cc, direct)) {
706 cc_cce_cleanup(cc, direct);
709 * We should assert here that the callout is not
710 * destroyed, but that is not easy.
712 c->c_flags &= ~CALLOUT_DFRMIGRATION;
714 cc->cc_exec_entity[direct].cc_waiting = false;
716 wakeup(&cc->cc_exec_entity[direct].cc_waiting);
718 } else if (cc_cce_migrating(cc, direct)) {
719 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
720 ("Migrating legacy callout %p", c));
723 * If the callout was scheduled for
724 * migration just perform it now.
726 new_cpu = cc->cc_exec_entity[direct].ce_migration_cpu;
727 new_time = cc->cc_exec_entity[direct].ce_migration_time;
728 new_prec = cc->cc_exec_entity[direct].ce_migration_prec;
729 new_func = cc->cc_exec_entity[direct].ce_migration_func;
730 new_arg = cc->cc_exec_entity[direct].ce_migration_arg;
731 cc_cce_cleanup(cc, direct);
734 * We should assert here that the callout is not destroyed,
735 * but that is not easy.
737 * First of all, handle deferred callout stops.
739 if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
741 "deferred cancelled %p func %p arg %p",
742 c, new_func, new_arg);
743 callout_cc_del(c, cc);
746 c->c_flags &= ~CALLOUT_DFRMIGRATION;
748 new_cc = callout_cpu_switch(c, cc, new_cpu);
749 flags = (direct) ? C_DIRECT_EXEC : 0;
750 callout_cc_add(c, new_cc, new_time, new_prec, new_func,
751 new_arg, new_cpu, flags);
755 panic("migration should not happen");
759 * If the current callout is locally allocated (from
760 * timeout(9)) then put it on the freelist.
762 * Note: we need to check the cached copy of c_flags because
763 * if it was not local, then it's not safe to deref the
764 * callout structure anymore.
766 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
767 c->c_flags == CALLOUT_LOCAL_ALLOC,
768 ("corrupted callout"));
769 if (c_flags & CALLOUT_LOCAL_ALLOC)
770 callout_cc_del(c, cc);
774 * The callout mechanism is based on the work of Adam M. Costello and
775 * George Varghese, published in a technical report entitled "Redesigning
776 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
777 * in FreeBSD by Justin T. Gibbs. The original work on the data structures
778 * used in this implementation was published by G. Varghese and T. Lauck in
779 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
780 * the Efficient Implementation of a Timer Facility" in the Proceedings of
781 * the 11th ACM Annual Symposium on Operating Systems Principles,
782 * Austin, Texas Nov 1987.
786 * Software (low priority) clock interrupt.
787 * Run periodic events from timeout queue.
792 struct callout_cpu *cc;
794 #ifdef CALLOUT_PROFILING
795 int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
796 #endif
798 cc = (struct callout_cpu *)arg;
800 while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
801 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
802 softclock_call_cc(c, cc,
803 #ifdef CALLOUT_PROFILING
804 &mpcalls, &lockcalls, &gcalls,
807 #ifdef CALLOUT_PROFILING
811 #ifdef CALLOUT_PROFILING
812 avg_depth += (depth * 1000 - avg_depth) >> 8;
813 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
814 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
815 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
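/*
 * Note (added): the statements above maintain an exponential moving
 * average in fixed point: each sample is scaled by 1000 (so the sysctls
 * report units of 1/1000) and blended in with a weight of 1/256 per
 * softclock() call, since "avg += (sample * 1000 - avg) >> 8" is
 * avg = avg + (sample * 1000 - avg) / 256.
 */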
822 * Execute a function after a specified length of time.
824 * untimeout --
825 * Cancel previous timeout function call.
827 * callout_handle_init --
828 * Initialize a handle so that using it with untimeout is benign.
830 * See AT&T BCI Driver Reference Manual for specification. This
831 * implementation differs from that one in that although an
832 * identification value is returned from timeout, the original
833 * arguments to timeout as well as the identifier are used to
834 * identify entries for untimeout.
836 struct callout_handle
837 timeout(ftn, arg, to_ticks)
842 struct callout_cpu *cc;
844 struct callout_handle handle;
846 cc = CC_CPU(timeout_cpu);
848 /* Fill in the next free callout structure. */
849 new = SLIST_FIRST(&cc->cc_callfree);
851 /* XXX Attempt to malloc first */
852 panic("timeout table full");
853 SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
854 callout_reset(new, to_ticks, ftn, arg);
855 handle.callout = new;
862 untimeout(ftn, arg, handle)
865 struct callout_handle handle;
867 struct callout_cpu *cc;
870 * Check for a handle that was initialized
871 * by callout_handle_init, but never used
872 * for a real timeout.
874 if (handle.callout == NULL)
877 cc = callout_lock(handle.callout);
878 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
879 callout_stop(handle.callout);
884 callout_handle_init(struct callout_handle *handle)
886 handle->callout = NULL;
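/*
 * Illustrative sketch (added): minimal use of the legacy timeout(9)
 * interface documented above.  The driver softc, its members and the
 * one-second period are hypothetical; only the timeout(), untimeout()
 * and callout_handle_init() calls follow the interface implemented in
 * this file.  Compiled out.
 */
#if 0
struct foo_softc {
	struct callout_handle	foo_timeout_handle;
};

static void
foo_tick(void *arg)
{
	struct foo_softc *sc = arg;

	/* ... periodic work; reschedule ourselves one second later. */
	sc->foo_timeout_handle = timeout(foo_tick, sc, hz);
}

static void
foo_attach(struct foo_softc *sc)
{

	/* Make the handle safe to pass to untimeout() before first use. */
	callout_handle_init(&sc->foo_timeout_handle);
	sc->foo_timeout_handle = timeout(foo_tick, sc, hz);
}

static void
foo_detach(struct foo_softc *sc)
{

	/* Cancel the pending call, identified by function, arg and handle. */
	untimeout(foo_tick, sc, sc->foo_timeout_handle);
}
#endif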
890 * New interface; clients allocate their own callout structures.
892 * callout_reset() - establish or change a timeout
893 * callout_stop() - disestablish a timeout
894 * callout_init() - initialize a callout structure so that it can
895 * safely be passed to callout_reset() and callout_stop()
897 * <sys/callout.h> defines three convenience macros:
899 * callout_active() - returns truth if callout has not been stopped,
900 * drained, or deactivated since the last time the callout was
901 * reset.
902 * callout_pending() - returns truth if callout is still waiting for timeout
903 * callout_deactivate() - marks the callout as having been serviced
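/*
 * Illustrative sketch (added): typical use of the callout(9) interface
 * described above, with the callout protected by the caller's mutex via
 * callout_init_mtx().  The softc, mutex name and five-second period are
 * hypothetical.  Compiled out.
 */
#if 0
struct bar_softc {
	struct mtx	bar_mtx;
	struct callout	bar_callout;
};

static void
bar_timer(void *arg)
{
	struct bar_softc *sc = arg;

	/* bar_mtx is held here because of callout_init_mtx() below. */
	mtx_assert(&sc->bar_mtx, MA_OWNED);
	/* ... periodic work ... */
	callout_reset(&sc->bar_callout, 5 * hz, bar_timer, sc);
}

static void
bar_start(struct bar_softc *sc)
{

	mtx_init(&sc->bar_mtx, "bar", NULL, MTX_DEF);
	callout_init_mtx(&sc->bar_callout, &sc->bar_mtx, 0);
	mtx_lock(&sc->bar_mtx);
	callout_reset(&sc->bar_callout, 5 * hz, bar_timer, sc);
	mtx_unlock(&sc->bar_mtx);
}

static void
bar_stop(struct bar_softc *sc)
{

	/*
	 * callout_drain() waits for a running handler to finish and must
	 * not be called with bar_mtx held.
	 */
	callout_drain(&sc->bar_callout);
	mtx_destroy(&sc->bar_mtx);
}
#endif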
906 callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
907 void (*ftn)(void *), void *arg, int cpu, int flags)
909 sbintime_t to_sbt, pr;
910 struct callout_cpu *cc;
911 int cancelled, direct;
914 if (flags & C_ABSOLUTE) {
917 if ((flags & C_HARDCLOCK) && (sbt < tick_sbt))
919 if ((flags & C_HARDCLOCK) ||
920 #ifdef NO_EVENTTIMERS
921 sbt >= sbt_timethreshold) {
922 to_sbt = getsbinuptime();
924 /* Add safety belt for the case of hz > 1000. */
925 to_sbt += tc_tick_sbt - tick_sbt;
927 sbt >= sbt_tickthreshold) {
929 * Obtain the time of the last hardclock() call on
930 * this CPU directly from the kern_clocksource.c.
931 * This value is per-CPU, but it is equal for all
932 * active ones.
935 to_sbt = DPCPU_GET(hardclocktime);
938 to_sbt = DPCPU_GET(hardclocktime);
942 if ((flags & C_HARDCLOCK) == 0)
945 to_sbt = sbinuptime();
947 pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
948 sbt >> C_PRELGET(flags));
953 * Don't allow migration of pre-allocated callouts lest they
954 * become homeless.
956 if (c->c_flags & CALLOUT_LOCAL_ALLOC)
958 direct = (c->c_flags & CALLOUT_DIRECT) != 0;
959 KASSERT(!direct || c->c_lock == NULL,
960 ("%s: direct callout %p has lock", __func__, c));
961 cc = callout_lock(c);
962 if (cc->cc_exec_entity[direct].cc_curr == c) {
964 * We're being asked to reschedule a callout which is
965 * currently in progress. If there is a lock then we
966 * can cancel the callout if it has not really started.
968 if (c->c_lock != NULL && !cc->cc_exec_entity[direct].cc_cancel)
969 cancelled = cc->cc_exec_entity[direct].cc_cancel = true;
970 if (cc->cc_exec_entity[direct].cc_waiting) {
972 * Someone has called callout_drain to kill this
973 * callout. Don't reschedule.
975 CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
976 cancelled ? "cancelled" : "failed to cancel",
977 c, c->c_func, c->c_arg);
982 if (c->c_flags & CALLOUT_PENDING) {
983 if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
984 if (cc->cc_exec_next_dir == c)
985 cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le);
986 LIST_REMOVE(c, c_links.le);
988 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
990 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
995 * If the callout must migrate, try to perform it immediately.
996 * If the callout is currently running, just defer the migration
997 * to a more appropriate moment.
999 if (c->c_cpu != cpu) {
1000 if (cc->cc_exec_entity[direct].cc_curr == c) {
1001 cc->cc_exec_entity[direct].ce_migration_cpu = cpu;
1002 cc->cc_exec_entity[direct].ce_migration_time
1004 cc->cc_exec_entity[direct].ce_migration_prec
1006 cc->cc_exec_entity[direct].ce_migration_func = ftn;
1007 cc->cc_exec_entity[direct].ce_migration_arg = arg;
1008 c->c_flags |= CALLOUT_DFRMIGRATION;
1010 "migration of %p func %p arg %p in %d.%08x to %u deferred",
1011 c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1012 (u_int)(to_sbt & 0xffffffff), cpu);
1016 cc = callout_cpu_switch(c, cc, cpu);
1020 callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
1021 CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
1022 cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1023 (u_int)(to_sbt & 0xffffffff));
1030 * Common idioms that can be optimized in the future.
1033 callout_schedule_on(struct callout *c, int to_ticks, int cpu)
1035 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
1039 callout_schedule(struct callout *c, int to_ticks)
1041 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
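/*
 * Illustrative sketch (added): the sbintime_t based interface above can
 * express sub-tick periods and a precision tolerance.  The 100 ms period
 * and the C_PREL(3) relative precision are hypothetical; callout_reset_sbt()
 * is the convenience macro wrapping callout_reset_sbt_on().  Compiled out.
 */
#if 0
static void
baz_arm(struct callout *c, void (*fn)(void *), void *arg)
{

	/*
	 * Fire roughly 100 ms from now; C_PREL(3) allows the event to be
	 * batched with others within about 100 ms / 2^3 = 12.5 ms.
	 */
	callout_reset_sbt(c, 100 * SBT_1MS, 0, fn, arg, C_PREL(3));
}
#endif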
1045 _callout_stop_safe(c, safe)
1049 struct callout_cpu *cc, *old_cc;
1050 struct lock_class *class;
1051 int direct, sq_locked, use_lock;
1054 * Some old subsystems don't hold Giant while running a callout_stop(),
1055 * so just discard this check for the moment.
1057 if (!safe && c->c_lock != NULL) {
1058 if (c->c_lock == &Giant.lock_object)
1059 use_lock = mtx_owned(&Giant);
1062 class = LOCK_CLASS(c->c_lock);
1063 class->lc_assert(c->c_lock, LA_XLOCKED);
1067 direct = (c->c_flags & CALLOUT_DIRECT) != 0;
1071 cc = callout_lock(c);
1074 * If the callout was migrating while the callout cpu lock was
1075 * dropped, just drop the sleepqueue lock and check the states
1076 * again.
1078 if (sq_locked != 0 && cc != old_cc) {
1081 sleepq_release(&old_cc->cc_exec_entity[direct].cc_waiting);
1086 panic("migration should not happen");
1091 * If the callout isn't pending, it's not on the queue, so
1092 * don't attempt to remove it from the queue. We can try to
1093 * stop it by other means however.
1095 if (!(c->c_flags & CALLOUT_PENDING)) {
1096 c->c_flags &= ~CALLOUT_ACTIVE;
1099 * If it wasn't on the queue and it isn't the current
1100 * callout, then we can't stop it, so just bail.
1102 if (cc->cc_exec_entity[direct].cc_curr != c) {
1103 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1104 c, c->c_func, c->c_arg);
1108 &cc->cc_exec_entity[direct].cc_waiting);
1114 * The current callout is running (or just
1115 * about to run) and blocking is allowed, so
1116 * just wait for the current invocation to
1117 * finish.
1119 while (cc->cc_exec_entity[direct].cc_curr == c) {
1121 * Use direct calls to sleepqueue interface
1122 * instead of cv/msleep in order to avoid
1123 * a LOR between cc_lock and sleepqueue
1124 * chain spinlocks. This piece of code
1125 * actually emulates an msleep_spin() call.
1127 * If we already have the sleepqueue chain
1128 * locked, then we can safely block. If we
1129 * don't already have it locked, however,
1130 * we have to drop the cc_lock to lock
1131 * it. This opens several races, so we
1132 * restart at the beginning once we have
1133 * both locks. If nothing has changed, then
1134 * we will end up back here with sq_locked
1135 * set.
1140 &cc->cc_exec_entity[direct].cc_waiting);
1147 * Migration could be cancelled here, but
1148 * as long as it is not yet certain when it
1149 * will be packed up, just let softclock()
1150 * take care of it.
1152 cc->cc_exec_entity[direct].cc_waiting = true;
1156 &cc->cc_exec_entity[direct].cc_waiting,
1157 &cc->cc_lock.lock_object, "codrain",
1160 &cc->cc_exec_entity[direct].cc_waiting,
1165 /* Reacquire locks previously released. */
1169 } else if (use_lock &&
1170 !cc->cc_exec_entity[direct].cc_cancel) {
1172 * The current callout is waiting for its
1173 * lock which we hold. Cancel the callout
1174 * and return. After our caller drops the
1175 * lock, the callout will be skipped in
1176 * softclock().
1178 cc->cc_exec_entity[direct].cc_cancel = true;
1179 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1180 c, c->c_func, c->c_arg);
1181 KASSERT(!cc_cce_migrating(cc, direct),
1182 ("callout wrongly scheduled for migration"));
1184 KASSERT(!sq_locked, ("sleepqueue chain locked"));
1186 } else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
1187 c->c_flags &= ~CALLOUT_DFRMIGRATION;
1188 CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
1189 c, c->c_func, c->c_arg);
1193 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1194 c, c->c_func, c->c_arg);
1196 KASSERT(!sq_locked, ("sleepqueue chain still locked"));
1200 sleepq_release(&cc->cc_exec_entity[direct].cc_waiting);
1202 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
1204 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1205 c, c->c_func, c->c_arg);
1206 if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
1207 if (cc->cc_exec_next_dir == c)
1208 cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le);
1209 LIST_REMOVE(c, c_links.le);
1211 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1212 callout_cc_del(c, cc);
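/*
 * Note (added): callout_stop() and callout_drain() are thin wrappers around
 * _callout_stop_safe() above (safe == 0 and safe != 0 respectively).  Only
 * the draining variant sleeps until a handler that is already running has
 * finished, which is why it must not be called while holding the callout's
 * lock or from a context that cannot sleep.
 */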
1219 callout_init(c, mpsafe)
1223 bzero(c, sizeof *c);
1226 c->c_flags = CALLOUT_RETURNUNLOCKED;
1228 c->c_lock = &Giant.lock_object;
1231 c->c_cpu = timeout_cpu;
1235 _callout_init_lock(c, lock, flags)
1237 struct lock_object *lock;
1240 bzero(c, sizeof *c);
1242 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
1243 ("callout_init_lock: bad flags %d", flags));
1244 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
1245 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
1246 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
1247 (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
1249 c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
1250 c->c_cpu = timeout_cpu;
1253 #ifdef APM_FIXUP_CALLTODO
1255 * Adjust the kernel calltodo timeout list. This routine is used after
1256 * an APM resume to recalculate the calltodo timer list values with the
1257 * number of ticks we have been sleeping. The next hardclock() will detect
1258 * that there are fired timers and run softclock() to execute them.
1260 * Please note, I have not done an exhaustive analysis of what code this
1261 * might break. I am motivated to have my select()'s and alarm()'s that
1262 * have expired during suspend firing upon resume so that the applications
1263 * which set the timer can do the maintanence the timer was for as close
1264 * as possible to the originally intended time. Testing this code for a
1265 * week showed that resuming from a suspend resulted in 22 to 25 timers
1266 * firing, which seemed independant on whether the suspend was 2 hours or
1267 * 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu>
1270 adjust_timeout_calltodo(time_change)
1271 struct timeval *time_change;
1273 register struct callout *p;
1274 unsigned long delta_ticks;
1277 * How many ticks were we asleep?
1278 * (stolen from tvtohz()).
1281 /* Don't do anything */
1282 if (time_change->tv_sec < 0)
1284 else if (time_change->tv_sec <= LONG_MAX / 1000000)
1285 delta_ticks = (time_change->tv_sec * 1000000 +
1286 time_change->tv_usec + (tick - 1)) / tick + 1;
1287 else if (time_change->tv_sec <= LONG_MAX / hz)
1288 delta_ticks = time_change->tv_sec * hz +
1289 (time_change->tv_usec + (tick - 1)) / tick + 1;
1291 delta_ticks = LONG_MAX;
1293 if (delta_ticks > INT_MAX)
1294 delta_ticks = INT_MAX;
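/*
 * Worked example (added, hypothetical values): with hz = 1000 the tick
 * length is tick = 1000000 / hz = 1000 us.  A suspend of 2.5 s gives
 * time_change = { 2, 500000 }, so the first branch above yields
 * delta_ticks = (2 * 1000000 + 500000 + 999) / 1000 + 1 = 2501 ticks,
 * i.e. the sleep time rounded up to whole ticks plus one tick of slop.
 */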
1297 * Now rip through the timer calltodo list looking for timers
1298 * to expire.
1301 /* don't collide with softclock() */
1303 for (p = calltodo.c_next; p != NULL; p = p->c_next) {
1304 p->c_time -= delta_ticks;
1306 /* Break if the timer had more time on it than delta_ticks */
1310 /* take back the ticks the timer didn't use (p->c_time <= 0) */
1311 delta_ticks = -p->c_time;
1317 #endif /* APM_FIXUP_CALLTODO */
1320 flssbt(sbintime_t sbt)
1323 sbt += (uint64_t)sbt >> 1;
1324 if (sizeof(long) >= sizeof(sbintime_t))
1325 return (flsl(sbt));
1326 if (sbt >= SBT_1S)
1327 return (flsl(((uint64_t)sbt) >> 32) + 32);
1332 * Dump an immediate statistics snapshot of the scheduled callouts.
1335 sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
1337 struct callout *tmp;
1338 struct callout_cpu *cc;
1339 struct callout_list *sc;
1340 sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
1341 int ct[64], cpr[64], ccpbk[32];
1342 int error, val, i, count, tcum, pcum, maxc, c, medc;
1348 error = sysctl_handle_int(oidp, &val, 0, req);
1349 if (error != 0 || req->newptr == NULL)
1352 st = spr = maxt = maxpr = 0;
1353 bzero(ccpbk, sizeof(ccpbk));
1354 bzero(ct, sizeof(ct));
1355 bzero(cpr, sizeof(cpr));
1361 cc = CC_CPU(timeout_cpu);
1364 for (i = 0; i < callwheelsize; i++) {
1365 sc = &cc->cc_callwheel[i];
1367 LIST_FOREACH(tmp, sc, c_links.le) {
1369 t = tmp->c_time - now;
1373 spr += tmp->c_precision / SBT_1US;
1376 if (tmp->c_precision > maxpr)
1377 maxpr = tmp->c_precision;
1379 cpr[flssbt(tmp->c_precision)]++;
1383 ccpbk[fls(c + c / 2)]++;
1391 for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
1393 medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1394 for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
1396 medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1397 for (i = 0, c = 0; i < 32 && c < count / 2; i++)
1399 medc = (i >= 2) ? (1 << (i - 2)) : 0;
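/*
 * Note (added): ct[], cpr[] and ccpbk[] are power-of-two histograms indexed
 * by flssbt()/fls(), so the loops above walk the cumulative counts to find
 * the bin that contains the median and then convert the bin index back into
 * an approximate time, precision or per-bucket count by shifting 1 left.
 */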
1401 printf("Scheduled callouts statistic snapshot:\n");
1402 printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n",
1403 count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
1404 printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n",
1406 count / callwheelsize / mp_ncpus,
1407 (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
1409 printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1410 medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
1411 (st / count) / 1000000, (st / count) % 1000000,
1412 maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
1413 printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1414 medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
1415 (spr / count) / 1000000, (spr / count) % 1000000,
1416 maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
1417 printf(" Distribution: \tbuckets\t time\t tcum\t"
1419 for (i = 0, tcum = pcum = 0; i < 64; i++) {
1420 if (ct[i] == 0 && cpr[i] == 0)
1422 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
1425 printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
1426 t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
1427 i - 1 - (32 - CC_HASH_SHIFT),
1428 ct[i], tcum, cpr[i], pcum);
1432 SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
1433 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1434 0, 0, sysctl_kern_callout_stat, "I",
1435 "Dump immediate statistic snapshot of the scheduled callouts");