/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_callout_profiling.h"
#include "opt_kdtrace.h"
#include "opt_timer.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#include <machine/cpu.h>
#ifndef NO_EVENTTIMERS
DPCPU_DECLARE(sbintime_t, hardclocktime);
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");
#ifdef CALLOUT_PROFILING
static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
static int avg_depth_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
    "Average number of direct callouts examined per callout_process call. "
    "Units = 1/1000");
static int avg_lockcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
    &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
    "callout_process call. Units = 1/1000");
static int avg_mpcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
    0, "Average number of MP direct callouts made per callout_process call. "
    "Units = 1/1000");
#endif

static int ncallout;
SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0,
    "Number of entries in callwheel and size of timeout() preallocation");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
u_int callwheelsize, callwheelmask;
/*
 * The callout CPU exec entities represent the state of the callouts
 * currently running on each CPU, plus the state needed to migrate a callout
 * to a new callout CPU.  In particular, the first entry of the array
 * cc_exec_entity holds the state for the callout running in SWI thread
 * context, while the second one holds the state for the callout running
 * directly from hardware interrupt context.  The cached state is essential
 * for deferring migration when the migrating callout is already running.
 */
struct cc_exec {
	struct callout		*cc_curr;
#ifdef SMP
	void			(*ce_migration_func)(void *);
	void			*ce_migration_arg;
	int			ce_migration_cpu;
	sbintime_t		ce_migration_time;
	sbintime_t		ce_migration_prec;
#endif
	bool			cc_cancel;
	bool			cc_waiting;
};
/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 */
struct callout_cpu {
	struct mtx_padalign	cc_lock;
	struct cc_exec 		cc_exec_entity[2];
	struct callout		*cc_next;
	struct callout		*cc_callout;
	struct callout_list	*cc_callwheel;
	struct callout_tailq	cc_expireq;
	struct callout_slist	cc_callfree;
	sbintime_t		cc_firstevent;
	sbintime_t		cc_lastscan;
	void			*cc_cookie;
	u_int			cc_bucket;
	u_int			cc_inited;
	char			cc_ktr_event_name[20];
};
#define	callout_migrating(c)	((c)->c_iflags & CALLOUT_DFRMIGRATION)

#define	cc_exec_curr(cc, dir)		cc->cc_exec_entity[dir].cc_curr
#define	cc_exec_next(cc)		cc->cc_next
#define	cc_exec_cancel(cc, dir)		cc->cc_exec_entity[dir].cc_cancel
#define	cc_exec_waiting(cc, dir)	cc->cc_exec_entity[dir].cc_waiting
#ifdef SMP
#define	cc_migration_func(cc, dir)	cc->cc_exec_entity[dir].ce_migration_func
#define	cc_migration_arg(cc, dir)	cc->cc_exec_entity[dir].ce_migration_arg
#define	cc_migration_cpu(cc, dir)	cc->cc_exec_entity[dir].ce_migration_cpu
#define	cc_migration_time(cc, dir)	cc->cc_exec_entity[dir].ce_migration_time
#define	cc_migration_prec(cc, dir)	cc->cc_exec_entity[dir].ce_migration_prec

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)
static int timeout_cpu;

static void	callout_cpu_init(struct callout_cpu *cc, int cpu);
static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
		    int *mpcalls, int *lockcalls, int *gcalls,
#endif
		    int direct);

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to true with both callout_lock and cc_lock
 *                     held guarantees that the current callout will not run.
 *                     The softclock() function sets this to false before it
 *                     drops callout_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still false after
 *                     cc_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is true.  Set only when
 *                     cc_curr is non-NULL.
 */
/*
 * Resets the execution entity tied to a specific callout cpu.
 */
static void
cc_cce_cleanup(struct callout_cpu *cc, int direct)
{

	cc_exec_curr(cc, direct) = NULL;
	cc_exec_cancel(cc, direct) = false;
	cc_exec_waiting(cc, direct) = false;
#ifdef SMP
	cc_migration_cpu(cc, direct) = CPUBLOCK;
	cc_migration_time(cc, direct) = 0;
	cc_migration_prec(cc, direct) = 0;
	cc_migration_func(cc, direct) = NULL;
	cc_migration_arg(cc, direct) = NULL;
#endif
}
/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cce_migrating(struct callout_cpu *cc, int direct)
{

#ifdef SMP
	return (cc_migration_cpu(cc, direct) != CPUBLOCK);
#else
	return (0);
#endif
}
/*
 * Kernel low level callwheel initialization
 * called on cpu0 during kernel startup.
 */
static void
callout_callwheel_init(void *dummy)
{
	struct callout_cpu *cc;

	/*
	 * Calculate the size of the callout wheel and the preallocated
	 * timeout() structures.
	 * XXX: Clip ncallout to 18508, the value the old maxusers-based
	 * formula produced with maxusers capped at 384.  This is still
	 * huge, but acceptable.
	 */
	memset(CC_CPU(0), 0, sizeof(cc_cpu));
	ncallout = imin(16 + maxproc + maxfiles, 18508);
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);

	/*
	 * Calculate the callout wheel size; it is the next power of two
	 * strictly greater than 'ncallout'.
	 */
	callwheelsize = 1 << fls(ncallout);
	callwheelmask = callwheelsize - 1;
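
	/*
	 * Illustrative sizing (not from the original source): with the
	 * default clip of ncallout = 18508, fls(18508) == 15, so
	 * callwheelsize == 1 << 15 == 32768 buckets and
	 * callwheelmask == 0x7fff.
	 */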

	/*
	 * Only cpu0 handles timeout(9) and receives a preallocation.
	 *
	 * XXX: Once all timeout(9) consumers are converted this can
	 * be removed.
	 */
	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	cc->cc_callout = malloc(ncallout * sizeof(struct callout),
	    M_CALLOUT, M_WAITOK);
	callout_cpu_init(cc, timeout_cpu);
}
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
/*
 * Initialize the per-cpu callout structures.
 */
static void
callout_cpu_init(struct callout_cpu *cc, int cpu)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	cc->cc_inited = 1;
	cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
	    M_CALLOUT, M_WAITOK);
	for (i = 0; i < callwheelsize; i++)
		LIST_INIT(&cc->cc_callwheel[i]);
	TAILQ_INIT(&cc->cc_expireq);
	cc->cc_firstevent = INT64_MAX;
	for (i = 0; i < 2; i++)
		cc_cce_cleanup(cc, i);
	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
	    "callwheel cpu %d", cpu);
	if (cc->cc_callout == NULL)	/* Only cpu0 handles timeout(9) */
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_iflags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}
#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with the
 * outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may be willing to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif
/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(9). */
		callout_cpu_init(cc, cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
	}
#endif
}
SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
#define	CC_HASH_SHIFT	8

static inline u_int
callout_hash(sbintime_t sbt)
{

	return (sbt >> (32 - CC_HASH_SHIFT));
}

static inline u_int
callout_get_bucket(sbintime_t sbt)
{

	return (callout_hash(sbt) & callwheelmask);
}
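
/*
 * Illustrative example (not from the original source): sbintime_t is a
 * 32.32 fixed-point count of seconds, so shifting right by
 * 32 - CC_HASH_SHIFT == 24 bits yields one wheel bucket per 1/256 of a
 * second.  A time of 2.5 s hashes to 2.5 * 256 == 640, which
 * callout_get_bucket() then wraps with callwheelmask to pick the bucket
 * index.
 */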
void
callout_process(sbintime_t now)
{
	struct callout *tmp, *tmpn;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t first, last, max, tmp_max;
	uint32_t lookahead;
	u_int firstb, lastb, nowb;
#ifdef CALLOUT_PROFILING
	int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
#endif

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);

	/* Compute the buckets of the last scan and present times. */
	firstb = callout_hash(cc->cc_lastscan);
	cc->cc_lastscan = now;
	nowb = callout_hash(now);

	/* Compute the last bucket and minimum time of the bucket after it. */
	if (nowb == firstb)
		lookahead = (SBT_1S / 16);
	else if (nowb - firstb == 1)
		lookahead = (SBT_1S / 8);
	else
		lookahead = (SBT_1S / 2);
	first = last = now;
	first += (lookahead / 2);
	last += lookahead;
	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
	lastb = callout_hash(last) - 1;
	max = last;

	/*
	 * Check if we wrapped around the entire wheel from the last scan.
	 * If so, we need to scan the entire wheel for pending callouts.
	 */
	if (lastb - firstb >= callwheelsize) {
		lastb = firstb + callwheelsize - 1;
		if (nowb - firstb >= callwheelsize)
			nowb = lastb;
	}

	/* Iterate callwheel from firstb to nowb and then up to lastb. */
	do {
		sc = &cc->cc_callwheel[firstb & callwheelmask];
		tmp = LIST_FIRST(sc);
		while (tmp != NULL) {
			/* Run the callout if present time within allowed. */
			if (tmp->c_time <= now) {
				/*
				 * Consumer told us the callout may be run
				 * directly from hardware interrupt context.
				 */
				if (tmp->c_iflags & CALLOUT_DIRECT) {
#ifdef CALLOUT_PROFILING
					++depth_dir;
#endif
					cc_exec_next(cc) =
					    LIST_NEXT(tmp, c_links.le);
					cc->cc_bucket = firstb & callwheelmask;
					LIST_REMOVE(tmp, c_links.le);
					softclock_call_cc(tmp, cc,
#ifdef CALLOUT_PROFILING
					    &mpcalls_dir, &lockcalls_dir, NULL,
#endif
					    1);
					tmp = cc_exec_next(cc);
					cc_exec_next(cc) = NULL;
				} else {
					tmpn = LIST_NEXT(tmp, c_links.le);
					LIST_REMOVE(tmp, c_links.le);
					TAILQ_INSERT_TAIL(&cc->cc_expireq,
					    tmp, c_links.tqe);
					tmp->c_iflags |= CALLOUT_PROCESSED;
					tmp = tmpn;
				}
				continue;
			}
			/* Skip events in the distant future. */
			if (tmp->c_time >= max)
				goto next;
			/*
			 * This event's minimal time is bigger than the
			 * present maximal time, so it cannot be aggregated.
			 */
			if (tmp->c_time > last) {
				lastb = nowb;
				goto next;
			}
			/* Update first and last time, respecting this event. */
			if (tmp->c_time < first)
				first = tmp->c_time;
			tmp_max = tmp->c_time + tmp->c_precision;
			if (tmp_max < last)
				last = tmp_max;
next:
			tmp = LIST_NEXT(tmp, c_links.le);
		}
		/* Proceed with the next bucket. */
		firstb++;
		/*
		 * Stop if we looked after present time and found
		 * some event we can't execute at now.
		 * Stop if we looked far enough into the future.
		 */
	} while (((int)(firstb - lastb)) <= 0);
	cc->cc_firstevent = last;
#ifndef NO_EVENTTIMERS
	cpu_new_callout(curcpu, last, first);
#endif
#ifdef CALLOUT_PROFILING
	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
	avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
#endif
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (!TAILQ_EMPTY(&cc->cc_expireq))
		swi_sched(cc->cc_cookie, 0);
}
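
/*
 * Illustrative consumer view (not from the original source): a callout is
 * dispatched from hardware interrupt context above only if it was scheduled
 * with the C_DIRECT_EXEC flag, e.g.
 *
 *	callout_init(&sc->sc_tick, 1);
 *	callout_reset_sbt_on(&sc->sc_tick, SBT_1MS, 0, foo_tick, sc, cpu,
 *	    C_DIRECT_EXEC);
 *
 * where "sc" and "foo_tick" are hypothetical.  Such handlers must not sleep
 * and cannot use a callout lock; everything else is queued on cc_expireq
 * and serviced by the softclock SWI.
 */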
static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}
static void
callout_cc_add(struct callout *c, struct callout_cpu *cc,
    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
    void *arg, int cpu, int flags)
{
	int bucket;

	CC_LOCK_ASSERT(cc);
	if (sbt < cc->cc_lastscan)
		sbt = cc->cc_lastscan;
	c->c_arg = arg;
	c->c_iflags |= CALLOUT_PENDING;
	c->c_iflags &= ~CALLOUT_PROCESSED;
	c->c_flags |= CALLOUT_ACTIVE;
	if (flags & C_DIRECT_EXEC)
		c->c_iflags |= CALLOUT_DIRECT;
	c->c_func = func;
	c->c_time = sbt;
	c->c_precision = precision;
	bucket = callout_get_bucket(c->c_time);
	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
	    c, (int)(c->c_precision >> 32),
	    (u_int)(c->c_precision & 0xffffffff));
	LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
	if (cc->cc_bucket == bucket)
		cc_exec_next(cc) = c;
#ifndef NO_EVENTTIMERS
	/*
	 * Inform the eventtimers(4) subsystem there's a new callout
	 * that has been inserted, but only if really required.
	 */
	if (INT64_MAX - c->c_time < c->c_precision)
		c->c_precision = INT64_MAX - c->c_time;
	sbt = c->c_time + c->c_precision;
	if (sbt < cc->cc_firstevent) {
		cc->cc_firstevent = sbt;
		cpu_new_callout(cpu, sbt, c->c_time);
	}
#endif
}
static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0)
		return;
	c->c_func = NULL;
	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
}
static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
    int *mpcalls, int *lockcalls, int *gcalls,
#endif
    int direct)
{
	struct rm_priotracker tracker;
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	uintptr_t lock_status;
	int c_iflags;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int flags, new_cpu;
	sbintime_t new_prec, new_time;
#endif
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbintime_t sbt1, sbt2;
	struct timespec ts2;
	static sbintime_t maxdt = 2 * SBT_1MS;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
	    ("softclock_call_cc: pend %p %x", c, c->c_iflags));
	KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
	    ("softclock_call_cc: act %p %x", c, c->c_flags));
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	lock_status = 0;
	if (c->c_flags & CALLOUT_SHAREDLOCK) {
		if (class == &lock_class_rm)
			lock_status = (uintptr_t)&tracker;
		else
			lock_status = 1;
	}
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_iflags = c->c_iflags;
	if (c->c_iflags & CALLOUT_LOCAL_ALLOC)
		c->c_iflags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_iflags &= ~CALLOUT_PENDING;

	cc_exec_curr(cc, direct) = c;
	cc_exec_cancel(cc, direct) = false;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, lock_status);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc_exec_cancel(cc, direct)) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc_exec_cancel(cc, direct) = true;
		if (c_lock == &Giant.lock_object) {
#ifdef CALLOUT_PROFILING
			(*gcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
#ifdef CALLOUT_PROFILING
			(*lockcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
#ifdef CALLOUT_PROFILING
		(*mpcalls)++;
#endif
		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
		    c, c_func, c_arg);
	}
	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt1 = sbinuptime();
#endif
	THREAD_NO_SLEEPING();
	SDT_PROBE1(callout_execute, , , callout__start, c);
	c_func(c_arg);
	SDT_PROBE1(callout_execute, , , callout__end, c);
	THREAD_SLEEPING_OK();
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt2 = sbinuptime();
	sbt2 -= sbt1;
	if (sbt2 > maxdt) {
		if (lastfunc != c_func || sbt2 > maxdt * 2) {
			ts2 = sbttots(sbt2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = sbt2;
		lastfunc = c_func;
	}
#endif
	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
	cc_exec_curr(cc, direct) = NULL;
	if (cc_exec_waiting(cc, direct)) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration just cancel it.
		 */
		if (cc_cce_migrating(cc, direct)) {
			cc_cce_cleanup(cc, direct);

			/*
			 * It should be asserted here that the callout is
			 * not destroyed, but that is not easy.
			 */
			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
		}
		cc_exec_waiting(cc, direct) = false;
		CC_UNLOCK(cc);
		wakeup(&cc_exec_waiting(cc, direct));
		CC_LOCK(cc);
	} else if (cc_cce_migrating(cc, direct)) {
		KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0,
		    ("Migrating legacy callout %p", c));
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration just perform it now.
		 */
		new_cpu = cc_migration_cpu(cc, direct);
		new_time = cc_migration_time(cc, direct);
		new_prec = cc_migration_prec(cc, direct);
		new_func = cc_migration_func(cc, direct);
		new_arg = cc_migration_arg(cc, direct);
		cc_cce_cleanup(cc, direct);

		/*
		 * It should be asserted here that the callout is not
		 * destroyed, but that is not easy.
		 *
		 * As first thing, handle deferred callout stops.
		 */
		if (!callout_migrating(c)) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);
			callout_cc_del(c, cc);
			return;
		}
		c->c_iflags &= ~CALLOUT_DFRMIGRATION;

		new_cc = callout_cpu_switch(c, cc, new_cpu);
		flags = (direct) ? C_DIRECT_EXEC : 0;
		callout_cc_add(c, new_cc, new_time, new_prec, new_func,
		    new_arg, new_cpu, flags);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_iflags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_iflags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_iflags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}
/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
#ifdef CALLOUT_PROFILING
	int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
#endif

	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
		TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		softclock_call_cc(c, cc,
#ifdef CALLOUT_PROFILING
		    &mpcalls, &lockcalls, &gcalls,
#endif
		    0);
#ifdef CALLOUT_PROFILING
		++depth;
#endif
	}
#ifdef CALLOUT_PROFILING
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
#endif
	CC_UNLOCK(cc);
}
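
/*
 * Illustrative note (not from the original source): each avg_* update above
 * is an exponentially weighted moving average with smoothing factor 1/256
 * (the ">> 8"), kept in units of 1/1000 (the "* 1000").  For example, at a
 * steady depth of 4 callouts per invocation, avg_depth converges toward
 * about 4000, which the sysctl description documents as "Units = 1/1000".
 */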
/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(timeout_t *ftn, void *arg, int to_ticks)
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}
void
untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}
void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
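
/*
 * Illustrative legacy usage (not from the original source); "foo_expire"
 * and "sc" are hypothetical:
 *
 *	struct callout_handle h;
 *
 *	callout_handle_init(&h);
 *	h = timeout(foo_expire, sc, hz);	// fire in ~1 second
 *	...
 *	untimeout(foo_expire, sc, h);		// best-effort cancel
 *
 * Note that untimeout() must be given the same function and argument that
 * were passed to timeout(), in addition to the handle.
 */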
/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
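
/*
 * Illustrative modern usage (not from the original source); "sc", "sc_mtx",
 * "sc_timer", and "foo_timer" are hypothetical:
 *
 *	callout_init_mtx(&sc->sc_timer, &sc->sc_mtx, 0);
 *	...
 *	callout_reset(&sc->sc_timer, hz / 10, foo_timer, sc);
 *	...
 *	callout_stop(&sc->sc_timer);		// with sc_mtx held
 *
 * Initializing the callout with a lock (callout_init_mtx() from
 * <sys/callout.h>) serializes the handler against callout_stop() callers.
 */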
int
callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
    void (*ftn)(void *), void *arg, int cpu, int flags)
{
	sbintime_t to_sbt, pr;
	struct callout_cpu *cc;
	int cancelled, direct;
	int ignore_cpu = 0;

	cancelled = 0;
	if (cpu == -1) {
		ignore_cpu = 1;
	} else if ((cpu >= MAXCPU) ||
	    ((CC_CPU(cpu))->cc_inited == 0)) {
		/* Invalid CPU spec */
		panic("Invalid CPU in callout %d", cpu);
	}
	if (flags & C_ABSOLUTE) {
		to_sbt = sbt;
	} else {
		if ((flags & C_HARDCLOCK) && (sbt < tick_sbt))
			sbt = tick_sbt;
		if ((flags & C_HARDCLOCK) ||
#ifdef NO_EVENTTIMERS
		    sbt >= sbt_timethreshold) {
			to_sbt = getsbinuptime();

			/* Add safety belt for the case of hz > 1000. */
			to_sbt += tc_tick_sbt - tick_sbt;
#else
		    sbt >= sbt_tickthreshold) {
			/*
			 * Obtain the time of the last hardclock() call on
			 * this CPU directly from the kern_clocksource.c.
			 * This value is per-CPU, but it is equal for all
			 * active ones.
			 */
#ifdef __LP64__
			to_sbt = DPCPU_GET(hardclocktime);
#else
			spinlock_enter();
			to_sbt = DPCPU_GET(hardclocktime);
			spinlock_exit();
#endif
#endif
			if ((flags & C_HARDCLOCK) == 0)
				to_sbt += tick_sbt;
		} else
			to_sbt = sbinuptime();
		if (INT64_MAX - to_sbt < sbt)
			to_sbt = INT64_MAX;
		else
			to_sbt += sbt;
		pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
		    sbt >> C_PRELGET(flags));
		if (pr > precision)
			precision = pr;
	}
	/*
	 * This flag used to be added by callout_cc_add, but the
	 * first time you call this we could end up with the
	 * wrong direct flag if we don't do it before we add.
	 */
	if (flags & C_DIRECT_EXEC) {
		direct = 1;
	} else {
		direct = 0;
	}
	KASSERT(!direct || c->c_lock == NULL,
	    ("%s: direct callout %p has lock", __func__, c));
	cc = callout_lock(c);
	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced or handle the case where the user does
	 * not care.
	 */
	if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) ||
	    ignore_cpu) {
		cpu = c->c_cpu;
	}

	if (cc_exec_curr(cc, direct) == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
			cancelled = cc_exec_cancel(cc, direct) = true;
		if (cc_exec_waiting(cc, direct)) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
#ifdef SMP
		if (callout_migrating(c)) {
			/*
			 * This only occurs when a second callout_reset_sbt_on
			 * is made after a previous one moved it into
			 * deferred migration (below). Note we do *not* change
			 * the prev_cpu even though the previous target may
			 * be different.
			 */
			cc_migration_cpu(cc, direct) = cpu;
			cc_migration_time(cc, direct) = to_sbt;
			cc_migration_prec(cc, direct) = precision;
			cc_migration_func(cc, direct) = ftn;
			cc_migration_arg(cc, direct) = arg;
			cancelled = 1;
			CC_UNLOCK(cc);
			return (cancelled);
		}
#endif
	}
	if (c->c_iflags & CALLOUT_PENDING) {
		if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
			if (cc_exec_next(cc) == c)
				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
			LIST_REMOVE(c, c_links.le);
		} else {
			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		}
		cancelled = 1;
		c->c_iflags &= ~CALLOUT_PENDING;
		c->c_flags &= ~CALLOUT_ACTIVE;
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc_exec_curr(cc, direct) == c) {
			/*
			 * Pending will have been removed since we are
			 * actually executing the callout on another
			 * CPU.  That callout should be waiting on the
			 * lock the caller holds.  If we set both
			 * active/and/pending after we return and the
			 * lock on the executing callout proceeds, it
			 * will then see pending is true and return.
			 * At the return from the actual callout execution
			 * the migration will occur in softclock_call_cc
			 * and this new callout will be placed on the
			 * new CPU via a call to callout_cpu_switch() which
			 * will get the lock on the right CPU followed
			 * by a call to callout_cc_add() which will add it
			 * there. (see above in softclock_call_cc()).
			 */
			cc_migration_cpu(cc, direct) = cpu;
			cc_migration_time(cc, direct) = to_sbt;
			cc_migration_prec(cc, direct) = precision;
			cc_migration_func(cc, direct) = ftn;
			cc_migration_arg(cc, direct) = arg;
			c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
			c->c_flags |= CALLOUT_ACTIVE;
			CTR6(KTR_CALLOUT,
		    "migration of %p func %p arg %p in %d.%08x to %u deferred",
			    c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
			    (u_int)(to_sbt & 0xffffffff), cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
	    (u_int)(to_sbt & 0xffffffff));
	CC_UNLOCK(cc);

	return (cancelled);
}
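
/*
 * Illustrative usage (not from the original source); "sc" and "foo_poll"
 * are hypothetical:
 *
 *	// Fire ~100 ms from now, allowing 1/8 of the interval (C_PREL(3))
 *	// of slop so nearby events can be aggregated.
 *	callout_reset_sbt(&sc->sc_poll, 100 * SBT_1MS, 0, foo_poll, sc,
 *	    C_PREL(3));
 *
 * Larger precision windows let callout_process() batch wakeups and cut the
 * interrupt rate; C_ABSOLUTE instead interprets sbt as an absolute
 * sbinuptime() value.
 */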
/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}
int
_callout_stop_safe(struct callout *c, int flags)
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
	int direct, sq_locked, use_lock;
	int not_on_a_list;

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;
	if (c->c_iflags & CALLOUT_DIRECT) {
		direct = 1;
	} else {
		direct = 0;
	}
	sq_locked = 0;
	old_cc = NULL;
again:
	cc = callout_lock(c);

	if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
	    (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
	    ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
		/*
		 * Special case where this slipped in while we
		 * were migrating *as* the callout is about to
		 * execute. The caller probably holds the lock
		 * the callout wants.
		 *
		 * Get rid of the migration first. Then set
		 * the flag that tells this code *not* to
		 * try to remove it from any lists (it's not
		 * on one yet). When the callout wheel runs,
		 * it will ignore this callout.
		 */
		c->c_iflags &= ~CALLOUT_PENDING;
		c->c_flags &= ~CALLOUT_ACTIVE;
		not_on_a_list = 1;
	} else {
		not_on_a_list = 0;
	}

	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&cc_exec_waiting(old_cc, direct));
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}

	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_iflags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc_exec_curr(cc, direct) != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc_exec_waiting(cc, direct));
			return (0);
		}

		if ((flags & CS_DRAIN) != 0) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc_exec_curr(cc, direct) == c) {
				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(
					    &cc_exec_waiting(cc, direct));
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * as long as it is still not sure when it
				 * will be packed up, just let softclock()
				 * take care of it.
				 */
				cc_exec_waiting(cc, direct) = true;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(
				    &cc_exec_waiting(cc, direct),
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(
				    &cc_exec_waiting(cc, direct),
				    0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock &&
		    !cc_exec_cancel(cc, direct)) {

			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc_exec_cancel(cc, direct) = true;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cce_migrating(cc, direct),
			    ("callout wrongly scheduled for migration"));
			if (callout_migrating(c)) {
				c->c_iflags &= ~CALLOUT_DFRMIGRATION;
#ifdef SMP
				cc_migration_cpu(cc, direct) = CPUBLOCK;
				cc_migration_time(cc, direct) = 0;
				cc_migration_prec(cc, direct) = 0;
				cc_migration_func(cc, direct) = NULL;
				cc_migration_arg(cc, direct) = NULL;
#endif
			}
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		} else if (callout_migrating(c)) {
			/*
			 * The callout is currently being serviced
			 * and the "next" callout is scheduled at
			 * its completion with a migration. We remove
			 * the migration flag so it *won't* get rescheduled,
			 * but we can't stop the one that's running so
			 * we return 0.
			 */
			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
#ifdef SMP
			/*
			 * We can't call cc_cce_cleanup here since
			 * if we do it will remove .ce_curr and
			 * it's still running. This will prevent a
			 * reschedule of the callout when the
			 * execution completes.
			 */
			cc_migration_cpu(cc, direct) = CPUBLOCK;
			cc_migration_time(cc, direct) = 0;
			cc_migration_prec(cc, direct) = 0;
			cc_migration_func(cc, direct) = NULL;
			cc_migration_arg(cc, direct) = NULL;
#endif
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return ((flags & CS_MIGRBLOCK) != 0);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc_exec_waiting(cc, direct));

	c->c_iflags &= ~CALLOUT_PENDING;
	c->c_flags &= ~CALLOUT_ACTIVE;

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);
	if (not_on_a_list == 0) {
		if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
			if (cc_exec_next(cc) == c)
				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
			LIST_REMOVE(c, c_links.le);
		} else {
			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		}
	}
	callout_cc_del(c, cc);
	CC_UNLOCK(cc);
	return (1);
}
void
callout_init(struct callout *c, int mpsafe)
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_iflags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_iflags = 0;
	}
	c->c_cpu = timeout_cpu;
}
void
_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(struct timeval *time_change)
{
	struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}

	return;
}
#endif /* APM_FIXUP_CALLTODO */
static int
flssbt(sbintime_t sbt)
{

	sbt += (uint64_t)sbt >> 1;
	if (sizeof(long) >= sizeof(sbintime_t))
		return (flsl(sbt));
	if (sbt >= SBT_1S)
		return (flsl(((uint64_t)sbt) >> 32) + 32);
	return (fls(sbt));
}
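
/*
 * Illustrative note (not from the original source): adding "sbt >> 1"
 * before taking fls() rounds to the nearest power of two rather than
 * truncating.  For sbt == SBT_1S (1 << 32 in 32.32 fixed point), flssbt()
 * returns 33, and the histogram printed by the sysctl handler below lists
 * that bin with t == 1 second.
 */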
/*
 * Dump immediate statistic snapshot of the scheduled callouts.
 */
static int
sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
{
	struct callout *tmp;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
	int ct[64], cpr[64], ccpbk[32];
	int error, val, i, count, tcum, pcum, maxc, c, medc;
#ifdef SMP
	int cpu;
#endif

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	count = maxc = 0;
	st = spr = maxt = maxpr = 0;
	bzero(ccpbk, sizeof(ccpbk));
	bzero(ct, sizeof(ct));
	bzero(cpr, sizeof(cpr));
	now = sbinuptime();
#ifdef SMP
	CPU_FOREACH(cpu) {
		cc = CC_CPU(cpu);
#else
		cc = CC_CPU(timeout_cpu);
#endif
		CC_LOCK(cc);
		for (i = 0; i < callwheelsize; i++) {
			sc = &cc->cc_callwheel[i];
			c = 0;
			LIST_FOREACH(tmp, sc, c_links.le) {
				c++;
				t = tmp->c_time - now;
				if (t < 0)
					t = 0;
				st += t / SBT_1US;
				spr += tmp->c_precision / SBT_1US;
				if (t > maxt)
					maxt = t;
				if (tmp->c_precision > maxpr)
					maxpr = tmp->c_precision;
				ct[flssbt(t)]++;
				cpr[flssbt(tmp->c_precision)]++;
				count++;
			}
			if (c > maxc)
				maxc = c;
			ccpbk[fls(c + c / 2)]++;
		}
		CC_UNLOCK(cc);
#ifdef SMP
	}
#endif

	for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
		tcum += ct[i];
	medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
		pcum += cpr[i];
	medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, c = 0; i < 32 && c < count / 2; i++)
		c += ccpbk[i];
	medc = (i >= 2) ? (1 << (i - 2)) : 0;

	printf("Scheduled callouts statistic snapshot:\n");
	printf("  Callouts: %6d  Buckets: %6d*%-3d  Bucket size: 0.%06ds\n",
	    count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
	printf("  C/Bk: med %5d         avg %6d.%06jd  max %6d\n",
	    medc,
	    count / callwheelsize / mp_ncpus,
	    (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
	    maxc);
	printf("  Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
	    (st / count) / 1000000, (st / count) % 1000000,
	    maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
	printf("  Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
	    (spr / count) / 1000000, (spr / count) % 1000000,
	    maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
	printf("  Distribution:       \tbuckets\t   time\t   tcum\t"
	    "   prec\t   pcum\n");
	for (i = 0, tcum = pcum = 0; i < 64; i++) {
		if (ct[i] == 0 && cpr[i] == 0)
			continue;
		t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
		tcum += ct[i];
		pcum += cpr[i];
		printf("  %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
		    t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
		    i - 1 - (32 - CC_HASH_SHIFT),
		    ct[i], tcum, cpr[i], pcum);
	}
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_callout_stat, "I",
    "Dump immediate statistic snapshot of the scheduled callouts");