/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_callout_profiling.h"
#include "opt_ddb.h"
#include "opt_timer.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/file.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#include <machine/_inttypes.h>
#endif

#ifdef SMP
#include <machine/cpu.h>
#endif
#ifndef NO_EVENTTIMERS
DPCPU_DECLARE(sbintime_t, hardclocktime);
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");
#ifdef CALLOUT_PROFILING
static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
static int avg_depth_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
    "Average number of direct callouts examined per callout_process call. "
    "Units = 1/1000");
static int avg_lockcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
    &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
    "callout_process call. Units = 1/1000");
static int avg_mpcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
    0, "Average number of MP direct callouts made per callout_process call. "
    "Units = 1/1000");
#endif

static int ncallout;
SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
    "Number of entries in callwheel and size of timeout() preallocation");
#ifdef RSS
static int pin_default_swi = 1;
static int pin_pcpu_swi = 1;
#else
static int pin_default_swi = 0;
static int pin_pcpu_swi = 0;
#endif

SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
    0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
    0, "Pin the per-CPU swis (except PCPU 0, which is also default)");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
u_int callwheelsize, callwheelmask;
/*
 * The callout cpu exec entities represent the information necessary to
 * describe the state of callouts currently running on the CPU and the
 * information required to migrate callouts to a new callout cpu. In
 * particular, the first entry of the array cc_exec_entity holds the
 * information for a callout running in SWI thread context, while the
 * second one holds the information for a callout running directly from
 * hardware interrupt context. The cached information is very important
 * for deferring migration when the migrating callout is already running.
 */
struct cc_exec {
	struct callout		*cc_curr;
	void			(*cc_drain)(void *);
	void			(*cc_last_func)(void *);
	void			*cc_last_arg;
#ifdef SMP
	void			(*ce_migration_func)(void *);
	void			*ce_migration_arg;
	sbintime_t		ce_migration_time;
	sbintime_t		ce_migration_prec;
	int			ce_migration_cpu;
#endif
	bool			cc_cancel;
	bool			cc_waiting;
};
/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 */
struct callout_cpu {
	struct mtx_padalign	cc_lock;
	struct cc_exec 		cc_exec_entity[2];
	struct callout		*cc_next;
	struct callout		*cc_callout;
	struct callout_list	*cc_callwheel;
	struct callout_tailq	cc_expireq;
	struct callout_slist	cc_callfree;
	sbintime_t		cc_firstevent;
	sbintime_t		cc_lastscan;
	void			*cc_cookie;
	u_int			cc_bucket;
	u_int			cc_inited;
	char			cc_ktr_event_name[20];
};
#define	callout_migrating(c)	((c)->c_iflags & CALLOUT_DFRMIGRATION)

#define	cc_exec_curr(cc, dir)		cc->cc_exec_entity[dir].cc_curr
#define	cc_exec_last_func(cc, dir)	cc->cc_exec_entity[dir].cc_last_func
#define	cc_exec_last_arg(cc, dir)	cc->cc_exec_entity[dir].cc_last_arg
#define	cc_exec_drain(cc, dir)		cc->cc_exec_entity[dir].cc_drain
#define	cc_exec_next(cc)		cc->cc_next
#define	cc_exec_cancel(cc, dir)		cc->cc_exec_entity[dir].cc_cancel
#define	cc_exec_waiting(cc, dir)	cc->cc_exec_entity[dir].cc_waiting
#ifdef SMP
#define	cc_migration_func(cc, dir)	cc->cc_exec_entity[dir].ce_migration_func
#define	cc_migration_arg(cc, dir)	cc->cc_exec_entity[dir].ce_migration_arg
#define	cc_migration_cpu(cc, dir)	cc->cc_exec_entity[dir].ce_migration_cpu
#define	cc_migration_time(cc, dir)	cc->cc_exec_entity[dir].ce_migration_time
#define	cc_migration_prec(cc, dir)	cc->cc_exec_entity[dir].ce_migration_prec
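/*
 * Illustrative note (not in the original source): the "dir" argument of
 * these macros selects one of the two exec entities described above --
 * index 0 for callouts run from the softclock SWI thread and index 1 for
 * callouts run directly from hardware interrupt context.  A hypothetical
 * caller that knows how a callout was scheduled could therefore do:
 *
 *	int direct = (c->c_iflags & CALLOUT_DIRECT) ? 1 : 0;
 *
 *	if (cc_exec_curr(cc, direct) == c)
 *		;	// the callout handler is executing right now
 */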

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif

#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)
static int timeout_cpu;

static void	callout_cpu_init(struct callout_cpu *cc, int cpu);
static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
		    int *mpcalls, int *lockcalls, int *gcalls,
#endif
		    int direct);

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
/*
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both callout_lock and cc_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     cc_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is nonzero.  Set only when
 *                     cc_curr is non-NULL.
 */
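/*
 * A minimal sketch of the cc_cancel handshake described above (an
 * illustration of the protocol only; see softclock_call_cc() below for
 * the real sequence):
 *
 *	cc_exec_cancel(cc, direct) = false;	// softclock, cc_lock held
 *	CC_UNLOCK(cc);				// race window opens here
 *	class->lc_lock(c_lock, lock_status);	// take the consumer lock
 *	if (cc_exec_cancel(cc, direct)) {	// callout_stop() won the race
 *		class->lc_unlock(c_lock);	// handler is never called
 *		goto skip;
 *	}
 */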
/*
 * Resets the execution entity tied to a specific callout cpu.
 */
static void
cc_cce_cleanup(struct callout_cpu *cc, int direct)
{

	cc_exec_curr(cc, direct) = NULL;
	cc_exec_cancel(cc, direct) = false;
	cc_exec_waiting(cc, direct) = false;
#ifdef SMP
	cc_migration_cpu(cc, direct) = CPUBLOCK;
	cc_migration_time(cc, direct) = 0;
	cc_migration_prec(cc, direct) = 0;
	cc_migration_func(cc, direct) = NULL;
	cc_migration_arg(cc, direct) = NULL;
#endif
}
/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cce_migrating(struct callout_cpu *cc, int direct)
{

#ifdef SMP
	return (cc_migration_cpu(cc, direct) != CPUBLOCK);
#else
	return (0);
#endif
}
/*
 * Kernel low level callwheel initialization
 * called on the BSP during kernel startup.
 */
static void
callout_callwheel_init(void *dummy)
{
	struct callout_cpu *cc;

	/*
	 * Calculate the size of the callout wheel and the preallocated
	 * timeout() structures.
	 * XXX: Clip ncallout to the value the formula yields for a
	 * maxusers of 384.  This is still huge, but acceptable.
	 */
	memset(CC_CPU(curcpu), 0, sizeof(cc_cpu));
	ncallout = imin(16 + maxproc + maxfiles, 18508);
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);

	/*
	 * Calculate callout wheel size, should be next power of two higher
	 * than 'ncallout'.
	 */
	callwheelsize = 1 << fls(ncallout);
	callwheelmask = callwheelsize - 1;
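
	/*
	 * Worked example (assuming maxproc and maxfiles are large enough
	 * that ncallout is clipped to 18508 and the tunable is left
	 * alone): fls(18508) == 15, so callwheelsize becomes
	 * 1 << 15 = 32768 buckets and callwheelmask is 0x7fff.
	 */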

	/*
	 * Fetch whether we're pinning the swi's or not.
	 */
	TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
	TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);

	/*
	 * Only BSP handles timeout(9) and receives a preallocation.
	 *
	 * XXX: Once all timeout(9) consumers are converted this can
	 * be removed.
	 */
	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	cc->cc_callout = malloc(ncallout * sizeof(struct callout),
	    M_CALLOUT, M_WAITOK);
	callout_cpu_init(cc, timeout_cpu);
}
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
/*
 * Initialize the per-cpu callout structures.
 */
static void
callout_cpu_init(struct callout_cpu *cc, int cpu)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	cc->cc_inited = 1;
	cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
	    M_CALLOUT, M_WAITOK);
	for (i = 0; i < callwheelsize; i++)
		LIST_INIT(&cc->cc_callwheel[i]);
	TAILQ_INIT(&cc->cc_expireq);
	cc->cc_firstevent = SBT_MAX;
	for (i = 0; i < 2; i++)
		cc_cce_cleanup(cc, i);
	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
	    "callwheel cpu %d", cpu);
	if (cc->cc_callout == NULL)	/* Only BSP handles timeout(9) */
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_iflags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}
#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with
 * locked outgoing callout cpu.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may be willing to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif
/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
	char name[MAXCOMLEN];
#ifdef SMP
	int cpu;
	struct intr_event *ie;
#endif

	cc = CC_CPU(timeout_cpu);
	snprintf(name, sizeof(name), "clock (%d)", timeout_cpu);
	if (swi_add(&clk_intr_event, name, softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
	if (pin_default_swi &&
	    (intr_event_bind(clk_intr_event, timeout_cpu) != 0)) {
		printf("%s: timeout clock couldn't be pinned to cpu %d\n",
		    __func__, timeout_cpu);
	}

#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		cc->cc_callout = NULL;	/* Only BSP handles timeout(9). */
		callout_cpu_init(cc, cpu);
		snprintf(name, sizeof(name), "clock (%d)", cpu);
		ie = NULL;
		if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		if (pin_pcpu_swi && (intr_event_bind(ie, cpu) != 0)) {
			printf("%s: per-cpu clock couldn't be pinned to "
			    "cpu %d\n",
			    __func__, cpu);
		}
	}
#endif
}
SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

#define	CC_HASH_SHIFT	8

static inline u_int
callout_hash(sbintime_t sbt)
{

	return (sbt >> (32 - CC_HASH_SHIFT));
}

static inline u_int
callout_get_bucket(sbintime_t sbt)
{

	return (callout_hash(sbt) & callwheelmask);
}
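
/*
 * Worked example: an sbintime_t keeps seconds in its upper 32 bits, so
 * with CC_HASH_SHIFT == 8 the hash is the time in units of 1/256 s and
 * each wheel bucket spans about 3.9 ms.  A callout due at t = 2.5 s
 * (sbt = 0x280000000) hashes to 2 * 256 + 128 = 640; callout_get_bucket()
 * then folds that into the wheel with callwheelmask.
 */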

void
callout_process(sbintime_t now)
{
	struct callout *tmp, *tmpn;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t first, last, max, tmp_max;
	sbintime_t lookahead;
	u_int firstb, lastb, nowb;
#ifdef CALLOUT_PROFILING
	int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
#endif

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);

	/* Compute the buckets of the last scan and present times. */
	firstb = callout_hash(cc->cc_lastscan);
	cc->cc_lastscan = now;
	nowb = callout_hash(now);

	/* Compute the last bucket and minimum time of the bucket after it. */
	if (nowb == firstb)
		lookahead = (SBT_1S / 16);
	else if (nowb - firstb == 1)
		lookahead = (SBT_1S / 8);
	else
		lookahead = (SBT_1S / 2);
	first = last = now;
	first += (lookahead / 2);
	last += lookahead;
	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
	lastb = callout_hash(last) - 1;
	max = last;
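
	/*
	 * Illustrative note: the mask above rounds "last" down to a bucket
	 * boundary (a multiple of 1/256 s).  For example, a lookahead
	 * ending at 2.5039 s rounds down to 2.5 s, so lastb starts out as
	 * bucket 640 - 1 = 639 until the scan below extends it.
	 */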

	/*
	 * Check if we wrapped around the entire wheel from the last scan.
	 * If so, we need to scan the entire wheel for pending callouts.
	 */
	if (lastb - firstb >= callwheelsize) {
		lastb = firstb + callwheelsize - 1;
		if (nowb - firstb >= callwheelsize)
			nowb = lastb;
	}

	/* Iterate callwheel from firstb to nowb and then up to lastb. */
	do {
		sc = &cc->cc_callwheel[firstb & callwheelmask];
		tmp = LIST_FIRST(sc);
		while (tmp != NULL) {
			/* Run the callout if the present time is within the allowed window. */
			if (tmp->c_time <= now) {
				/*
				 * Consumer told us the callout may be run
				 * directly from hardware interrupt context.
				 */
				if (tmp->c_iflags & CALLOUT_DIRECT) {
#ifdef CALLOUT_PROFILING
					++depth_dir;
#endif
					cc_exec_next(cc) =
					    LIST_NEXT(tmp, c_links.le);
					cc->cc_bucket = firstb & callwheelmask;
					LIST_REMOVE(tmp, c_links.le);
					softclock_call_cc(tmp, cc,
#ifdef CALLOUT_PROFILING
					    &mpcalls_dir, &lockcalls_dir, NULL,
#endif
					    1);
					tmp = cc_exec_next(cc);
					cc_exec_next(cc) = NULL;
				} else {
					tmpn = LIST_NEXT(tmp, c_links.le);
					LIST_REMOVE(tmp, c_links.le);
					TAILQ_INSERT_TAIL(&cc->cc_expireq,
					    tmp, c_links.tqe);
					tmp->c_iflags |= CALLOUT_PROCESSED;
					tmp = tmpn;
				}
				continue;
			}
			/* Skip events in the distant future. */
			if (tmp->c_time >= max)
				goto next;
			/*
			 * The event's minimal time is bigger than the present
			 * maximal time, so it cannot be aggregated.
			 */
			if (tmp->c_time > last) {
				lastb = nowb;
				goto next;
			}
			/* Update first and last time, respecting this event. */
			if (tmp->c_time < first)
				first = tmp->c_time;
			tmp_max = tmp->c_time + tmp->c_precision;
			if (tmp_max < last)
				last = tmp_max;
next:
			tmp = LIST_NEXT(tmp, c_links.le);
		}
		/* Proceed with the next bucket. */
		firstb++;
		/*
		 * Stop if we have looked past the present time and found
		 * some event we cannot execute yet.
		 * Stop if we have looked far enough into the future.
		 */
	} while (((int)(firstb - lastb)) <= 0);
	cc->cc_firstevent = last;
#ifndef NO_EVENTTIMERS
	cpu_new_callout(curcpu, last, first);
#endif
#ifdef CALLOUT_PROFILING
	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
	avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
#endif
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (!TAILQ_EMPTY(&cc->cc_expireq))
		swi_sched(cc->cc_cookie, 0);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc,
    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
    void *arg, int cpu, int flags)
{
	int bucket;

	CC_LOCK_ASSERT(cc);
	if (sbt < cc->cc_lastscan)
		sbt = cc->cc_lastscan;
	c->c_arg = arg;
	c->c_iflags |= CALLOUT_PENDING;
	c->c_iflags &= ~CALLOUT_PROCESSED;
	c->c_flags |= CALLOUT_ACTIVE;
	if (flags & C_DIRECT_EXEC)
		c->c_iflags |= CALLOUT_DIRECT;
	c->c_func = func;
	c->c_time = sbt;
	c->c_precision = precision;
	bucket = callout_get_bucket(c->c_time);
	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
	    c, (int)(c->c_precision >> 32),
	    (u_int)(c->c_precision & 0xffffffff));
	LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
	if (cc->cc_bucket == bucket)
		cc_exec_next(cc) = c;
#ifndef NO_EVENTTIMERS
	/*
	 * Inform the eventtimers(4) subsystem there's a new callout
	 * that has been inserted, but only if really required.
	 */
	if (SBT_MAX - c->c_time < c->c_precision)
		c->c_precision = SBT_MAX - c->c_time;
	sbt = c->c_time + c->c_precision;
	if (sbt < cc->cc_firstevent) {
		cc->cc_firstevent = sbt;
		cpu_new_callout(cpu, sbt, c->c_time);
	}
#endif
}

static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0)
		return;
	c->c_func = NULL;
	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
}

static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
    int *mpcalls, int *lockcalls, int *gcalls,
#endif
    int direct)
{
	struct rm_priotracker tracker;
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	uintptr_t lock_status;
	int c_iflags;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int flags, new_cpu;
	sbintime_t new_prec, new_time;
#endif
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbintime_t sbt1, sbt2;
	struct timespec ts2;
	static sbintime_t maxdt = 2 * SBT_1MS;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
	    ("softclock_call_cc: pend %p %x", c, c->c_iflags));
	KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
	    ("softclock_call_cc: act %p %x", c, c->c_flags));
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	lock_status = 0;
	if (c->c_flags & CALLOUT_SHAREDLOCK) {
		if (class == &lock_class_rm)
			lock_status = (uintptr_t)&tracker;
		else
			lock_status = 1;
	}
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_iflags = c->c_iflags;
	if (c->c_iflags & CALLOUT_LOCAL_ALLOC)
		c->c_iflags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_iflags &= ~CALLOUT_PENDING;

	cc_exec_curr(cc, direct) = c;
	cc_exec_last_func(cc, direct) = c_func;
	cc_exec_last_arg(cc, direct) = c_arg;
	cc_exec_cancel(cc, direct) = false;
	cc_exec_drain(cc, direct) = NULL;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, lock_status);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc_exec_cancel(cc, direct)) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc_exec_cancel(cc, direct) = true;
		if (c_lock == &Giant.lock_object) {
#ifdef CALLOUT_PROFILING
			(*gcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
#ifdef CALLOUT_PROFILING
			(*lockcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
#ifdef CALLOUT_PROFILING
		(*mpcalls)++;
#endif
		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
		    c, c_func, c_arg);
	}
	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt1 = sbinuptime();
#endif
	THREAD_NO_SLEEPING();
	SDT_PROBE1(callout_execute, , , callout__start, c);
	c_func(c_arg);
	SDT_PROBE1(callout_execute, , , callout__end, c);
	THREAD_SLEEPING_OK();
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt2 = sbinuptime();
	sbt2 -= sbt1;
	if (sbt2 > maxdt) {
		if (lastfunc != c_func || sbt2 > maxdt * 2) {
			ts2 = sbttots(sbt2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = sbt2;
		lastfunc = c_func;
	}
#endif
	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
	cc_exec_curr(cc, direct) = NULL;
	if (cc_exec_drain(cc, direct)) {
		void (*drain)(void *);

		drain = cc_exec_drain(cc, direct);
		cc_exec_drain(cc, direct) = NULL;
		CC_UNLOCK(cc);
		drain(c_arg);
		CC_LOCK(cc);
	}
	if (cc_exec_waiting(cc, direct)) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration just cancel it.
		 */
		if (cc_cce_migrating(cc, direct)) {
			cc_cce_cleanup(cc, direct);

			/*
			 * It should be asserted here that the callout is not
			 * destroyed, but that is not easy.
			 */
			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
		}
		cc_exec_waiting(cc, direct) = false;
		CC_UNLOCK(cc);
		wakeup(&cc_exec_waiting(cc, direct));
		CC_LOCK(cc);
	} else if (cc_cce_migrating(cc, direct)) {
		KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0,
		    ("Migrating legacy callout %p", c));
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration just perform it now.
		 */
		new_cpu = cc_migration_cpu(cc, direct);
		new_time = cc_migration_time(cc, direct);
		new_prec = cc_migration_prec(cc, direct);
		new_func = cc_migration_func(cc, direct);
		new_arg = cc_migration_arg(cc, direct);
		cc_cce_cleanup(cc, direct);

		/*
		 * It should be asserted here that the callout is not
		 * destroyed, but that is not easy.
		 *
		 * As first thing, handle deferred callout stops.
		 */
		if (!callout_migrating(c)) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);
			callout_cc_del(c, cc);
			return;
		}
		c->c_iflags &= ~CALLOUT_DFRMIGRATION;

		new_cc = callout_cpu_switch(c, cc, new_cpu);
		flags = (direct) ? C_DIRECT_EXEC : 0;
		callout_cc_add(c, new_cc, new_time, new_prec, new_func,
		    new_arg, new_cpu, flags);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_iflags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_iflags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_iflags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
#ifdef CALLOUT_PROFILING
	int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
#endif

	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
		TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		softclock_call_cc(c, cc,
#ifdef CALLOUT_PROFILING
		    &mpcalls, &lockcalls, &gcalls,
#endif
		    0);
#ifdef CALLOUT_PROFILING
		++depth;
#endif
	}
#ifdef CALLOUT_PROFILING
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
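	/*
	 * Note on the updates above (a sketch of the arithmetic, not new
	 * behavior): each statistic is an exponentially weighted moving
	 * average stored scaled by 1000, hence "Units = 1/1000" in the
	 * sysctl descriptions.  "avg += (x * 1000 - avg) >> 8" is
	 * avg = avg + (x * 1000 - avg) / 256, i.e. an EWMA with
	 * alpha = 1/256; a steady per-call depth of 3 converges toward
	 * avg_depth == 3000.
	 */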
#endif
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
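/*
 * Illustrative sketch of the legacy interface (hypothetical consumer
 * code, not part of this file; "my_expire_fn" and "sc" are assumed
 * names):
 *
 *	struct callout_handle h;
 *
 *	callout_handle_init(&h);
 *	h = timeout(my_expire_fn, sc, hz);	// fire in ~1 second
 *	...
 *	untimeout(my_expire_fn, sc, h);		// cancel if still pending
 *
 * Note that untimeout() matches on both the function and the argument,
 * as described above.
 */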
struct callout_handle
timeout(timeout_t *ftn, void *arg, int to_ticks)
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

void
callout_when(sbintime_t sbt, sbintime_t precision, int flags,
    sbintime_t *res, sbintime_t *prec_res)
{
	sbintime_t to_sbt, to_pr;

	if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
		*res = sbt;
		*prec_res = precision;
		return;
	}
	if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt)
		sbt = tick_sbt;
	if ((flags & C_HARDCLOCK) != 0 ||
#ifdef NO_EVENTTIMERS
	    sbt >= sbt_timethreshold) {
		to_sbt = getsbinuptime();

		/* Add safety belt for the case of hz > 1000. */
		to_sbt += tc_tick_sbt - tick_sbt;
#else
	    sbt >= sbt_tickthreshold) {
		/*
		 * Obtain the time of the last hardclock() call on
		 * this CPU directly from kern_clocksource.c.
		 * This value is per-CPU, but it is equal for all
		 * active ones.
		 */
#ifdef __LP64__
		to_sbt = DPCPU_GET(hardclocktime);
#else
		spinlock_enter();
		to_sbt = DPCPU_GET(hardclocktime);
		spinlock_exit();
#endif
#endif
		if (cold && to_sbt == 0)
			to_sbt = sbinuptime();
		if ((flags & C_HARDCLOCK) == 0)
			to_sbt += tick_sbt;
	} else
		to_sbt = sbinuptime();
	if (SBT_MAX - to_sbt < sbt)
		to_sbt = SBT_MAX;
	else
		to_sbt += sbt;
	*res = to_sbt;
	to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
	    sbt >> C_PRELGET(flags));
	*prec_res = to_pr > precision ? to_pr : precision;
}
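
/*
 * Worked example for the precision computation above (assumed example
 * values): when the caller did not pass a C_PREL() level,
 * C_PRELGET(flags) is negative and the global tc_precexp is used; with
 * tc_precexp == 5, a 1 s relative timeout yields to_pr = SBT_1S >> 5,
 * i.e. about 31.25 ms of allowed slop, unless the caller supplied a
 * larger explicit "precision" argument.
 */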

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
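/*
 * Illustrative sketch of a typical consumer (hypothetical names, not
 * part of this file): a driver protecting its state with a mutex can
 * initialize the callout against that mutex so the handler runs with
 * the lock held:
 *
 *	struct my_softc {
 *		struct mtx	sc_mtx;
 *		struct callout	sc_timer;
 *	} *sc;
 *
 *	callout_init_mtx(&sc->sc_timer, &sc->sc_mtx, 0);
 *	mtx_lock(&sc->sc_mtx);
 *	callout_reset(&sc->sc_timer, hz, my_tick_fn, sc); // ~1 s from now
 *	mtx_unlock(&sc->sc_mtx);
 *	...
 *	callout_drain(&sc->sc_timer);	// e.g. on detach, lock not held
 */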
int
callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
    void (*ftn)(void *), void *arg, int cpu, int flags)
{
	sbintime_t to_sbt, precision;
	struct callout_cpu *cc;
	int cancelled, direct;
	int ignore_cpu = 0;

	cancelled = 0;
	if (cpu == -1) {
		ignore_cpu = 1;
	} else if ((cpu >= MAXCPU) ||
	    ((CC_CPU(cpu))->cc_inited == 0)) {
		/* Invalid CPU spec */
		panic("Invalid CPU in callout %d", cpu);
	}
	callout_when(sbt, prec, flags, &to_sbt, &precision);

	/*
	 * This flag used to be added by callout_cc_add, but the
	 * first time you call this we could end up with the
	 * wrong direct flag if we don't do it before we add.
	 */
	if (flags & C_DIRECT_EXEC) {
		direct = 1;
	} else {
		direct = 0;
	}
	KASSERT(!direct || c->c_lock == NULL,
	    ("%s: direct callout %p has lock", __func__, c));
	cc = callout_lock(c);
	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced or handle the case where the user does
	 * not care.
	 */
	if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) ||
	    ignore_cpu) {
		cpu = c->c_cpu;
	}

	if (cc_exec_curr(cc, direct) == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
			cancelled = cc_exec_cancel(cc, direct) = true;
		if (cc_exec_waiting(cc, direct) || cc_exec_drain(cc, direct)) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
#ifdef SMP
		if (callout_migrating(c)) {
			/*
			 * This only occurs when a second callout_reset_sbt_on
			 * is made after a previous one moved it into
			 * deferred migration (below). Note we do *not* change
			 * the prev_cpu even though the previous target may
			 * be different.
			 */
			cc_migration_cpu(cc, direct) = cpu;
			cc_migration_time(cc, direct) = to_sbt;
			cc_migration_prec(cc, direct) = precision;
			cc_migration_func(cc, direct) = ftn;
			cc_migration_arg(cc, direct) = arg;
			cancelled = 1;
			CC_UNLOCK(cc);
			return (cancelled);
		}
#endif
	}
	if (c->c_iflags & CALLOUT_PENDING) {
		if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
			if (cc_exec_next(cc) == c)
				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
			LIST_REMOVE(c, c_links.le);
		} else {
			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		}
		cancelled = 1;
		c->c_iflags &= ~ CALLOUT_PENDING;
		c->c_flags &= ~ CALLOUT_ACTIVE;
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc_exec_curr(cc, direct) == c) {
			/*
			 * Pending will have been removed since we are
			 * actually executing the callout on another
			 * CPU.  That callout should be waiting on the
			 * lock the caller holds.  If we set both
			 * active/and/pending after we return and the
			 * lock on the executing callout proceeds, it
			 * will then see pending is true and return.
			 * At the return from the actual callout execution
			 * the migration will occur in softclock_call_cc
			 * and this new callout will be placed on the
			 * new CPU via a call to callout_cpu_switch() which
			 * will get the lock on the right CPU followed
			 * by a call to callout_cc_add() which will add it
			 * there. (see above in softclock_call_cc()).
			 */
			cc_migration_cpu(cc, direct) = cpu;
			cc_migration_time(cc, direct) = to_sbt;
			cc_migration_prec(cc, direct) = precision;
			cc_migration_func(cc, direct) = ftn;
			cc_migration_arg(cc, direct) = arg;
			c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
			c->c_flags |= CALLOUT_ACTIVE;
			CTR6(KTR_CALLOUT,
			    "migration of %p func %p arg %p in %d.%08x to %u deferred",
			    c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
			    (u_int)(to_sbt & 0xffffffff), cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
	    (u_int)(to_sbt & 0xffffffff));
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

int
_callout_stop_safe(struct callout *c, int flags, void (*drain)(void *))
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
	int direct, sq_locked, use_lock;
	int cancelled, not_on_a_list;

	if ((flags & CS_DRAIN) != 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
		    "calling %s", __func__);

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;
	if (c->c_iflags & CALLOUT_DIRECT) {
		direct = 1;
	} else {
		direct = 0;
	}
	sq_locked = 0;
	old_cc = NULL;
again:
	cc = callout_lock(c);

	if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
	    (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
	    ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
		/*
		 * Special case where this slipped in while we
		 * were migrating *as* the callout is about to
		 * execute. The caller probably holds the lock
		 * the callout wants.
		 *
		 * Get rid of the migration first. Then set
		 * the flag that tells this code *not* to
		 * try to remove it from any lists (it's not
		 * on one yet). When the callout wheel runs,
		 * it will ignore this callout.
		 */
		c->c_iflags &= ~CALLOUT_PENDING;
		c->c_flags &= ~CALLOUT_ACTIVE;
		not_on_a_list = 1;
	} else {
		not_on_a_list = 0;
	}

	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&cc_exec_waiting(old_cc, direct));
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}

	/*
	 * If the callout is running, try to stop it or drain it.
	 */
	if (cc_exec_curr(cc, direct) == c) {
		/*
		 * Whether we succeed in stopping it or not, we must clear the
		 * active flag - this is what API users expect.  If we're
		 * draining and the callout is currently executing, first wait
		 * until it finishes.
		 */
		if ((flags & CS_DRAIN) == 0)
			c->c_flags &= ~CALLOUT_ACTIVE;

		if ((flags & CS_DRAIN) != 0) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc_exec_curr(cc, direct) == c) {
				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(
					    &cc_exec_waiting(cc, direct));
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * as long as it is still not sure when it
				 * will be packed up, just let softclock()
				 * take care of it.
				 */
				cc_exec_waiting(cc, direct) = true;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(
				    &cc_exec_waiting(cc, direct),
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(
				    &cc_exec_waiting(cc, direct),
				    0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
			c->c_flags &= ~CALLOUT_ACTIVE;
		} else if (use_lock &&
		    !cc_exec_cancel(cc, direct) && (drain == NULL)) {

			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock(). This *only* works with a
			 * callout_stop() *not* callout_drain() or
			 * callout_async_drain().
			 */
			cc_exec_cancel(cc, direct) = true;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cce_migrating(cc, direct),
			    ("callout wrongly scheduled for migration"));
			if (callout_migrating(c)) {
				c->c_iflags &= ~CALLOUT_DFRMIGRATION;
#ifdef SMP
				cc_migration_cpu(cc, direct) = CPUBLOCK;
				cc_migration_time(cc, direct) = 0;
				cc_migration_prec(cc, direct) = 0;
				cc_migration_func(cc, direct) = NULL;
				cc_migration_arg(cc, direct) = NULL;
#endif
			}
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		} else if (callout_migrating(c)) {
			/*
			 * The callout is currently being serviced
			 * and the "next" callout is scheduled at
			 * its completion with a migration. We remove
			 * the migration flag so it *won't* get rescheduled,
			 * but we can't stop the one that's running so
			 * we return 0.
			 */
			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
#ifdef SMP
			/*
			 * We can't call cc_cce_cleanup here since
			 * if we do, it will remove .ce_curr while
			 * it's still running. This will prevent a
			 * reschedule of the callout when the
			 * execution completes.
			 */
			cc_migration_cpu(cc, direct) = CPUBLOCK;
			cc_migration_time(cc, direct) = 0;
			cc_migration_prec(cc, direct) = 0;
			cc_migration_func(cc, direct) = NULL;
			cc_migration_arg(cc, direct) = NULL;
#endif
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			if (drain) {
				cc_exec_drain(cc, direct) = drain;
			}
			CC_UNLOCK(cc);
			return ((flags & CS_EXECUTING) != 0);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		if (drain) {
			cc_exec_drain(cc, direct) = drain;
		}
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		cancelled = ((flags & CS_EXECUTING) != 0);
	} else
		cancelled = 1;

	if (sq_locked)
		sleepq_release(&cc_exec_waiting(cc, direct));

	if ((c->c_iflags & CALLOUT_PENDING) == 0) {
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		/*
		 * For a callout that is neither scheduled nor executing,
		 * return a negative value.
		 */
		if (cc_exec_curr(cc, direct) != c)
			cancelled = -1;
		CC_UNLOCK(cc);
		return (cancelled);
	}

	c->c_iflags &= ~CALLOUT_PENDING;
	c->c_flags &= ~CALLOUT_ACTIVE;

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);
	if (not_on_a_list == 0) {
		if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
			if (cc_exec_next(cc) == c)
				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
			LIST_REMOVE(c, c_links.le);
		} else {
			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		}
	}
	callout_cc_del(c, cc);
	CC_UNLOCK(cc);
	return (cancelled);
}

void
callout_init(struct callout *c, int mpsafe)
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_iflags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_iflags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(struct timeval *time_change)
{
	struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = howmany(time_change->tv_sec * 1000000 +
		    time_change->tv_usec, tick) + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
		    howmany(time_change->tv_usec, tick) + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
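
	/*
	 * Worked example of the conversion above (assuming hz = 1000, so
	 * tick = 1000 us): a 2 hour suspend gives time_change->tv_sec =
	 * 7200, which takes the first branch and yields delta_ticks =
	 * howmany(7200 * 1000000 + 0, 1000) + 1 = 7200001 ticks; the
	 * INT_MAX clamp only matters for far longer sleeps.
	 */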

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */

static int
flssbt(sbintime_t sbt)
{

	sbt += (uint64_t)sbt >> 1;
	if (sizeof(long) >= sizeof(sbintime_t))
		return (flsl(sbt));
	if (sbt >= SBT_1S)
		return (flsl(((uint64_t)sbt) >> 32) + 32);
	return (fls(sbt));
}
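
/*
 * Worked example: flssbt() first adds sbt / 2 so the result is the
 * position of the highest set bit after rounding to the nearest power
 * of two.  For sbt = SBT_1S (1 << 32) it returns 33, so one-second
 * values land in slot 33 of the ct[]/cpr[] histograms below.
 */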

/*
 * Dump immediate statistic snapshot of the scheduled callouts.
 */
static int
sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
{
	struct callout *tmp;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
	int ct[64], cpr[64], ccpbk[32];
	int error, val, i, count, tcum, pcum, maxc, c, medc;
#ifdef SMP
	int cpu;
#endif

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	count = maxc = 0;
	st = spr = maxt = maxpr = 0;
	bzero(ccpbk, sizeof(ccpbk));
	bzero(ct, sizeof(ct));
	bzero(cpr, sizeof(cpr));
	now = sbinuptime();
#ifdef SMP
	CPU_FOREACH(cpu) {
		cc = CC_CPU(cpu);
#else
		cc = CC_CPU(timeout_cpu);
#endif
		CC_LOCK(cc);
		for (i = 0; i < callwheelsize; i++) {
			sc = &cc->cc_callwheel[i];
			c = 0;
			LIST_FOREACH(tmp, sc, c_links.le) {
				c++;
				t = tmp->c_time - now;
				if (t < 0)
					t = 0;
				st += t / SBT_1US;
				spr += tmp->c_precision / SBT_1US;
				if (t > maxt)
					maxt = t;
				if (tmp->c_precision > maxpr)
					maxpr = tmp->c_precision;
				ct[flssbt(t)]++;
				cpr[flssbt(tmp->c_precision)]++;
			}
			if (c > maxc)
				maxc = c;
			ccpbk[fls(c + c / 2)]++;
			count += c;
		}
		CC_UNLOCK(cc);
#ifdef SMP
	}
#endif

	for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
		tcum += ct[i];
	medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
		pcum += cpr[i];
	medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, c = 0; i < 32 && c < count / 2; i++)
		c += ccpbk[i];
	medc = (i >= 2) ? (1 << (i - 2)) : 0;

	printf("Scheduled callouts statistic snapshot:\n");
	printf("  Callouts: %6d  Buckets: %6d*%-3d  Bucket size: 0.%06ds\n",
	    count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
	printf("  C/Bk: med %5d         avg %6d.%06jd  max %6d\n",
	    medc,
	    count / callwheelsize / mp_ncpus,
	    (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
	    maxc);
	printf("  Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
	    (st / count) / 1000000, (st / count) % 1000000,
	    maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
	printf("  Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
	    (spr / count) / 1000000, (spr / count) % 1000000,
	    maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
	printf("  Distribution:       \tbuckets\t   time\t   tcum\t"
	    "   prec\t   pcum\n");
	for (i = 0, tcum = pcum = 0; i < 64; i++) {
		if (ct[i] == 0 && cpr[i] == 0)
			continue;
		t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
		tcum += ct[i];
		pcum += cpr[i];
		printf("  %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
		    t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
		    i - 1 - (32 - CC_HASH_SHIFT),
		    ct[i], tcum, cpr[i], pcum);
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_callout_stat, "I",
    "Dump immediate statistic snapshot of the scheduled callouts");

#ifdef DDB
static void
_show_callout(struct callout *c)
{

	db_printf("callout %p\n", c);
#define	C_DB_PRINTF(f, e)	db_printf("   %s = " f "\n", #e, c->e);
	db_printf("   &c_links = %p\n", &(c->c_links));
	C_DB_PRINTF("%" PRId64,	c_time);
	C_DB_PRINTF("%" PRId64,	c_precision);
	C_DB_PRINTF("%p",	c_arg);
	C_DB_PRINTF("%p",	c_func);
	C_DB_PRINTF("%p",	c_lock);
	C_DB_PRINTF("%#x",	c_flags);
	C_DB_PRINTF("%#x",	c_iflags);
	C_DB_PRINTF("%d",	c_cpu);
#undef	C_DB_PRINTF
}

DB_SHOW_COMMAND(callout, db_show_callout)
{

	if (!have_addr) {
		db_printf("usage: show callout <struct callout *>\n");
		return;
	}

	_show_callout((struct callout *)addr);
}

static void
_show_last_callout(int cpu, int direct, const char *dirstr)
{
	struct callout_cpu *cc;
	void *func, *arg;

	cc = CC_CPU(cpu);
	func = cc_exec_last_func(cc, direct);
	arg = cc_exec_last_arg(cc, direct);
	db_printf("cpu %d last%s callout function: %p ", cpu, dirstr, func);
	db_printsym((db_expr_t)func, DB_STGY_ANY);
	db_printf("\ncpu %d last%s callout argument: %p\n", cpu, dirstr, arg);
}

DB_SHOW_COMMAND(callout_last, db_show_callout_last)
{
	int cpu, last;

	if (have_addr) {
		if (addr < 0 || addr > mp_maxid || CPU_ABSENT(addr)) {
			db_printf("no such cpu: %d\n", (int)addr);
			return;
		}
		cpu = last = addr;
	} else {
		cpu = 0;
		last = mp_maxid;
	}

	while (cpu <= last) {
		if (!CPU_ABSENT(cpu)) {
			_show_last_callout(cpu, 0, "");
			_show_last_callout(cpu, 1, " direct");
		}
		cpu++;
	}
}
#endif /* DDB */