2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1982, 1986, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
42 #include "opt_callout_profiling.h"
46 #include <sys/param.h>
47 #include <sys/systm.h>
49 #include <sys/callout.h>
50 #include <sys/domainset.h>
52 #include <sys/interrupt.h>
53 #include <sys/kernel.h>
55 #include <sys/kthread.h>
57 #include <sys/malloc.h>
58 #include <sys/mutex.h>
60 #include <sys/random.h>
61 #include <sys/sched.h>
63 #include <sys/sleepqueue.h>
64 #include <sys/sysctl.h>
66 #include <sys/unistd.h>
70 #include <ddb/db_sym.h>
71 #include <machine/_inttypes.h>
75 #include <machine/cpu.h>
78 DPCPU_DECLARE(sbintime_t, hardclocktime);
80 SDT_PROVIDER_DEFINE(callout_execute);
81 SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
82 SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");
84 static void softclock_thread(void *arg);
86 #ifdef CALLOUT_PROFILING
88 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
89 "Average number of items examined per softclock call. Units = 1/1000");
90 static int avg_gcalls;
91 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
92 "Average number of Giant callouts made per softclock call. Units = 1/1000");
93 static int avg_lockcalls;
94 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
95 "Average number of lock callouts made per softclock call. Units = 1/1000");
96 static int avg_mpcalls;
97 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
98 "Average number of MP callouts made per softclock call. Units = 1/1000");
99 static int avg_depth_dir;
100 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
101 "Average number of direct callouts examined per callout_process call. "
103 static int avg_lockcalls_dir;
104 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
105 &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
106 "callout_process call. Units = 1/1000");
107 static int avg_mpcalls_dir;
108 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
109 0, "Average number of MP direct callouts made per callout_process call. "
114 SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
115 "Number of entries in callwheel and size of timeout() preallocation");
118 static int pin_default_swi = 1;
119 static int pin_pcpu_swi = 1;
121 static int pin_default_swi = 0;
122 static int pin_pcpu_swi = 0;
125 SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
126 0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
127 SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
128 0, "Pin the per-CPU swis (except PCPU 0, which is also default)");
132 * allocate more timeout table slots when table overflows.
134 static u_int __read_mostly callwheelsize;
135 static u_int __read_mostly callwheelmask;
 * The callout CPU exec entities represent the information necessary to
 * describe the state of callouts currently running on the CPU and the
 * information needed to migrate callouts to a new callout CPU. In particular,
 * the first entry of the cc_exec_entity array holds the information for a
 * callout running in SWI thread context, while the second one holds the
 * information for a callout running directly from hardware interrupt context.
 * The cached information is essential for deferring migration when the
 * migrating callout is already running.
148 struct callout *cc_curr;
149 callout_func_t *cc_drain;
153 callout_func_t *ce_migration_func;
154 void *ce_migration_arg;
155 sbintime_t ce_migration_time;
156 sbintime_t ce_migration_prec;
157 int ce_migration_cpu;
164 * There is one struct callout_cpu per cpu, holding all relevant
165 * state for the callout processing thread on the individual CPU.
168 struct mtx_padalign cc_lock;
169 struct cc_exec cc_exec_entity[2];
170 struct callout *cc_next;
171 struct callout_list *cc_callwheel;
172 struct callout_tailq cc_expireq;
173 sbintime_t cc_firstevent;
174 sbintime_t cc_lastscan;
175 struct thread *cc_thread;
178 char cc_ktr_event_name[20];
182 #define callout_migrating(c) ((c)->c_iflags & CALLOUT_DFRMIGRATION)
184 #define cc_exec_curr(cc, dir) cc->cc_exec_entity[dir].cc_curr
185 #define cc_exec_last_func(cc, dir) cc->cc_exec_entity[dir].cc_last_func
186 #define cc_exec_last_arg(cc, dir) cc->cc_exec_entity[dir].cc_last_arg
187 #define cc_exec_drain(cc, dir) cc->cc_exec_entity[dir].cc_drain
188 #define cc_exec_next(cc) cc->cc_next
189 #define cc_exec_cancel(cc, dir) cc->cc_exec_entity[dir].cc_cancel
190 #define cc_exec_waiting(cc, dir) cc->cc_exec_entity[dir].cc_waiting
192 #define cc_migration_func(cc, dir) cc->cc_exec_entity[dir].ce_migration_func
193 #define cc_migration_arg(cc, dir) cc->cc_exec_entity[dir].ce_migration_arg
194 #define cc_migration_cpu(cc, dir) cc->cc_exec_entity[dir].ce_migration_cpu
195 #define cc_migration_time(cc, dir) cc->cc_exec_entity[dir].ce_migration_time
196 #define cc_migration_prec(cc, dir) cc->cc_exec_entity[dir].ce_migration_prec
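/*
 * The second argument of these macros is the "direct" flag: index 0 selects
 * the exec entity for callouts run from the SWI thread, index 1 the one for
 * callouts run directly from hardware interrupt context (see the comment
 * above cc_exec_entity).
 */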
198 static struct callout_cpu cc_cpu[MAXCPU];
199 #define CPUBLOCK MAXCPU
200 #define CC_CPU(cpu) (&cc_cpu[(cpu)])
201 #define CC_SELF() CC_CPU(PCPU_GET(cpuid))
203 static struct callout_cpu cc_cpu;
204 #define CC_CPU(cpu) (&cc_cpu)
205 #define CC_SELF() (&cc_cpu)
207 #define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock)
208 #define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock)
209 #define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED)
211 static int __read_mostly cc_default_cpu;
213 static void callout_cpu_init(struct callout_cpu *cc, int cpu);
214 static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
215 #ifdef CALLOUT_PROFILING
216 int *mpcalls, int *lockcalls, int *gcalls,
220 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
 * cc_curr - If a callout is in progress, it is cc_curr.
 * If cc_curr is non-NULL, threads waiting in
 * callout_drain() will be woken up as soon as the
 * relevant callout completes.
 * cc_cancel - Changing to 1 with both callout_lock and cc_lock held
 * guarantees that the current callout will not run.
 * The softclock_call_cc() function sets this to 0 before it
 * drops callout_lock to acquire c_lock, and it calls
 * the handler only if cc_cancel is still 0 after
 * cc_lock is successfully acquired.
 * cc_waiting - If a thread is waiting in callout_drain(), then
 * cc_waiting is nonzero. Set only when
 * cc_curr is non-NULL.
240 * Resets the execution entity tied to a specific callout cpu.
243 cc_cce_cleanup(struct callout_cpu *cc, int direct)
246 cc_exec_curr(cc, direct) = NULL;
247 cc_exec_cancel(cc, direct) = false;
248 cc_exec_waiting(cc, direct) = false;
250 cc_migration_cpu(cc, direct) = CPUBLOCK;
251 cc_migration_time(cc, direct) = 0;
252 cc_migration_prec(cc, direct) = 0;
253 cc_migration_func(cc, direct) = NULL;
254 cc_migration_arg(cc, direct) = NULL;
259 * Checks if migration is requested by a specific callout cpu.
262 cc_cce_migrating(struct callout_cpu *cc, int direct)
266 return (cc_migration_cpu(cc, direct) != CPUBLOCK);
273 * Kernel low level callwheel initialization
274 * called on the BSP during kernel startup.
277 callout_callwheel_init(void *dummy)
279 struct callout_cpu *cc;
283 * Calculate the size of the callout wheel and the preallocated
284 * timeout() structures.
 * XXX: Clip ncallout to the value the old maxusers-based formula
 * produced at its maximum of 384 users. This is still huge, but acceptable.
288 ncallout = imin(16 + maxproc + maxfiles, 18508);
289 TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
 * Calculate the callout wheel size; it should be the next power of two
 * higher than ncallout.
295 callwheelsize = 1 << fls(ncallout);
296 callwheelmask = callwheelsize - 1;
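/*
 * Worked example (values depend on the tunables above): if maxproc and
 * maxfiles are large enough that ncallout clips to 18508, then
 * fls(18508) == 15, so callwheelsize becomes 1 << 15 == 32768 and
 * callwheelmask == 0x7fff.
 */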
 * Fetch whether we're pinning the SWIs or not.
301 TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
302 TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);
 * Initialize callout wheels. The software interrupt threads
 * are created later by start_softclock().
308 cc_default_cpu = PCPU_GET(cpuid);
311 callout_cpu_init(cc, cpu);
314 SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
317 * Initialize the per-cpu callout structures.
320 callout_cpu_init(struct callout_cpu *cc, int cpu)
324 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN);
325 cc->cc_callwheel = malloc_domainset(sizeof(struct callout_list) *
326 callwheelsize, M_CALLOUT,
327 DOMAINSET_PREF(pcpu_find(cpu)->pc_domain), M_WAITOK);
328 for (i = 0; i < callwheelsize; i++)
329 LIST_INIT(&cc->cc_callwheel[i]);
330 TAILQ_INIT(&cc->cc_expireq);
331 cc->cc_firstevent = SBT_MAX;
332 for (i = 0; i < 2; i++)
333 cc_cce_cleanup(cc, i);
335 snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
336 "callwheel cpu %d", cpu);
 * Switch the CPU a specific callout is tied to.
 * The function expects the incoming callout cpu to be locked and returns
 * with the outgoing callout cpu locked.
346 static struct callout_cpu *
347 callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
349 struct callout_cpu *new_cc;
351 MPASS(c != NULL && cc != NULL);
 * Avoid interrupts and preemption firing after the callout cpu
 * is blocked in order to avoid deadlocks, as the new thread
 * may need to acquire the callout cpu lock.
362 new_cc = CC_CPU(new_cpu);
371 * Start softclock threads.
374 start_softclock(void *dummy)
378 struct callout_cpu *cc;
385 error = kproc_kthread_add(softclock_thread, cc, &p, &td,
386 RFSTOPPED, 0, "clock", "clock (%d)", cpu);
388 panic("failed to create softclock thread for cpu %d: %d",
393 sched_class(td, PRI_ITHD);
394 sched_ithread_prio(td, PI_SOFTCLOCK);
396 thread_lock_set(td, (struct mtx *)&cc->cc_lock);
398 if (cpu == cc_default_cpu)
399 pin_swi = pin_default_swi;
401 pin_swi = pin_pcpu_swi;
403 error = cpuset_setithread(td->td_tid, cpu);
405 printf("%s: %s clock couldn't be pinned to cpu %d: %d\n",
406 __func__, cpu == cc_default_cpu ?
407 "default" : "per-cpu", cpu, error);
411 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
413 #define CC_HASH_SHIFT 8
416 callout_hash(sbintime_t sbt)
419 return (sbt >> (32 - CC_HASH_SHIFT));
423 callout_get_bucket(sbintime_t sbt)
426 return (callout_hash(sbt) & callwheelmask);
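/*
 * Illustrative note: an sbintime_t carries seconds in its upper 32 bits,
 * so shifting right by (32 - CC_HASH_SHIFT) == 24 bits advances the hash
 * once every 2^24 sbt units, i.e. each callwheel bucket covers 1/256 of
 * a second with CC_HASH_SHIFT == 8.
 */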
430 callout_process(sbintime_t now)
432 struct callout_entropy {
433 struct callout_cpu *cc;
437 struct callout *tmp, *tmpn;
438 struct callout_cpu *cc;
439 struct callout_list *sc;
441 sbintime_t first, last, lookahead, max, tmp_max;
442 u_int firstb, lastb, nowb;
443 #ifdef CALLOUT_PROFILING
444 int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
448 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
450 /* Compute the buckets of the last scan and present times. */
451 firstb = callout_hash(cc->cc_lastscan);
452 cc->cc_lastscan = now;
453 nowb = callout_hash(now);
455 /* Compute the last bucket and minimum time of the bucket after it. */
457 lookahead = (SBT_1S / 16);
458 else if (nowb - firstb == 1)
459 lookahead = (SBT_1S / 8);
463 first += (lookahead / 2);
465 last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
466 lastb = callout_hash(last) - 1;
 * Check if we wrapped around the entire wheel since the last scan.
 * If so, we need to scan the entire wheel for pending callouts.
473 if (lastb - firstb >= callwheelsize) {
474 lastb = firstb + callwheelsize - 1;
475 if (nowb - firstb >= callwheelsize)
479 /* Iterate callwheel from firstb to nowb and then up to lastb. */
481 sc = &cc->cc_callwheel[firstb & callwheelmask];
482 tmp = LIST_FIRST(sc);
483 while (tmp != NULL) {
/* Run the callout if the present time is within its allowed window. */
485 if (tmp->c_time <= now) {
487 * Consumer told us the callout may be run
488 * directly from hardware interrupt context.
490 if (tmp->c_iflags & CALLOUT_DIRECT) {
491 #ifdef CALLOUT_PROFILING
495 LIST_NEXT(tmp, c_links.le);
496 cc->cc_bucket = firstb & callwheelmask;
497 LIST_REMOVE(tmp, c_links.le);
498 softclock_call_cc(tmp, cc,
499 #ifdef CALLOUT_PROFILING
500 &mpcalls_dir, &lockcalls_dir, NULL,
503 tmp = cc_exec_next(cc);
504 cc_exec_next(cc) = NULL;
506 tmpn = LIST_NEXT(tmp, c_links.le);
507 LIST_REMOVE(tmp, c_links.le);
508 TAILQ_INSERT_TAIL(&cc->cc_expireq,
510 tmp->c_iflags |= CALLOUT_PROCESSED;
515 /* Skip events from distant future. */
516 if (tmp->c_time >= max)
 * The event's minimal time is later than the present maximal
 * time, so it cannot be aggregated.
522 if (tmp->c_time > last) {
526 /* Update first and last time, respecting this event. */
527 if (tmp->c_time < first)
529 tmp_max = tmp->c_time + tmp->c_precision;
533 tmp = LIST_NEXT(tmp, c_links.le);
535 /* Proceed with the next bucket. */
 * Stop if we looked past the present time and found
 * some event we cannot execute yet.
 * Stop if we looked far enough into the future.
542 } while (((int)(firstb - lastb)) <= 0);
543 cc->cc_firstevent = last;
544 cpu_new_callout(curcpu, last, first);
546 #ifdef CALLOUT_PROFILING
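/*
 * These are decaying averages: each update moves the stored value
 * 1/256 of the way toward the new sample, and samples are scaled
 * by 1000, matching the "Units = 1/1000" sysctl descriptions.
 */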
547 avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
548 avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
549 avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
551 if (!TAILQ_EMPTY(&cc->cc_expireq)) {
553 entropy.td = curthread;
555 random_harvest_queue(&entropy, sizeof(entropy), RANDOM_CALLOUT);
558 if (TD_AWAITING_INTR(td)) {
559 thread_lock_block_wait(td);
560 THREAD_LOCK_ASSERT(td, MA_OWNED);
562 sched_add(td, SRQ_INTR);
564 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
566 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
569 static struct callout_cpu *
570 callout_lock(struct callout *c)
572 struct callout_cpu *cc;
578 if (cpu == CPUBLOCK) {
579 while (c->c_cpu == CPUBLOCK)
594 callout_cc_add(struct callout *c, struct callout_cpu *cc,
595 sbintime_t sbt, sbintime_t precision, void (*func)(void *),
596 void *arg, int cpu, int flags)
601 if (sbt < cc->cc_lastscan)
602 sbt = cc->cc_lastscan;
604 c->c_iflags |= CALLOUT_PENDING;
605 c->c_iflags &= ~CALLOUT_PROCESSED;
606 c->c_flags |= CALLOUT_ACTIVE;
607 if (flags & C_DIRECT_EXEC)
608 c->c_iflags |= CALLOUT_DIRECT;
611 c->c_precision = precision;
612 bucket = callout_get_bucket(c->c_time);
613 CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
614 c, (int)(c->c_precision >> 32),
615 (u_int)(c->c_precision & 0xffffffff));
616 LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
617 if (cc->cc_bucket == bucket)
618 cc_exec_next(cc) = c;
 * Inform the eventtimers(4) subsystem that a new callout
 * has been inserted, but only if really required.
624 if (SBT_MAX - c->c_time < c->c_precision)
625 c->c_precision = SBT_MAX - c->c_time;
626 sbt = c->c_time + c->c_precision;
627 if (sbt < cc->cc_firstevent) {
628 cc->cc_firstevent = sbt;
629 cpu_new_callout(cpu, sbt, c->c_time);
634 softclock_call_cc(struct callout *c, struct callout_cpu *cc,
635 #ifdef CALLOUT_PROFILING
636 int *mpcalls, int *lockcalls, int *gcalls,
640 struct rm_priotracker tracker;
641 callout_func_t *c_func, *drain;
643 struct lock_class *class;
644 struct lock_object *c_lock;
645 uintptr_t lock_status;
648 struct callout_cpu *new_cc;
649 callout_func_t *new_func;
652 sbintime_t new_prec, new_time;
654 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
655 sbintime_t sbt1, sbt2;
657 static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */
658 static callout_func_t *lastfunc;
661 KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
662 ("softclock_call_cc: pend %p %x", c, c->c_iflags));
663 KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
664 ("softclock_call_cc: act %p %x", c, c->c_flags));
665 class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
667 if (c->c_iflags & CALLOUT_SHAREDLOCK) {
668 if (class == &lock_class_rm)
669 lock_status = (uintptr_t)&tracker;
676 c_iflags = c->c_iflags;
677 c->c_iflags &= ~CALLOUT_PENDING;
679 cc_exec_curr(cc, direct) = c;
680 cc_exec_last_func(cc, direct) = c_func;
681 cc_exec_last_arg(cc, direct) = c_arg;
682 cc_exec_cancel(cc, direct) = false;
683 cc_exec_drain(cc, direct) = NULL;
685 if (c_lock != NULL) {
686 class->lc_lock(c_lock, lock_status);
688 * The callout may have been cancelled
689 * while we switched locks.
691 if (cc_exec_cancel(cc, direct)) {
692 class->lc_unlock(c_lock);
695 /* The callout cannot be stopped now. */
696 cc_exec_cancel(cc, direct) = true;
697 if (c_lock == &Giant.lock_object) {
698 #ifdef CALLOUT_PROFILING
701 CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
704 #ifdef CALLOUT_PROFILING
707 CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
711 #ifdef CALLOUT_PROFILING
714 CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
717 KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
718 "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
719 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
722 THREAD_NO_SLEEPING();
723 SDT_PROBE1(callout_execute, , , callout__start, c);
725 SDT_PROBE1(callout_execute, , , callout__end, c);
726 THREAD_SLEEPING_OK();
727 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
731 if (lastfunc != c_func || sbt2 > maxdt * 2) {
734 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
735 c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
741 KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
742 CTR1(KTR_CALLOUT, "callout %p finished", c);
743 if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
744 class->lc_unlock(c_lock);
747 KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
748 cc_exec_curr(cc, direct) = NULL;
749 if (cc_exec_drain(cc, direct)) {
750 drain = cc_exec_drain(cc, direct);
751 cc_exec_drain(cc, direct) = NULL;
756 if (cc_exec_waiting(cc, direct)) {
758 * There is someone waiting for the
759 * callout to complete.
760 * If the callout was scheduled for
761 * migration just cancel it.
763 if (cc_cce_migrating(cc, direct)) {
764 cc_cce_cleanup(cc, direct);
 * It should be asserted here that the callout is not
 * destroyed, but that is not easy.
770 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
772 cc_exec_waiting(cc, direct) = false;
774 wakeup(&cc_exec_waiting(cc, direct));
776 } else if (cc_cce_migrating(cc, direct)) {
779 * If the callout was scheduled for
780 * migration just perform it now.
782 new_cpu = cc_migration_cpu(cc, direct);
783 new_time = cc_migration_time(cc, direct);
784 new_prec = cc_migration_prec(cc, direct);
785 new_func = cc_migration_func(cc, direct);
786 new_arg = cc_migration_arg(cc, direct);
787 cc_cce_cleanup(cc, direct);
 * It should be asserted here that the callout is not destroyed,
 * but that is not easy.
 *
 * First, handle deferred callout stops.
795 if (!callout_migrating(c)) {
797 "deferred cancelled %p func %p arg %p",
798 c, new_func, new_arg);
801 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
803 new_cc = callout_cpu_switch(c, cc, new_cpu);
804 flags = (direct) ? C_DIRECT_EXEC : 0;
805 callout_cc_add(c, new_cc, new_time, new_prec, new_func,
806 new_arg, new_cpu, flags);
810 panic("migration should not happen");
816 * The callout mechanism is based on the work of Adam M. Costello and
817 * George Varghese, published in a technical report entitled "Redesigning
818 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
819 * in FreeBSD by Justin T. Gibbs. The original work on the data structures
820 * used in this implementation was published by G. Varghese and T. Lauck in
821 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
822 * the Efficient Implementation of a Timer Facility" in the Proceedings of
823 * the 11th ACM Annual Symposium on Operating Systems Principles,
824 * Austin, Texas Nov 1987.
828 * Software (low priority) clock interrupt thread handler.
829 * Run periodic events from timeout queue.
832 softclock_thread(void *arg)
834 struct thread *td = curthread;
835 struct callout_cpu *cc;
837 #ifdef CALLOUT_PROFILING
838 int depth, gcalls, lockcalls, mpcalls;
841 cc = (struct callout_cpu *)arg;
844 while (TAILQ_EMPTY(&cc->cc_expireq)) {
 * Use CC_LOCK(cc) as the thread_lock while
 * this thread is idle.
850 thread_lock_set(td, (struct mtx *)&cc->cc_lock);
852 mi_switch(SW_VOL | SWT_IWAIT);
854 /* mi_switch() drops thread_lock(). */
858 #ifdef CALLOUT_PROFILING
859 depth = gcalls = lockcalls = mpcalls = 0;
861 while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
862 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
863 softclock_call_cc(c, cc,
864 #ifdef CALLOUT_PROFILING
865 &mpcalls, &lockcalls, &gcalls,
868 #ifdef CALLOUT_PROFILING
872 #ifdef CALLOUT_PROFILING
873 avg_depth += (depth * 1000 - avg_depth) >> 8;
874 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
875 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
876 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
882 callout_when(sbintime_t sbt, sbintime_t precision, int flags,
883 sbintime_t *res, sbintime_t *prec_res)
885 sbintime_t to_sbt, to_pr;
887 if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
889 *prec_res = precision;
892 if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt)
894 if ((flags & C_HARDCLOCK) != 0 || sbt >= sbt_tickthreshold) {
 * Obtain the time of the last hardclock() call on
 * this CPU directly from kern_clocksource.c.
 * This value is per-CPU, but it is equal for all
 * active CPUs.
902 to_sbt = DPCPU_GET(hardclocktime);
905 to_sbt = DPCPU_GET(hardclocktime);
908 if (cold && to_sbt == 0)
909 to_sbt = sbinuptime();
910 if ((flags & C_HARDCLOCK) == 0)
913 to_sbt = sbinuptime();
914 if (SBT_MAX - to_sbt < sbt)
919 to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
920 sbt >> C_PRELGET(flags));
921 *prec_res = to_pr > precision ? to_pr : precision;
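/*
 * Illustrative example: a caller passing C_PREL(2) in flags requests a
 * relative precision of sbt >> 2, i.e. 25% of the requested interval;
 * the larger of that and the explicit 'precision' argument is reported.
 */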
925 * New interface; clients allocate their own callout structures.
927 * callout_reset() - establish or change a timeout
928 * callout_stop() - disestablish a timeout
929 * callout_init() - initialize a callout structure so that it can
930 * safely be passed to callout_reset() and callout_stop()
932 * <sys/callout.h> defines three convenience macros:
 * callout_active() - returns true if the callout has not been stopped,
 * drained, or deactivated since the last time the callout was
 * reset.
 * callout_pending() - returns true if the callout is still waiting for
 * its timeout to expire.
 * callout_deactivate() - marks the callout as having been serviced.
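/*
 * Illustrative sketch (not part of this file's logic): a typical consumer of
 * the interface above, assuming a hypothetical driver softc protected by a
 * mutex. It is compiled out; all "example_*" names are placeholders.
 */
#if 0
struct example_softc {
	struct mtx	sc_mtx;
	struct callout	sc_timer;
};

static void
example_timeout(void *arg)
{
	struct example_softc *sc = arg;

	/* With callout_init_mtx(), the handler runs with sc_mtx held. */
	mtx_assert(&sc->sc_mtx, MA_OWNED);
	/* ... do periodic work, then re-arm one second out ... */
	callout_reset(&sc->sc_timer, hz, example_timeout, sc);
}

static void
example_attach(struct example_softc *sc)
{
	mtx_init(&sc->sc_mtx, "example", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_timer, &sc->sc_mtx, 0);
	mtx_lock(&sc->sc_mtx);
	callout_reset(&sc->sc_timer, hz, example_timeout, sc);
	mtx_unlock(&sc->sc_mtx);
}

static void
example_detach(struct example_softc *sc)
{
	/* callout_drain() waits for a running handler to finish. */
	callout_drain(&sc->sc_timer);
	mtx_destroy(&sc->sc_mtx);
}
#endif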
941 callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
942 callout_func_t *ftn, void *arg, int cpu, int flags)
944 sbintime_t to_sbt, precision;
945 struct callout_cpu *cc;
946 int cancelled, direct;
949 callout_when(sbt, prec, flags, &to_sbt, &precision);
 * This flag used to be set by callout_cc_add(), but the
 * first time this function is called we could end up with the
 * wrong value of direct if we don't set it before the add.
956 if (flags & C_DIRECT_EXEC) {
961 KASSERT(!direct || c->c_lock == NULL ||
962 (LOCK_CLASS(c->c_lock)->lc_flags & LC_SPINLOCK),
963 ("%s: direct callout %p has non-spin lock", __func__, c));
965 cc = callout_lock(c);
968 KASSERT(cpu >= 0 && cpu <= mp_maxid && !CPU_ABSENT(cpu),
969 ("%s: invalid cpu %d", __func__, cpu));
971 if (cc_exec_curr(cc, direct) == c) {
973 * We're being asked to reschedule a callout which is
974 * currently in progress. If there is a lock then we
975 * can cancel the callout if it has not really started.
977 if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
978 cancelled = cc_exec_cancel(cc, direct) = true;
979 if (cc_exec_waiting(cc, direct) || cc_exec_drain(cc, direct)) {
981 * Someone has called callout_drain to kill this
982 * callout. Don't reschedule.
984 CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
985 cancelled ? "cancelled" : "failed to cancel",
986 c, c->c_func, c->c_arg);
991 if (callout_migrating(c)) {
 * This only occurs when a second callout_reset_sbt_on()
 * call is made after a previous one moved the callout into
 * deferred migration (below). Note we do *not* change
 * the prev_cpu even though the previous target may
 * be different.
999 cc_migration_cpu(cc, direct) = cpu;
1000 cc_migration_time(cc, direct) = to_sbt;
1001 cc_migration_prec(cc, direct) = precision;
1002 cc_migration_func(cc, direct) = ftn;
1003 cc_migration_arg(cc, direct) = arg;
1010 if (c->c_iflags & CALLOUT_PENDING) {
1011 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
1012 if (cc_exec_next(cc) == c)
1013 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1014 LIST_REMOVE(c, c_links.le);
1016 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
c->c_iflags &= ~CALLOUT_PENDING;
c->c_flags &= ~CALLOUT_ACTIVE;
 * If the callout must migrate, try to perform the migration immediately.
 * If the callout is currently running, just defer the migration
 * to a more appropriate moment.
1029 if (c->c_cpu != cpu) {
1030 if (cc_exec_curr(cc, direct) == c) {
 * Pending will have been removed since we are
 * actually executing the callout on another
 * CPU. That callout should be waiting on the
 * lock the caller holds. If we set both
 * active and pending after we return and the
 * lock on the executing callout proceeds, it
 * will then see that pending is true and return.
 * When the actual callout execution returns,
 * the migration will occur in softclock_call_cc()
 * and this new callout will be placed on the
 * new CPU via a call to callout_cpu_switch(), which
 * will get the lock on the right CPU, followed
 * by a call to callout_cc_add() which will add it there.
 * (See above in softclock_call_cc().)
1047 cc_migration_cpu(cc, direct) = cpu;
1048 cc_migration_time(cc, direct) = to_sbt;
1049 cc_migration_prec(cc, direct) = precision;
1050 cc_migration_func(cc, direct) = ftn;
1051 cc_migration_arg(cc, direct) = arg;
1052 c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
1053 c->c_flags |= CALLOUT_ACTIVE;
1055 "migration of %p func %p arg %p in %d.%08x to %u deferred",
1056 c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1057 (u_int)(to_sbt & 0xffffffff), cpu);
1061 cc = callout_cpu_switch(c, cc, cpu);
1065 callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
1066 CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
1067 cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1068 (u_int)(to_sbt & 0xffffffff));
1075 * Common idioms that can be optimized in the future.
1078 callout_schedule_on(struct callout *c, int to_ticks, int cpu)
1080 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
1084 callout_schedule(struct callout *c, int to_ticks)
1086 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
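/*
 * Note: these wrappers re-arm the callout with the function and argument
 * recorded by the previous callout_reset*() call. With the hypothetical
 * softc from the sketch above, callout_schedule(&sc->sc_timer, hz) would
 * fire example_timeout(sc) again roughly one second later.
 */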
1090 _callout_stop_safe(struct callout *c, int flags, callout_func_t *drain)
1092 struct callout_cpu *cc, *old_cc;
1093 struct lock_class *class;
1094 int direct, sq_locked, use_lock;
1095 int cancelled, not_on_a_list;
1097 if ((flags & CS_DRAIN) != 0)
1098 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
1099 "calling %s", __func__);
1101 KASSERT((flags & CS_DRAIN) == 0 || drain == NULL,
1102 ("Cannot set drain callback and CS_DRAIN flag at the same time"));
1105 * Some old subsystems don't hold Giant while running a callout_stop(),
1106 * so just discard this check for the moment.
1108 if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
1109 if (c->c_lock == &Giant.lock_object)
1110 use_lock = mtx_owned(&Giant);
1113 class = LOCK_CLASS(c->c_lock);
1114 class->lc_assert(c->c_lock, LA_XLOCKED);
1118 if (c->c_iflags & CALLOUT_DIRECT) {
1126 cc = callout_lock(c);
1128 if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
1129 (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
1130 ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
 * Special case where this slipped in while we
 * were migrating *as* the callout is about to
 * execute. The caller probably holds the lock
 * the callout wants.
 *
 * Get rid of the migration first. Then set
 * the flag that tells this code *not* to
 * try to remove it from any lists (it's not
 * on one yet). When the callout wheel runs,
 * it will ignore this callout.
1143 c->c_iflags &= ~CALLOUT_PENDING;
1144 c->c_flags &= ~CALLOUT_ACTIVE;
 * If the callout was migrating while the callout cpu lock was
 * dropped, just drop the sleepqueue lock and check the states
 * again.
1155 if (sq_locked != 0 && cc != old_cc) {
1158 sleepq_release(&cc_exec_waiting(old_cc, direct));
1163 panic("migration should not happen");
1168 * If the callout is running, try to stop it or drain it.
1170 if (cc_exec_curr(cc, direct) == c) {
 * Whether or not we succeed in stopping it, we must clear the
 * active flag - this is what API users expect. If we're
 * draining and the callout is currently executing, first wait
 * until it finishes.
1177 if ((flags & CS_DRAIN) == 0)
1178 c->c_flags &= ~CALLOUT_ACTIVE;
1180 if ((flags & CS_DRAIN) != 0) {
 * The current callout is running (or just
 * about to run) and blocking is allowed, so
 * just wait for the current invocation to
 * finish.
1187 if (cc_exec_curr(cc, direct) == c) {
 * Use direct calls to the sleepqueue interface
 * instead of cv/msleep in order to avoid
 * a LOR between cc_lock and the sleepqueue
 * chain spinlocks. This piece of code
 * effectively emulates an msleep_spin() call.
 *
 * If we already have the sleepqueue chain
 * locked, then we can safely block. If we
 * don't already have it locked, however,
 * we have to drop the cc_lock to lock
 * it. This opens several races, so we
 * restart at the beginning once we have
 * both locks. If nothing has changed, then
 * we will end up back here with sq_locked
 * set.
1208 &cc_exec_waiting(cc, direct));
 * Migration could be cancelled here, but
 * as long as it is still not certain when the
 * migration will actually take place, just let
 * softclock() take care of it.
1220 cc_exec_waiting(cc, direct) = true;
1224 &cc_exec_waiting(cc, direct),
1225 &cc->cc_lock.lock_object, "codrain",
1228 &cc_exec_waiting(cc, direct),
1233 /* Reacquire locks previously released. */
1237 c->c_flags &= ~CALLOUT_ACTIVE;
1238 } else if (use_lock &&
1239 !cc_exec_cancel(cc, direct) && (drain == NULL)) {
1242 * The current callout is waiting for its
1243 * lock which we hold. Cancel the callout
1244 * and return. After our caller drops the
1245 * lock, the callout will be skipped in
1246 * softclock(). This *only* works with a
1247 * callout_stop() *not* callout_drain() or
1248 * callout_async_drain().
1250 cc_exec_cancel(cc, direct) = true;
1251 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1252 c, c->c_func, c->c_arg);
1253 KASSERT(!cc_cce_migrating(cc, direct),
1254 ("callout wrongly scheduled for migration"));
1255 if (callout_migrating(c)) {
1256 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
1258 cc_migration_cpu(cc, direct) = CPUBLOCK;
1259 cc_migration_time(cc, direct) = 0;
1260 cc_migration_prec(cc, direct) = 0;
1261 cc_migration_func(cc, direct) = NULL;
1262 cc_migration_arg(cc, direct) = NULL;
1266 KASSERT(!sq_locked, ("sleepqueue chain locked"));
1268 } else if (callout_migrating(c)) {
 * The callout is currently being serviced
 * and the "next" callout is scheduled at
 * its completion with a migration. We remove
 * the migration flag so it *won't* get rescheduled,
 * but we can't stop the one that's running, so
 * we return 0.
1277 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
 * We can't call cc_cce_cleanup() here, since
 * it would clear cc_curr while the callout is
 * still running. Clearing just the migration
 * state prevents the callout from being
 * rescheduled when its execution completes.
1286 cc_migration_cpu(cc, direct) = CPUBLOCK;
1287 cc_migration_time(cc, direct) = 0;
1288 cc_migration_prec(cc, direct) = 0;
1289 cc_migration_func(cc, direct) = NULL;
1290 cc_migration_arg(cc, direct) = NULL;
1292 CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
1293 c, c->c_func, c->c_arg);
1295 KASSERT(cc_exec_drain(cc, direct) == NULL,
1296 ("callout drain function already set to %p",
1297 cc_exec_drain(cc, direct)));
1298 cc_exec_drain(cc, direct) = drain;
1303 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1304 c, c->c_func, c->c_arg);
1306 KASSERT(cc_exec_drain(cc, direct) == NULL,
1307 ("callout drain function already set to %p",
1308 cc_exec_drain(cc, direct)));
1309 cc_exec_drain(cc, direct) = drain;
1312 KASSERT(!sq_locked, ("sleepqueue chain still locked"));
1318 sleepq_release(&cc_exec_waiting(cc, direct));
1320 if ((c->c_iflags & CALLOUT_PENDING) == 0) {
1321 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1322 c, c->c_func, c->c_arg);
 * For a callout that is neither scheduled nor executing,
 * return a negative value.
1327 if (cc_exec_curr(cc, direct) != c)
1333 c->c_iflags &= ~CALLOUT_PENDING;
1334 c->c_flags &= ~CALLOUT_ACTIVE;
1336 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1337 c, c->c_func, c->c_arg);
1338 if (not_on_a_list == 0) {
1339 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
1340 if (cc_exec_next(cc) == c)
1341 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1342 LIST_REMOVE(c, c_links.le);
1344 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1352 callout_init(struct callout *c, int mpsafe)
1354 bzero(c, sizeof *c);
1357 c->c_iflags = CALLOUT_RETURNUNLOCKED;
1359 c->c_lock = &Giant.lock_object;
1362 c->c_cpu = cc_default_cpu;
1366 _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
1368 bzero(c, sizeof *c);
1370 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
1371 ("callout_init_lock: bad flags %d", flags));
1372 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
1373 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
1374 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & LC_SLEEPABLE),
1375 ("%s: callout %p has sleepable lock", __func__, c));
1376 c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
1377 c->c_cpu = cc_default_cpu;
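/*
 * Note: consumers normally reach this through the callout_init_mtx(),
 * callout_init_rm() and callout_init_rw() macros from <sys/callout.h>,
 * which pass the embedded lock_object of the given mutex, rmlock or
 * rwlock respectively.
 */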
1381 flssbt(sbintime_t sbt)
1384 sbt += (uint64_t)sbt >> 1;
1385 if (sizeof(long) >= sizeof(sbintime_t))
1388 return (flsl(((uint64_t)sbt) >> 32) + 32);
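/*
 * Note: adding sbt / 2 before taking fls() shifts the bucket boundaries
 * to roughly 2/3 and 4/3 of each power of two, so a value is counted in
 * the bucket of its nearest power of two rather than always rounding down.
 */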
 * Dump an immediate statistics snapshot of the scheduled callouts.
1396 sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
1398 struct callout *tmp;
1399 struct callout_cpu *cc;
1400 struct callout_list *sc;
1401 sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
1402 int ct[64], cpr[64], ccpbk[32];
1403 int error, val, i, count, tcum, pcum, maxc, c, medc;
1407 error = sysctl_handle_int(oidp, &val, 0, req);
1408 if (error != 0 || req->newptr == NULL)
1411 st = spr = maxt = maxpr = 0;
1412 bzero(ccpbk, sizeof(ccpbk));
1413 bzero(ct, sizeof(ct));
1414 bzero(cpr, sizeof(cpr));
1419 for (i = 0; i < callwheelsize; i++) {
1420 sc = &cc->cc_callwheel[i];
1422 LIST_FOREACH(tmp, sc, c_links.le) {
1424 t = tmp->c_time - now;
1428 spr += tmp->c_precision / SBT_1US;
1431 if (tmp->c_precision > maxpr)
1432 maxpr = tmp->c_precision;
1434 cpr[flssbt(tmp->c_precision)]++;
1438 ccpbk[fls(c + c / 2)]++;
1444 for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
1446 medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1447 for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
1449 medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1450 for (i = 0, c = 0; i < 32 && c < count / 2; i++)
1452 medc = (i >= 2) ? (1 << (i - 2)) : 0;
1454 printf("Scheduled callouts statistic snapshot:\n");
1455 printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n",
1456 count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
1457 printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n",
1459 count / callwheelsize / mp_ncpus,
1460 (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
1462 printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1463 medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
1464 (st / count) / 1000000, (st / count) % 1000000,
1465 maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
1466 printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1467 medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
1468 (spr / count) / 1000000, (spr / count) % 1000000,
1469 maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
1470 printf(" Distribution: \tbuckets\t time\t tcum\t"
1472 for (i = 0, tcum = pcum = 0; i < 64; i++) {
1473 if (ct[i] == 0 && cpr[i] == 0)
1475 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
1478 printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
1479 t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
1480 i - 1 - (32 - CC_HASH_SHIFT),
1481 ct[i], tcum, cpr[i], pcum);
1485 SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
1486 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1487 0, 0, sysctl_kern_callout_stat, "I",
1488 "Dump immediate statistic snapshot of the scheduled callouts");
1492 _show_callout(struct callout *c)
1495 db_printf("callout %p\n", c);
1496 #define C_DB_PRINTF(f, e) db_printf(" %s = " f "\n", #e, c->e);
1497 db_printf(" &c_links = %p\n", &(c->c_links));
1498 C_DB_PRINTF("%" PRId64, c_time);
1499 C_DB_PRINTF("%" PRId64, c_precision);
1500 C_DB_PRINTF("%p", c_arg);
1501 C_DB_PRINTF("%p", c_func);
1502 C_DB_PRINTF("%p", c_lock);
1503 C_DB_PRINTF("%#x", c_flags);
1504 C_DB_PRINTF("%#x", c_iflags);
1505 C_DB_PRINTF("%d", c_cpu);
1509 DB_SHOW_COMMAND(callout, db_show_callout)
1513 db_printf("usage: show callout <struct callout *>\n");
1517 _show_callout((struct callout *)addr);
1521 _show_last_callout(int cpu, int direct, const char *dirstr)
1523 struct callout_cpu *cc;
1527 func = cc_exec_last_func(cc, direct);
1528 arg = cc_exec_last_arg(cc, direct);
1529 db_printf("cpu %d last%s callout function: %p ", cpu, dirstr, func);
1530 db_printsym((db_expr_t)func, DB_STGY_ANY);
1531 db_printf("\ncpu %d last%s callout argument: %p\n", cpu, dirstr, arg);
1534 DB_SHOW_COMMAND(callout_last, db_show_callout_last)
1539 if (addr < 0 || addr > mp_maxid || CPU_ABSENT(addr)) {
1540 db_printf("no such cpu: %d\n", (int)addr);
1549 while (cpu <= last) {
1550 if (!CPU_ABSENT(cpu)) {
1551 _show_last_callout(cpu, 0, "");
1552 _show_last_callout(cpu, 1, " direct");