2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
40 #include "opt_kdtrace.h"
42 #include <sys/param.h>
43 #include <sys/systm.h>
45 #include <sys/callout.h>
46 #include <sys/condvar.h>
47 #include <sys/interrupt.h>
48 #include <sys/kernel.h>
51 #include <sys/malloc.h>
52 #include <sys/mutex.h>
55 #include <sys/sleepqueue.h>
56 #include <sys/sysctl.h>
60 #include <machine/cpu.h>
63 SDT_PROVIDER_DEFINE(callout_execute);
64 SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
65 SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
67 SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
68 SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
72 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
73 "Average number of items examined per softclock call. Units = 1/1000");
74 static int avg_gcalls;
75 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
76 "Average number of Giant callouts made per softclock call. Units = 1/1000");
77 static int avg_lockcalls;
78 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
79 "Average number of lock callouts made per softclock call. Units = 1/1000");
80 static int avg_mpcalls;
81 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
82 "Average number of MP callouts made per softclock call. Units = 1/1000");
85 * allocate more timeout table slots when table overflows.
87 int callwheelsize, callwheelbits, callwheelmask;
90 * The callout cpu migration entity represents the information necessary for
91 * describing the migrating callout to the new callout cpu.
92 * The cached information is needed to defer the migration when the
93 * migrating callout is already running.
97 void (*ce_migration_func)(void *);
98 void *ce_migration_arg;
100 int ce_migration_ticks;
105 * There is one struct callout_cpu per cpu, holding all relevant
106 * state for the callout processing thread on the individual CPU.
108 * cc_ticks is updated once per tick in callout_tick().
109 * It tracks the global 'ticks' but in a way that the individual
110 * threads need not worry about races in the order in which
111 * hardclock() and hardclock_cpu() run on the various CPUs.
112 * cc_softticks is advanced in callout_tick() to point to the
113 * first entry in cc_callwheel that may need handling. In turn,
114 * a softclock() is scheduled so it can serve the various entries i
115 * such that cc_softticks <= i <= cc_ticks.
116 * XXX maybe cc_softticks and cc_ticks should be volatile?
118 * cc_ticks is also used in callout_reset_on() to determine
119 * when the callout should be served.
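 *
 * For example, a callout with c_time = t always hangs off the bucket
 * cc_callwheel[t & callwheelmask], so the wheel needs only callwheelsize
 * slots no matter how far in the future a callout is scheduled.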
122 struct cc_mig_ent cc_migrating_entity;
124 struct callout *cc_callout;
125 struct callout_tailq *cc_callwheel;
126 struct callout_list cc_callfree;
127 struct callout *cc_next;
128 struct callout *cc_curr;
138 #define cc_migration_func cc_migrating_entity.ce_migration_func
139 #define cc_migration_arg cc_migrating_entity.ce_migration_arg
140 #define cc_migration_cpu cc_migrating_entity.ce_migration_cpu
141 #define cc_migration_ticks cc_migrating_entity.ce_migration_ticks
143 struct callout_cpu cc_cpu[MAXCPU];
144 #define CPUBLOCK MAXCPU
145 #define CC_CPU(cpu) (&cc_cpu[(cpu)])
146 #define CC_SELF() CC_CPU(PCPU_GET(cpuid))
148 struct callout_cpu cc_cpu;
149 #define CC_CPU(cpu) &cc_cpu
150 #define CC_SELF() &cc_cpu
152 #define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock)
153 #define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock)
154 #define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED)
156 static int timeout_cpu;
157 void (*callout_new_inserted)(int cpu, int ticks) = NULL;
159 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
163 * cc_curr - The callout currently being executed, if any.
164 * If cc_curr is non-NULL, threads waiting in
165 * callout_drain() will be woken up as soon as the
166 * relevant callout completes.
167 * cc_cancel - Changing to 1 with both cc_lock and c_lock held
168 * guarantees that the current callout will not run.
169 * The softclock() function sets this to 0 before it
170 * drops cc_lock to acquire c_lock, and it calls
171 * the handler only if cc_cancel is still 0 after
172 * c_lock is successfully acquired.
173 * cc_waiting - If a thread is waiting in callout_drain(), then
174 * cc_waiting is nonzero. Set only when
175 * cc_curr is non-NULL.
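 *
 * A minimal consumer-side sketch of the protocol described above; the
 * foo_softc layout and names are hypothetical and only for illustration:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		struct callout	sc_timer;
 *	};
 *
 *	Teardown: once callout_drain() returns, the handler is neither
 *	running nor pending, so the structure may be freed safely.
 *
 *	callout_drain(&sc->sc_timer);
 *	mtx_destroy(&sc->sc_mtx);
 *	free(sc, M_DEVBUF);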
179 * Resets the migration entity tied to a specific callout cpu.
182 cc_cme_cleanup(struct callout_cpu *cc)
186 cc->cc_migration_cpu = CPUBLOCK;
187 cc->cc_migration_ticks = 0;
188 cc->cc_migration_func = NULL;
189 cc->cc_migration_arg = NULL;
194 * Checks if migration is requested by a specific callout cpu.
197 cc_cme_migrating(struct callout_cpu *cc)
201 return (cc->cc_migration_cpu != CPUBLOCK);
208 * kern_timeout_callwheel_alloc() - kernel low-level callwheel initialization
210 * This code is called very early in the kernel initialization sequence,
211 * and may be called more than once.
214 kern_timeout_callwheel_alloc(caddr_t v)
216 struct callout_cpu *cc;
218 timeout_cpu = PCPU_GET(cpuid);
219 cc = CC_CPU(timeout_cpu);
221 * Calculate callout wheel size
223 for (callwheelsize = 1, callwheelbits = 0;
224 callwheelsize < ncallout;
225 callwheelsize <<= 1, ++callwheelbits)
227 callwheelmask = callwheelsize - 1;
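/*
 * For example, if ncallout were 1000, the loop above would finish with
 * callwheelsize = 1024, callwheelbits = 10 and callwheelmask = 0x3ff:
 * callwheelsize is the smallest power of two that is >= ncallout, so that
 * "time & callwheelmask" can be used as a cheap hash bucket index.
 */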
229 cc->cc_callout = (struct callout *)v;
230 v = (caddr_t)(cc->cc_callout + ncallout);
231 cc->cc_callwheel = (struct callout_tailq *)v;
232 v = (caddr_t)(cc->cc_callwheel + callwheelsize);
237 callout_cpu_init(struct callout_cpu *cc)
242 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
243 SLIST_INIT(&cc->cc_callfree);
244 for (i = 0; i < callwheelsize; i++) {
245 TAILQ_INIT(&cc->cc_callwheel[i]);
248 if (cc->cc_callout == NULL)
250 for (i = 0; i < ncallout; i++) {
251 c = &cc->cc_callout[i];
253 c->c_flags = CALLOUT_LOCAL_ALLOC;
254 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
260 * Switches the cpu tied to a specific callout.
261 * The function expects the incoming callout cpu to be locked and returns
262 * with the outgoing callout cpu locked.
264 static struct callout_cpu *
265 callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
267 struct callout_cpu *new_cc;
269 MPASS(c != NULL && cc != NULL);
273 * Avoid interrupts and preemption firing after the callout cpu
274 * is blocked, in order to avoid deadlocks, as the new thread
275 * may try to acquire the callout cpu lock.
280 new_cc = CC_CPU(new_cpu);
289 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
292 * This code is called just once, after the space reserved for the
293 * callout wheel has been finalized.
296 kern_timeout_callwheel_init(void)
298 callout_cpu_init(CC_CPU(timeout_cpu));
302 * Start standard softclock thread.
305 start_softclock(void *dummy)
307 struct callout_cpu *cc;
312 cc = CC_CPU(timeout_cpu);
313 if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
314 INTR_MPSAFE, &cc->cc_cookie))
315 panic("died while creating standard software ithreads");
318 if (cpu == timeout_cpu)
321 if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
322 INTR_MPSAFE, &cc->cc_cookie))
323 panic("died while creating standard software ithreads");
324 cc->cc_callout = NULL; /* Only cpu0 handles timeout(). */
325 cc->cc_callwheel = malloc(
326 sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
328 callout_cpu_init(cc);
333 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
338 struct callout_cpu *cc;
343 * Process callouts at a very low cpu priority, so we don't keep the
344 * relatively high clock interrupt priority any longer than necessary.
348 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
349 cc->cc_firsttick = cc->cc_ticks = ticks;
350 for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
351 bucket = cc->cc_softticks & callwheelmask;
352 if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
357 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
359 * swi_sched acquires the thread lock, so we don't want to call it
360 * with cc_lock held; incorrect locking order.
363 swi_sched(cc->cc_cookie, 0);
367 callout_tickstofirst(int limit)
369 struct callout_cpu *cc;
371 struct callout_tailq *sc;
376 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
377 curticks = cc->cc_ticks;
378 while (skip < ncallout && skip < limit) {
379 sc = &cc->cc_callwheel[(curticks + skip) & callwheelmask];
380 /* search scanning ticks */
381 TAILQ_FOREACH(c, sc, c_links.tqe) {
382 if (c->c_time - curticks <= ncallout)
388 cc->cc_firsttick = curticks + skip;
389 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
393 static struct callout_cpu *
394 callout_lock(struct callout *c)
396 struct callout_cpu *cc;
402 if (cpu == CPUBLOCK) {
403 while (c->c_cpu == CPUBLOCK)
418 callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
419 void (*func)(void *), void *arg, int cpu)
427 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
429 c->c_time = ticks + to_ticks;
430 TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
432 if ((c->c_time - cc->cc_firsttick) < 0 &&
433 callout_new_inserted != NULL) {
434 cc->cc_firsttick = c->c_time;
435 (*callout_new_inserted)(cpu,
436 to_ticks + (ticks - cc->cc_ticks));
441 callout_cc_del(struct callout *c, struct callout_cpu *cc)
444 if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
447 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
451 softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
452 int *lockcalls, int *gcalls)
454 void (*c_func)(void *);
456 struct lock_class *class;
457 struct lock_object *c_lock;
458 int c_flags, sharedlock;
460 struct callout_cpu *new_cc;
461 void (*new_func)(void *);
463 int new_cpu, new_ticks;
466 struct bintime bt1, bt2;
468 static uint64_t maxdt = 36893488147419102LL; /* 2 msec */
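/*
 * maxdt is 2 ms expressed in the fractional part of a struct bintime:
 * 0.002 * 2^64 ~= 36893488147419102.  Handlers that run longer than this
 * are reported by the "Expensive timeout(9) function" printf below.
 */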
469 static timeout_t *lastfunc;
472 KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
473 (CALLOUT_PENDING | CALLOUT_ACTIVE),
474 ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
475 class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
476 sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
480 c_flags = c->c_flags;
481 if (c->c_flags & CALLOUT_LOCAL_ALLOC)
482 c->c_flags = CALLOUT_LOCAL_ALLOC;
484 c->c_flags &= ~CALLOUT_PENDING;
488 if (c_lock != NULL) {
489 class->lc_lock(c_lock, sharedlock);
491 * The callout may have been cancelled
492 * while we switched locks.
495 class->lc_unlock(c_lock);
498 /* The callout cannot be stopped now. */
501 if (c_lock == &Giant.lock_object) {
503 CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
507 CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
512 CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
518 THREAD_NO_SLEEPING();
519 SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
521 SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
522 THREAD_SLEEPING_OK();
525 bintime_sub(&bt2, &bt1);
526 if (bt2.frac > maxdt) {
527 if (lastfunc != c_func || bt2.frac > maxdt * 2) {
528 bintime2timespec(&bt2, &ts2);
530 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
531 c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
537 CTR1(KTR_CALLOUT, "callout %p finished", c);
538 if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
539 class->lc_unlock(c_lock);
542 KASSERT(cc->cc_curr == c, ("mishandled cc_curr"));
544 if (cc->cc_waiting) {
546 * There is someone waiting for the
547 * callout to complete.
548 * If the callout was scheduled for
549 * migration, just cancel it.
551 if (cc_cme_migrating(cc)) {
555 * It should be asserted here that the callout is not
556 * destroyed, but that is not easy.
558 c->c_flags &= ~CALLOUT_DFRMIGRATION;
562 wakeup(&cc->cc_waiting);
564 } else if (cc_cme_migrating(cc)) {
565 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
566 ("Migrating legacy callout %p", c));
569 * If the callout was scheduled for
570 * migration, just perform it now.
572 new_cpu = cc->cc_migration_cpu;
573 new_ticks = cc->cc_migration_ticks;
574 new_func = cc->cc_migration_func;
575 new_arg = cc->cc_migration_arg;
579 * It should be asserted here that the callout is not destroyed,
580 * but that is not easy.
582 * As a first step, handle deferred callout stops.
584 if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
586 "deferred cancelled %p func %p arg %p",
587 c, new_func, new_arg);
588 callout_cc_del(c, cc);
591 c->c_flags &= ~CALLOUT_DFRMIGRATION;
593 new_cc = callout_cpu_switch(c, cc, new_cpu);
594 callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
599 panic("migration should not happen");
603 * If the current callout is locally allocated (from
604 * timeout(9)) then put it on the freelist.
606 * Note: we need to check the cached copy of c_flags because
607 * if it was not local, then it's not safe to deref the
610 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
611 c->c_flags == CALLOUT_LOCAL_ALLOC,
612 ("corrupted callout"));
613 if (c_flags & CALLOUT_LOCAL_ALLOC)
614 callout_cc_del(c, cc);
618 * The callout mechanism is based on the work of Adam M. Costello and
619 * George Varghese, published in a technical report entitled "Redesigning
620 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
621 * in FreeBSD by Justin T. Gibbs. The original work on the data structures
622 * used in this implementation was published by G. Varghese and T. Lauck in
623 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
624 * the Efficient Implementation of a Timer Facility" in the Proceedings of
625 * the 11th ACM Annual Symposium on Operating Systems Principles,
626 * Austin, Texas Nov 1987.
630 * Software (low priority) clock interrupt.
631 * Run periodic events from timeout queue.
636 struct callout_cpu *cc;
638 struct callout_tailq *bucket;
640 int steps; /* #steps since we last allowed interrupts */
646 #ifndef MAX_SOFTCLOCK_STEPS
647 #define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
648 #endif /* MAX_SOFTCLOCK_STEPS */
655 cc = (struct callout_cpu *)arg;
657 while (cc->cc_softticks - 1 != cc->cc_ticks) {
659 * cc_softticks may be modified by hard clock, so cache
660 * it while we work on a given bucket.
662 curticks = cc->cc_softticks;
664 bucket = &cc->cc_callwheel[curticks & callwheelmask];
665 c = TAILQ_FIRST(bucket);
668 if (c->c_time != curticks) {
669 c = TAILQ_NEXT(c, c_links.tqe);
671 if (steps >= MAX_SOFTCLOCK_STEPS) {
673 /* Give interrupts a chance. */
681 cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
682 TAILQ_REMOVE(bucket, c, c_links.tqe);
683 softclock_call_cc(c, cc, &mpcalls,
684 &lockcalls, &gcalls);
690 avg_depth += (depth * 1000 - avg_depth) >> 8;
691 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
692 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
693 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
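/*
 * The four running averages above decay exponentially: each softclock()
 * run folds in the new sample, scaled by 1000 (hence the "Units = 1/1000"
 * in the sysctl descriptions), with a weight of 1/256.
 */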
700 * Execute a function after a specified length of time.
703 * Cancel previous timeout function call.
705 * callout_handle_init --
706 * Initialize a handle so that using it with untimeout is benign.
708 * See AT&T BCI Driver Reference Manual for specification. This
709 * implementation differs from that one in that although an
710 * identification value is returned from timeout, the original
711 * arguments to timeout as well as the identifier are used to
712 * identify entries for untimeout.
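 *
 * A minimal usage sketch of this legacy interface (the foo_* names are
 * hypothetical and only for illustration).  Schedule foo_expire(sc) to run
 * roughly two seconds from now, then cancel it if it has not fired yet:
 *
 *	static void
 *	foo_expire(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *		...
 *	}
 *
 *	struct callout_handle th;
 *
 *	callout_handle_init(&th);
 *	th = timeout(foo_expire, sc, 2 * hz);
 *	...
 *	untimeout(foo_expire, sc, th);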
714 struct callout_handle
715 timeout(ftn, arg, to_ticks)
720 struct callout_cpu *cc;
722 struct callout_handle handle;
724 cc = CC_CPU(timeout_cpu);
726 /* Fill in the next free callout structure. */
727 new = SLIST_FIRST(&cc->cc_callfree);
729 /* XXX Attempt to malloc first */
730 panic("timeout table full");
731 SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
732 callout_reset(new, to_ticks, ftn, arg);
733 handle.callout = new;
740 untimeout(ftn, arg, handle)
743 struct callout_handle handle;
745 struct callout_cpu *cc;
748 * Check for a handle that was initialized
749 * by callout_handle_init, but never used
750 * for a real timeout.
752 if (handle.callout == NULL)
755 cc = callout_lock(handle.callout);
756 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
757 callout_stop(handle.callout);
762 callout_handle_init(struct callout_handle *handle)
764 handle->callout = NULL;
768 * New interface; clients allocate their own callout structures.
770 * callout_reset() - establish or change a timeout
771 * callout_stop() - disestablish a timeout
772 * callout_init() - initialize a callout structure so that it can
773 * safely be passed to callout_reset() and callout_stop()
775 * <sys/callout.h> defines three convenience macros:
777 * callout_active() - returns truth if callout has not been stopped,
778 * drained, or deactivated since the last time the callout was
780 * callout_pending() - returns truth if callout is still waiting for timeout
781 * callout_deactivate() - marks the callout as having been serviced
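 *
 * A minimal usage sketch of this interface (the foo_* names are hypothetical
 * and only for illustration).  The callout is associated with the caller's
 * mutex, so foo_handler() always runs with foo_mtx held:
 *
 *	struct mtx foo_mtx;
 *	struct callout foo_ch;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	callout_init_mtx(&foo_ch, &foo_mtx, 0);
 *
 *	mtx_lock(&foo_mtx);
 *	callout_reset(&foo_ch, hz, foo_handler, sc);
 *	mtx_unlock(&foo_mtx);
 *
 *	On teardown, callout_drain(&foo_ch) waits for a running handler to
 *	finish before the callout and the mutex are destroyed.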
784 callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
787 struct callout_cpu *cc;
791 * Don't allow migration of pre-allocated callouts lest they
794 if (c->c_flags & CALLOUT_LOCAL_ALLOC)
796 cc = callout_lock(c);
797 if (cc->cc_curr == c) {
799 * We're being asked to reschedule a callout which is
800 * currently in progress. If there is a lock then we
801 * can cancel the callout if it has not really started.
803 if (c->c_lock != NULL && !cc->cc_cancel)
804 cancelled = cc->cc_cancel = 1;
805 if (cc->cc_waiting) {
807 * Someone has called callout_drain to kill this
808 * callout. Don't reschedule.
810 CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
811 cancelled ? "cancelled" : "failed to cancel",
812 c, c->c_func, c->c_arg);
817 if (c->c_flags & CALLOUT_PENDING) {
818 if (cc->cc_next == c) {
819 cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
821 TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
825 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
830 * If the callout must migrate try to perform it immediately.
831 * If the callout is currently running, just defer the migration
832 * to a more appropriate moment.
834 if (c->c_cpu != cpu) {
835 if (cc->cc_curr == c) {
836 cc->cc_migration_cpu = cpu;
837 cc->cc_migration_ticks = to_ticks;
838 cc->cc_migration_func = ftn;
839 cc->cc_migration_arg = arg;
840 c->c_flags |= CALLOUT_DFRMIGRATION;
842 "migration of %p func %p arg %p in %d to %u deferred",
843 c, c->c_func, c->c_arg, to_ticks, cpu);
847 cc = callout_cpu_switch(c, cc, cpu);
851 callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
852 CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
853 cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
860 * Common idioms that can be optimized in the future.
863 callout_schedule_on(struct callout *c, int to_ticks, int cpu)
865 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
869 callout_schedule(struct callout *c, int to_ticks)
871 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
875 _callout_stop_safe(c, safe)
879 struct callout_cpu *cc, *old_cc;
880 struct lock_class *class;
881 int use_lock, sq_locked;
884 * Some old subsystems don't hold Giant while running a callout_stop(),
885 * so just discard this check for the moment.
887 if (!safe && c->c_lock != NULL) {
888 if (c->c_lock == &Giant.lock_object)
889 use_lock = mtx_owned(&Giant);
892 class = LOCK_CLASS(c->c_lock);
893 class->lc_assert(c->c_lock, LA_XLOCKED);
901 cc = callout_lock(c);
904 * If the callout was migrating while the callout cpu lock was
905 * dropped, just drop the sleepqueue lock and check the states
908 if (sq_locked != 0 && cc != old_cc) {
911 sleepq_release(&old_cc->cc_waiting);
916 panic("migration should not happen");
921 * If the callout isn't pending, it's not on the queue, so
922 * don't attempt to remove it from the queue. We can try to
923 * stop it by other means however.
925 if (!(c->c_flags & CALLOUT_PENDING)) {
926 c->c_flags &= ~CALLOUT_ACTIVE;
929 * If it wasn't on the queue and it isn't the current
930 * callout, then we can't stop it, so just bail.
932 if (cc->cc_curr != c) {
933 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
934 c, c->c_func, c->c_arg);
937 sleepq_release(&cc->cc_waiting);
943 * The current callout is running (or just
944 * about to run) and blocking is allowed, so
945 * just wait for the current invocation to
948 while (cc->cc_curr == c) {
951 * Use direct calls to sleepqueue interface
952 * instead of cv/msleep in order to avoid
953 * a LOR between cc_lock and sleepqueue
954 * chain spinlocks. This piece of code
955 * effectively emulates a msleep_spin() call.
957 * If we already have the sleepqueue chain
958 * locked, then we can safely block. If we
959 * don't already have it locked, however,
960 * we have to drop the cc_lock to lock
961 * it. This opens several races, so we
962 * restart at the beginning once we have
963 * both locks. If nothing has changed, then
964 * we will end up back here with sq_locked
969 sleepq_lock(&cc->cc_waiting);
976 * Migration could be cancelled here, but
977 * since it is not yet certain when it
978 * will be packed up, just let softclock()
984 sleepq_add(&cc->cc_waiting,
985 &cc->cc_lock.lock_object, "codrain",
987 sleepq_wait(&cc->cc_waiting, 0);
991 /* Reacquire locks previously released. */
995 } else if (use_lock && !cc->cc_cancel) {
997 * The current callout is waiting for its
998 * lock which we hold. Cancel the callout
999 * and return. After our caller drops the
1000 * lock, the callout will be skipped in
1004 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1005 c, c->c_func, c->c_arg);
1006 KASSERT(!cc_cme_migrating(cc),
1007 ("callout wrongly scheduled for migration"));
1009 KASSERT(!sq_locked, ("sleepqueue chain locked"));
1011 } else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
1012 c->c_flags &= ~CALLOUT_DFRMIGRATION;
1013 CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
1014 c, c->c_func, c->c_arg);
1018 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1019 c, c->c_func, c->c_arg);
1021 KASSERT(!sq_locked, ("sleepqueue chain still locked"));
1025 sleepq_release(&cc->cc_waiting);
1027 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
1029 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1030 c, c->c_func, c->c_arg);
1031 if (cc->cc_next == c)
1032 cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
1033 TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
1035 callout_cc_del(c, cc);
1042 callout_init(c, mpsafe)
1046 bzero(c, sizeof *c);
1049 c->c_flags = CALLOUT_RETURNUNLOCKED;
1051 c->c_lock = &Giant.lock_object;
1054 c->c_cpu = timeout_cpu;
1058 _callout_init_lock(c, lock, flags)
1060 struct lock_object *lock;
1063 bzero(c, sizeof *c);
1065 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
1066 ("callout_init_lock: bad flags %d", flags));
1067 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
1068 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
1069 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
1070 (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
1072 c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
1073 c->c_cpu = timeout_cpu;
1076 #ifdef APM_FIXUP_CALLTODO
1078 * Adjust the kernel calltodo timeout list. This routine is used after
1079 * an APM resume to recalculate the calltodo timer list values with the
1080 * number of hz's we have been sleeping. The next hardclock() will detect
1081 * that there are fired timers and run softclock() to execute them.
1083 * Please note, I have not done an exhaustive analysis of what code this
1084 * might break. I am motivated to have my select()'s and alarm()'s that
1085 * have expired during suspend firing upon resume so that the applications
1086 * which set the timer can do the maintenance the timer was for as close
1087 * as possible to the originally intended time. Testing this code for a
1088 * week showed that resuming from a suspend resulted in 22 to 25 timers
1089 * firing, which seemed independent of whether the suspend was 2 hours or
1090 * 2 days. Your mileage may vary. - Ken Key <key@cs.utk.edu>
1093 adjust_timeout_calltodo(time_change)
1094 struct timeval *time_change;
1096 register struct callout *p;
1097 unsigned long delta_ticks;
1100 * How many ticks were we asleep?
1101 * (stolen from tvtohz()).
1104 /* Don't do anything */
1105 if (time_change->tv_sec < 0)
1107 else if (time_change->tv_sec <= LONG_MAX / 1000000)
1108 delta_ticks = (time_change->tv_sec * 1000000 +
1109 time_change->tv_usec + (tick - 1)) / tick + 1;
1110 else if (time_change->tv_sec <= LONG_MAX / hz)
1111 delta_ticks = time_change->tv_sec * hz +
1112 (time_change->tv_usec + (tick - 1)) / tick + 1;
1114 delta_ticks = LONG_MAX;
1116 if (delta_ticks > INT_MAX)
1117 delta_ticks = INT_MAX;
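/*
 * Example: with hz = 1000 (tick = 1000 us), a 90 second suspend gives
 * delta_ticks = (90 * 1000000 + 0 + 999) / 1000 + 1 = 90001.
 */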
1120 * Now rip through the timer calltodo list looking for timers
1124 /* don't collide with softclock() */
1126 for (p = calltodo.c_next; p != NULL; p = p->c_next) {
1127 p->c_time -= delta_ticks;
1129 /* Break if the timer had more time on it than delta_ticks */
1133 /* take back the ticks the timer didn't use (p->c_time <= 0) */
1134 delta_ticks = -p->c_time;
1140 #endif /* APM_FIXUP_CALLTODO */