/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mtxcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0,
    "Average number of mtx callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
/* Exported to machdep.c and/or kern_clock.c. */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;
static struct callout *nextsoftcheck;	/* Next callout to be checked. */
/*
 * Locked by callout_lock:
 *   curr_callout    - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   curr_cancelled  - Changing to 1 with both callout_lock and c_mtx held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_mtx, and it calls
 *                     the handler only if curr_cancelled is still 0 after
 *                     c_mtx is successfully acquired.
 *   callout_wait    - If a thread is waiting in callout_drain(), then
 *                     callout_wait is nonzero.  Set only when
 *                     curr_callout is non-NULL.
 */
static struct callout *curr_callout;
static int curr_cancelled;
static int callout_wait;
/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate the callout wheel size: the smallest power of two
	 * that is >= ncallout, so an expiry time hashes to a bucket
	 * with a simple mask.
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	/* Carve the callout array and the wheel buckets out of 'v'. */
	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return (v);
}
/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
}
/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
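
/*
 * Illustrative sketch (not part of the original file): how a hashed timing
 * wheel maps an absolute expiry tick to a bucket.  Because callwheelsize is
 * a power of two, "c_time & callwheelmask" is equivalent to
 * "c_time % callwheelsize", so insertion and per-tick bucket lookup are O(1).
 * The function below is invented for illustration; it mirrors the hash that
 * softclock() and callout_reset() apply to this file's real globals.
 */
#if 0
static struct callout_tailq *
example_wheel_bucket(int c_time)
{

	return (&callwheel[c_time & callwheelmask]);
}
#endif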
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */
	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct mtx *c_mtx;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				/* Snapshot before the callout can be reused. */
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_mtx = c->c_mtx;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_func = NULL;
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
					    c_links.sle);
					curr_callout = NULL;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
					curr_callout = c;
				}
				curr_cancelled = 0;
				mtx_unlock_spin(&callout_lock);
				if (c_mtx != NULL) {
					mtx_lock(c_mtx);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (curr_cancelled) {
						mtx_unlock(c_mtx);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					curr_cancelled = 1;

					if (c_mtx == &Giant) {
						gcalls++;
						CTR1(KTR_CALLOUT, "callout %p",
						    c_func);
					} else {
						mtxcalls++;
						CTR1(KTR_CALLOUT,
						    "callout mtx %p", c_func);
					}
				} else {
					mpcalls++;
					CTR1(KTR_CALLOUT, "callout mpsafe %p",
					    c_func);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				c_func(c_arg);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					mtx_unlock(c_mtx);
			skip:
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (callout_wait) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					callout_wait = 0;
					wakeup(&callout_wait);
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	/* Exponentially weighted moving averages, scaled by 1000. */
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
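
/*
 * Usage sketch (illustration only, not part of the original file): the
 * legacy interface identifies a pending timeout by its handle together
 * with the original function/argument pair.  "xx_softc", "xx_expire",
 * "xx_start" and "xx_cancel" are invented names.
 */
#if 0
struct xx_softc {
	struct callout_handle xx_h;
};

static void
xx_expire(void *arg)
{
	/* Runs once, roughly hz ticks (one second) after xx_start(). */
}

static void
xx_start(struct xx_softc *sc)
{

	callout_handle_init(&sc->xx_h);	/* Make a later untimeout benign. */
	sc->xx_h = timeout(xx_expire, sc, hz);
}

static void
xx_cancel(struct xx_softc *sc)
{

	/* Both the handle and the original (ftn, arg) pair must match. */
	untimeout(xx_expire, sc, sc->xx_h);
}
#endif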
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}
void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}
void
callout_handle_init(struct callout_handle *handle)
{

	handle->callout = NULL;
}
/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
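
/*
 * Usage sketch (illustration only, not part of the original file): a
 * periodic tick that rearms itself from its own handler, using a
 * mutex-protected callout so the handler is serialized with the rest of
 * the driver.  "xx_softc", "xx_tick", "xx_attach" and "xx_detach" are
 * invented names.
 */
#if 0
struct xx_softc {
	struct mtx xx_mtx;
	struct callout xx_ch;
};

static void
xx_tick(void *arg)
{
	struct xx_softc *sc = arg;

	/*
	 * sc->xx_mtx is held here; without CALLOUT_RETURNUNLOCKED,
	 * softclock() drops it after this handler returns.
	 */
	/* ... periodic work ... */
	callout_reset(&sc->xx_ch, hz, xx_tick, sc);	/* rearm */
}

static void
xx_attach(struct xx_softc *sc)
{

	mtx_init(&sc->xx_mtx, "xx", NULL, MTX_DEF);
	callout_init_mtx(&sc->xx_ch, &sc->xx_mtx, 0);
	mtx_lock(&sc->xx_mtx);
	callout_reset(&sc->xx_ch, hz, xx_tick, sc);
	mtx_unlock(&sc->xx_mtx);
}

static void
xx_detach(struct xx_softc *sc)
{

	/* callout_drain() may sleep; never hold xx_mtx across it. */
	callout_drain(&sc->xx_ch);
	mtx_destroy(&sc->xx_mtx);
}
#endif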
int
callout_reset(c, to_ticks, ftn, arg)
	struct callout *c;
	int to_ticks;
	void (*ftn)(void *);
	void *arg;
{
	int cancelled = 0;

#ifdef notyet /* Some callers of timeout() do not hold Giant. */
	if (c->c_mtx != NULL)
		mtx_assert(c->c_mtx, MA_OWNED);
#endif

	mtx_lock_spin(&callout_lock);
	if (c == curr_callout) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a mutex then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_mtx != NULL && !curr_cancelled)
			cancelled = curr_cancelled = 1;
		if (callout_wait) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			mtx_unlock_spin(&callout_lock);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (nextsoftcheck == c) {
			nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;

		/*
		 * Part of the normal "stop a pending callout" process
		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
		 * flags.  We're not going to bother doing that here,
		 * because we're going to be setting those flags ten lines
		 * after this point, and we're holding callout_lock
		 * between now and then.
		 */
	}

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);

	return (cancelled);
}
int
_callout_stop_safe(c, safe)
	struct callout *c;
	int safe;
{
	int use_mtx;

	if (!safe && c->c_mtx != NULL) {
#ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
		mtx_assert(c->c_mtx, MA_OWNED);
		use_mtx = 1;
#else
		use_mtx = mtx_owned(c->c_mtx);
#endif
	} else {
		use_mtx = 0;
	}

	mtx_lock_spin(&callout_lock);
	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (c != curr_callout) {
			mtx_unlock_spin(&callout_lock);
			return (0);
		}
		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (c == curr_callout) {
				callout_wait = 1;
				msleep_spin(&callout_wait, &callout_lock,
				    "codrain", 0);
			}
		} else if (use_mtx && !curr_cancelled) {
			/*
			 * The current callout is waiting for its
			 * mutex which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * mutex, the callout will be skipped in
			 * softclock().
			 */
			curr_cancelled = 1;
			mtx_unlock_spin(&callout_lock);
			return (1);
		}
		mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}
void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_mtx = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_mtx = &Giant;
		c->c_flags = 0;
	}
}
void
callout_init_mtx(c, mtx, flags)
	struct callout *c;
	struct mtx *mtx;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_mtx = mtx;
	KASSERT((flags & ~CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_mtx: bad flags %d", flags));
	/* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
	KASSERT(mtx != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_mtx: CALLOUT_RETURNUNLOCKED with no mutex"));
	c->c_flags = flags & CALLOUT_RETURNUNLOCKED;
}
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */