4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Portions Copyright 2008 John Birrell <jb@freebsd.org>
26 * This is a simplified version of the cyclic timer subsystem from
27 * OpenSolaris. In the FreeBSD version, we don't use interrupt levels.
31 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
32 * Use is subject to license terms.
36 * The Cyclic Subsystem
37 * --------------------
41 * Historically, most computer architectures have specified interval-based
42 * timer parts (e.g. SPARCstation's counter/timer; Intel's i8254). While
43 * these parts deal in relative (i.e. not absolute) time values, they are
44 * typically used by the operating system to implement the abstraction of
45 * absolute time. As a result, these parts cannot typically be reprogrammed
46 * without introducing error in the system's notion of time.
48 * Starting in about 1994, chip architectures began specifying high resolution
49 * timestamp registers. As of this writing (1999), all major chip families
50 * (UltraSPARC, PentiumPro, MIPS, PowerPC, Alpha) have high resolution
51 * timestamp registers, and two (UltraSPARC and MIPS) have added the capacity
52 * to interrupt based on timestamp values. These timestamp-compare registers
53 * present a time-based interrupt source which can be reprogrammed arbitrarily
54 * often without introducing error. Given the low cost of implementing such a
55 * timestamp-compare register (and the tangible benefit of eliminating
56 * discrete timer parts), it is reasonable to expect that future chip
57 * architectures will adopt this feature.
59 * The cyclic subsystem has been designed to take advantage of chip
60 * architectures with the capacity to interrupt based on absolute, high
61 * resolution values of time.
65 * The cyclic subsystem is a low-level kernel subsystem designed to provide
66 * arbitrarily high resolution, per-CPU interval timers (to avoid colliding
67 * with existing terms, we dub such an interval timer a "cyclic").
68 * Alternatively, a cyclic may be specified to be "omnipresent", denoting
69 * firing on all online CPUs.
71 * Cyclic Subsystem Interface Overview
72 * -----------------------------------
74 * The cyclic subsystem has interfaces with the kernel at-large, with other
75 * kernel subsystems (e.g. the processor management subsystem, the checkpoint
76 * resume subsystem) and with the platform (the cyclic backend). Each
77 * of these interfaces is given a brief synopsis here, and is described
78 * in full above the interface's implementation.
80 * The following diagram displays the cyclic subsystem's interfaces to
81 * other kernel components. The arrows denote a "calls" relationship, with
82 * the large arrow indicating the cyclic subsystem's consumer interface.
83 * Each arrow is labeled with the section in which the corresponding
84 * interface is described.
86 * Kernel at-large consumers
87 * -----------++------------
93 * +---------------------+
95 * | Cyclic subsystem |<----------- Other kernel subsystems
97 * +---------------------+
102 * +---------------------+
105 * | (platform specific) |
107 * +---------------------+
110 * Kernel At-Large Interfaces
112 * cyclic_add() <-- Creates a cyclic
113 * cyclic_add_omni() <-- Creates an omnipresent cyclic
114 * cyclic_remove() <-- Removes a cyclic
118 * cyclic_init() <-- Initializes the cyclic subsystem
119 * cyclic_fire() <-- Interrupt entry point
121 * The backend-supplied interfaces (through the cyc_backend structure) are
122 * documented in detail in <sys/cyclic_impl.h>
125 * Cyclic Subsystem Implementation Overview
126 * ----------------------------------------
128 * The cyclic subsystem is designed to minimize interference between cyclics
129 * on different CPUs. Thus, all of the cyclic subsystem's data structures
130 * hang off of a per-CPU structure, cyc_cpu.
132 * Each cyc_cpu has a power-of-two sized array of cyclic structures (the
133 * cyp_cyclics member of the cyc_cpu structure). If cyclic_add() is called
134 * and there does not exist a free slot in the cyp_cyclics array, the size of
135 * the array will be doubled. The array will never shrink. Cyclics are
136 * referred to by their index in the cyp_cyclics array, which is of type
139 * The cyclics are kept sorted by expiration time in the cyc_cpu's heap. The
140 * heap is keyed by cyclic expiration time, with parents expiring earlier
141 * than their children.
145 * The heap is managed primarily by cyclic_fire(). Upon entry, cyclic_fire()
146 * compares the root cyclic's expiration time to the current time. If the
147 * expiration time is in the past, cyclic_expire() is called on the root
148 * cyclic. Upon return from cyclic_expire(), the cyclic's new expiration time
149 * is derived by adding its interval to its old expiration time, and a
150 * downheap operation is performed. After the downheap, cyclic_fire()
151 * examines the (potentially changed) root cyclic, repeating the
152 * cyclic_expire()/add interval/cyclic_downheap() sequence until the root
153 * cyclic has an expiration time in the future. This expiration time
154 * (guaranteed to be the earliest in the heap) is then communicated to the
155 * backend via cyb_reprogram. Optimal backends will next call cyclic_fire()
156 * shortly after the root cyclic's expiration time.
158 * To allow efficient, deterministic downheap operations, we implement the
159 * heap as an array (the cyp_heap member of the cyc_cpu structure), with each
160 * element containing an index into the CPU's cyp_cyclics array.
162 * The heap is laid out in the array according to the following:
164 * 1. The root of the heap is always in the 0th element of the heap array
165 * 2. The left and right children of the nth element are element
166 * (((n + 1) << 1) - 1) and element ((n + 1) << 1), respectively.
168 * This layout is standard (see, e.g., Cormen's "Algorithms"); the proof
169 * that these constraints correctly lay out a heap (or indeed, any binary
170 * tree) is trivial and left to the reader.
172 * To see the heap by example, assume our cyclics array has the following
173 * members (at time t):
175 * cy_handler cy_expire
176 * ---------------------------------------------
177 * [ 0] clock() t+10000000
178 * [ 1] deadman() t+1000000000
179 * [ 2] clock_highres_fire() t+100
180 * [ 3] clock_highres_fire() t+1000
181 * [ 4] clock_highres_fire() t+500
186 * The heap array could be:
188 * [0] [1] [2] [3] [4] [5] [6] [7]
189 * +-----+-----+-----+-----+-----+-----+-----+-----+
191 * | 2 | 3 | 4 | 0 | 1 | x | x | x |
193 * +-----+-----+-----+-----+-----+-----+-----+-----+
195 * Graphically, this array corresponds to the following (excuse the ASCII art):
199 * +------------------+------------------+
202 * +---------+--------+
205 * Note that the heap is laid out by layer: all nodes at a given depth are
206 * stored in consecutive elements of the array. Moreover, layers of
207 * consecutive depths are in adjacent element ranges. This property
208 * guarantees high locality of reference during downheap operations.
209 * Specifically, we are guaranteed that we can downheap to a depth of
211 * lg (cache_line_size / sizeof (cyc_index_t))
213 * nodes with at most one cache miss. On UltraSPARC (64 byte e-cache line
214 * size), this corresponds to a depth of four nodes. Thus, if there are
215 * fewer than sixteen cyclics in the heap, downheaps on UltraSPARC miss at
216 * most once in the e-cache.
218 * Downheaps are required to compare siblings as they proceed down the
219 * heap. For downheaps proceeding beyond the one-cache-miss depth, every
220 * access to a left child could potentially miss in the cache. However,
223 * (cache_line_size / sizeof (cyc_index_t)) > 2,
225 * then all siblings are guaranteed to be on the same cache line. Thus, the
226 * miss on the left child will guarantee a hit on the right child; downheaps
227 * will incur at most one cache miss per layer beyond the one-cache-miss
228 * depth. The total number of cache misses for heap management during a
229 * downheap operation is thus bounded by
231 * lg (n) - lg (cache_line_size / sizeof (cyc_index_t))
233 * Traditional pointer-based heaps are implemented without regard to
234 * locality. Downheaps can thus incur two cache misses per layer (one for
235 * each child), but at most one cache miss at the root. This yields a bound
240 * on the total cache misses.
242 * This difference may seem theoretically trivial (the difference is, after
243 * all, constant), but can become substantial in practice -- especially for
244 * caches with very large cache lines and high miss penalties (e.g. TLBs).
246 * Heaps must always be full, balanced trees. Heap management must therefore
247 * track the next point-of-insertion into the heap. In pointer-based heaps,
248 * recomputing this point takes O(lg (n)). Given the layout of the
249 * array-based implementation, however, the next point-of-insertion is
252 * heap[number_of_elements]
254 * We exploit this property by implementing the free-list in the unused
255 * heap elements. Heap insertion, therefore, consists only of filling in
256 * the cyclic at cyp_cyclics[cyp_heap[number_of_elements]], incrementing
257 * the number of elements, and performing an upheap. Heap deletion consists
258 * of decrementing the number of elements, swapping the to-be-deleted element
259 * with the element at cyp_heap[number_of_elements], and downheaping.
261 * Filling in more details in our earlier example:
263 * +--- free list head
267 * [0] [1] [2] [3] [4] [5] [6] [7]
268 * +-----+-----+-----+-----+-----+-----+-----+-----+
270 * | 2 | 3 | 4 | 0 | 1 | 5 | 6 | 7 |
272 * +-----+-----+-----+-----+-----+-----+-----+-----+
274 * To insert into this heap, we would just need to fill in the cyclic at
275 * cyp_cyclics[5], bump the number of elements (from 5 to 6) and perform
278 * If we wanted to remove, say, cyp_cyclics[3], we would first scan for it
279 * in the cyp_heap, and discover it at cyp_heap[1]. We would then decrement
280 * the number of elements (from 5 to 4), swap cyp_heap[1] with cyp_heap[4],
281 * and perform a downheap from cyp_heap[1]. The linear scan is required
282 * because the cyclic does not keep a backpointer into the heap. This makes
283 * heap manipulation (e.g. downheaps) faster at the expense of removal
288 * As alluded to above, cyclic_expire() is called by cyclic_fire() to expire
289 * a cyclic. Cyclic subsystem consumers are guaranteed that for an arbitrary
290 * time t in the future, their cyclic handler will have been called
291 * (t - cyt_when) / cyt_interval times. cyclic_expire() simply needs to call
296 * All of the discussion thus far has assumed a static number of cyclics.
297 * Obviously, static limitations are not practical; we need the capacity
298 * to resize our data structures dynamically.
300 * We resize our data structures lazily, and only on a per-CPU basis.
301 * The size of the data structures always doubles and never shrinks. We
302 * serialize adds (and thus resizes) on cpu_lock; we never need to deal
303 * with concurrent resizes. Resizes should be rare; they may induce jitter
304 * on the CPU being resized, but should not affect cyclic operation on other
307 * Two key cyc_cpu data structures need to be resized: the cyclics array,
308 * and the heap array. Resizing is relatively straightforward:
310 * 1. The new, larger arrays are allocated in cyclic_expand() (called
311 * from cyclic_add()).
312 * 2. The contents of the old arrays are copied into the new arrays.
313 * 3. The old cyclics array is bzero()'d
314 * 4. The pointers are updated.
318 * Cyclic removals should be rare. To simplify the implementation (and to
319 * allow optimization for the cyclic_fire()/cyclic_expire()
320 * path), we force removals and adds to serialize on cpu_lock.
323 #include <sys/cdefs.h>
324 #include <sys/param.h>
325 #include <sys/conf.h>
326 #include <sys/kernel.h>
327 #include <sys/lock.h>
329 #include <sys/cyclic_impl.h>
330 #include <sys/module.h>
331 #include <sys/systm.h>
332 #include <sys/atomic.h>
333 #include <sys/kmem.h>
334 #include <sys/cmn_err.h>
335 #include <sys/dtrace_bsd.h>
336 #include <machine/cpu.h>
338 static kmem_cache_t *cyclic_id_cache;
339 static cyc_id_t *cyclic_id_head;
340 static cyc_backend_t cyclic_backend;
342 static MALLOC_DEFINE(M_CYCLIC, "cyclic", "Cyclic timer subsystem");
344 static __inline hrtime_t
350 return ((hrtime_t)bt.sec * NANOSEC +
351 (((uint64_t)NANOSEC * (uint32_t)(bt.frac >> 32)) >> 32));
355 * Returns 1 if the upheap propagated to the root, 0 if it did not. This
356 * allows the caller to reprogram the backend only when the root has been
360 cyclic_upheap(cyc_cpu_t *cpu, cyc_index_t ndx)
364 cyc_index_t heap_parent, heap_current = ndx;
365 cyc_index_t parent, current;
367 if (heap_current == 0)
370 heap = cpu->cyp_heap;
371 cyclics = cpu->cyp_cyclics;
372 heap_parent = CYC_HEAP_PARENT(heap_current);
375 current = heap[heap_current];
376 parent = heap[heap_parent];
379 * We have an expiration time later than our parent; we're
382 if (cyclics[current].cy_expire >= cyclics[parent].cy_expire)
386 * We need to swap with our parent, and continue up the heap.
388 heap[heap_parent] = current;
389 heap[heap_current] = parent;
392 * If we just reached the root, we're done.
394 if (heap_parent == 0)
397 heap_current = heap_parent;
398 heap_parent = CYC_HEAP_PARENT(heap_current);
403 cyclic_downheap(cyc_cpu_t *cpu, cyc_index_t ndx)
405 cyclic_t *cyclics = cpu->cyp_cyclics;
406 cyc_index_t *heap = cpu->cyp_heap;
408 cyc_index_t heap_left, heap_right, heap_me = ndx;
409 cyc_index_t left, right, me;
410 cyc_index_t nelems = cpu->cyp_nelems;
414 * If we don't have a left child (i.e., we're a leaf), we're
417 if ((heap_left = CYC_HEAP_LEFT(heap_me)) >= nelems)
420 left = heap[heap_left];
423 heap_right = CYC_HEAP_RIGHT(heap_me);
426 * Even if we don't have a right child, we still need to compare
427 * our expiration time against that of our left child.
429 if (heap_right >= nelems)
432 right = heap[heap_right];
435 * We have both a left and a right child. We need to compare
436 * the expiration times of the children to determine which
439 if (cyclics[right].cy_expire < cyclics[left].cy_expire) {
441 * Our right child is the earlier of our children.
442 * We'll now compare our expiration time to its; if
443 * ours is the earlier, we're done.
445 if (cyclics[me].cy_expire <= cyclics[right].cy_expire)
449 * Our right child expires earlier than we do; swap
450 * with our right child, and descend right.
452 heap[heap_right] = me;
453 heap[heap_me] = right;
454 heap_me = heap_right;
460 * Our left child is the earlier of our children (or we have
461 * no right child). We'll now compare our expiration time
462 * to its; if ours is the earlier, we're done.
464 if (cyclics[me].cy_expire <= cyclics[left].cy_expire)
468 * Our left child expires earlier than we do; swap with our
469 * left child, and descend left.
471 heap[heap_left] = me;
472 heap[heap_me] = left;
478 cyclic_expire(cyc_cpu_t *cpu, cyc_index_t ndx, cyclic_t *cyclic)
480 cyc_func_t handler = cyclic->cy_handler;
481 void *arg = cyclic->cy_arg;
487 * cyclic_fire(cpu_t *)
491 * cyclic_fire() is the cyclic subsystem's interrupt handler.
492 * Called by the cyclic backend.
494 * Arguments and notes
496 * The only argument is the CPU on which the interrupt is executing;
497 * backends must call into cyclic_fire() on the specified CPU.
499 * cyclic_fire() may be called spuriously without ill effect. Optimal
500 * backends will call into cyclic_fire() at or shortly after the time
501 * requested via cyb_reprogram(). However, calling cyclic_fire()
502 * arbitrarily late will only manifest latency bubbles; the correctness
503 * of the cyclic subsystem does not rely on the timeliness of the backend.
505 * cyclic_fire() is wait-free; it will not block or spin.
/*
 * Interrupt entry point, called by the backend on the CPU "c".  Repeatedly
 * expires the root (earliest) cyclic while it is past due, pushing each
 * expired cyclic forward by its interval and re-heapifying, then asks the
 * backend to fire again at the new root's expiration time.
 * NOTE(review): this excerpt has lines elided (embedded line numbers jump);
 * comments describe only the visible code.
 */
513 cyclic_fire(cpu_t *c)
515 cyc_cpu_t *cpu = c->cpu_cyclic;
516 cyc_backend_t *be = cpu->cyp_backend;
517 cyc_index_t *heap = cpu->cyp_heap;
518 cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
519 void *arg = be->cyb_arg;
520 hrtime_t now = cyc_gethrtime();
/* Empty heap: nothing to expire. */
523 if (cpu->cyp_nelems == 0) {
524 /* This is a spurious fire. */
/* heap[0] is always the cyclic with the earliest expiration time. */
529 cyc_index_t ndx = heap[0];
531 cyclic = &cyclics[ndx];
533 ASSERT(!(cyclic->cy_flags & CYF_FREE));
/* Root expires in the future: the heap is fully serviced. */
535 if ((exp = cyclic->cy_expire) > now)
538 cyclic_expire(cpu, ndx, cyclic);
541 * If this cyclic will be set to next expire in the distant
542 * past, we have one of two situations:
544 * a) This is the first firing of a cyclic which had
545 * cy_expire set to 0.
547 * b) We are tragically late for a cyclic -- most likely
548 * due to being in the debugger.
550 * In either case, we set the new expiration time to be the
551 * next interval boundary. This assures that the
552 * expiration time modulo the interval is invariant.
554 * We arbitrarily define "distant" to be one second (one second
555 * is chosen because it's shorter than any foray to the
556 * debugger while still being longer than any legitimate
559 exp += cyclic->cy_interval;
561 if (now - exp > NANOSEC) {
562 hrtime_t interval = cyclic->cy_interval;
/* Snap forward to the first interval boundary strictly after "now". */
564 exp += ((now - exp) / interval + 1) * interval;
567 cyclic->cy_expire = exp;
568 cyclic_downheap(cpu, 0);
572 * Now we have a cyclic in the root slot which isn't in the past;
573 * reprogram the interrupt source.
575 be->cyb_reprogram(arg, exp);
/*
 * Cross-call target for cyclic_expand(): runs on the target CPU with the
 * per-CPU spin mutex held, copies the old heap and cyclics arrays into the
 * freshly allocated (doubled) arrays supplied in "arg", marks the new tail
 * slots CYF_FREE, and swings the cyp_* pointers to the new arrays.
 * NOTE(review): excerpt has elided lines; comments cover visible code only.
 */
579 cyclic_expand_xcall(cyc_xcallarg_t *arg)
581 cyc_cpu_t *cpu = arg->cyx_cpu;
582 cyc_index_t new_size = arg->cyx_size, size = cpu->cyp_size, i;
583 cyc_index_t *new_heap = arg->cyx_heap;
584 cyclic_t *cyclics = cpu->cyp_cyclics, *new_cyclics = arg->cyx_cyclics;
586 /* Disable preemption and interrupts. */
587 mtx_lock_spin(&cpu->cyp_mtx);
590 * Assert that the new size is a power of 2.
592 ASSERT((new_size & (new_size - 1)) == 0);
593 ASSERT(new_size == (size << 1));
594 ASSERT(cpu->cyp_heap != NULL && cpu->cyp_cyclics != NULL);
596 bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * size);
597 bcopy(cyclics, new_cyclics, sizeof (cyclic_t) * size);
600 * Set up the free list, and set all of the new cyclics to be CYF_FREE.
602 for (i = size; i < new_size; i++) {
604 new_cyclics[i].cy_flags = CYF_FREE;
608 * We can go ahead and plow the value of cyp_heap and cyp_cyclics;
609 * cyclic_expand() has kept a copy.
611 cpu->cyp_heap = new_heap;
612 cpu->cyp_cyclics = new_cyclics;
613 cpu->cyp_size = new_size;
614 mtx_unlock_spin(&cpu->cyp_mtx);
618 * cyclic_expand() will cross call onto the CPU to perform the actual
/*
 * Double the size of this CPU's cyclics and heap arrays (first call sizes
 * them to CY_DEFAULT_PERCPU).  Allocates the new arrays here (cpu_lock is
 * held, M_WAITOK is safe), cross-calls onto the target CPU so the swap in
 * cyclic_expand_xcall() happens under that CPU's spin mutex, then frees
 * the old arrays.
 * NOTE(review): excerpt has elided lines; comments cover visible code only.
 */
622 cyclic_expand(cyc_cpu_t *cpu)
624 cyc_index_t new_size, old_size;
625 cyc_index_t *new_heap, *old_heap;
626 cyclic_t *new_cyclics, *old_cyclics;
628 cyc_backend_t *be = cpu->cyp_backend;
630 ASSERT(MUTEX_HELD(&cpu_lock));
632 old_heap = cpu->cyp_heap;
633 old_cyclics = cpu->cyp_cyclics;
/* A current size of 0 means this CPU has never had arrays allocated. */
635 if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) {
636 new_size = CY_DEFAULT_PERCPU;
637 ASSERT(old_heap == NULL && old_cyclics == NULL);
641 * Check that the new_size is a power of 2.
643 ASSERT(((new_size - 1) & new_size) == 0);
645 new_heap = malloc(sizeof(cyc_index_t) * new_size, M_CYCLIC, M_WAITOK);
646 new_cyclics = malloc(sizeof(cyclic_t) * new_size, M_CYCLIC, M_ZERO | M_WAITOK);
649 arg.cyx_heap = new_heap;
650 arg.cyx_cyclics = new_cyclics;
651 arg.cyx_size = new_size;
653 be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
654 (cyc_func_t)cyclic_expand_xcall, &arg);
/* The xcall has swapped in the new arrays; release the old ones. */
656 if (old_cyclics != NULL) {
657 ASSERT(old_heap != NULL);
658 ASSERT(old_size != 0);
659 free(old_cyclics, M_CYCLIC);
660 free(old_heap, M_CYCLIC);
/*
 * Cross-call target for cyclic_add_here(): runs on the target CPU with the
 * per-CPU spin mutex held.  Claims the next free slot off the implicit
 * free list (cyp_heap[nelems]), fills in the cyclic from the handler/time
 * arguments, and upheaps; if the new cyclic became the root, the backend
 * is reprogrammed to its expiration time.
 * NOTE(review): excerpt has elided lines; comments cover visible code only.
 */
665 cyclic_add_xcall(cyc_xcallarg_t *arg)
667 cyc_cpu_t *cpu = arg->cyx_cpu;
668 cyc_handler_t *hdlr = arg->cyx_hdlr;
669 cyc_time_t *when = arg->cyx_when;
670 cyc_backend_t *be = cpu->cyp_backend;
671 cyc_index_t ndx, nelems;
672 cyb_arg_t bar = be->cyb_arg;
/* cyclic_add_here() guaranteed room before cross-calling us. */
675 ASSERT(cpu->cyp_nelems < cpu->cyp_size);
677 /* Disable preemption and interrupts. */
678 mtx_lock_spin(&cpu->cyp_mtx);
679 nelems = cpu->cyp_nelems++;
683 * If this is the first element, we need to enable the
684 * backend on this CPU.
/* cyp_heap[nelems] is the head of the free list (see file comment). */
689 ndx = cpu->cyp_heap[nelems];
690 cyclic = &cpu->cyp_cyclics[ndx];
692 ASSERT(cyclic->cy_flags == CYF_FREE);
693 cyclic->cy_interval = when->cyt_interval;
695 if (when->cyt_when == 0) {
697 * If a start time hasn't been explicitly specified, we'll
698 * start on the next interval boundary.
700 cyclic->cy_expire = (cyc_gethrtime() / cyclic->cy_interval + 1) *
703 cyclic->cy_expire = when->cyt_when;
706 cyclic->cy_handler = hdlr->cyh_func;
707 cyclic->cy_arg = hdlr->cyh_arg;
708 cyclic->cy_flags = arg->cyx_flags;
710 if (cyclic_upheap(cpu, nelems)) {
711 hrtime_t exp = cyclic->cy_expire;
714 * If our upheap propagated to the root, we need to
715 * reprogram the interrupt source.
717 be->cyb_reprogram(bar, exp);
719 mtx_unlock_spin(&cpu->cyp_mtx);
/*
 * Add a cyclic with handler "hdlr" and firing time "when" to the CPU
 * described by "cpu".  Expands the per-CPU arrays first if they are full,
 * then cross-calls onto the target CPU (cyclic_add_xcall) to perform the
 * insertion.  Returns the new cyclic's index (arg.cyx_ndx).  Caller must
 * hold cpu_lock, which serializes all adds/removes/resizes.
 * NOTE(review): excerpt has elided lines; comments cover visible code only.
 */
725 cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr,
726 cyc_time_t *when, uint16_t flags)
728 cyc_backend_t *be = cpu->cyp_backend;
729 cyb_arg_t bar = be->cyb_arg;
732 ASSERT(MUTEX_HELD(&cpu_lock));
733 ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE));
734 ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);
/* No free slot: grow the arrays before attempting the add. */
736 if (cpu->cyp_nelems == cpu->cyp_size) {
738 * This is expensive; it will cross call onto the other
739 * CPU to perform the expansion.
742 ASSERT(cpu->cyp_nelems < cpu->cyp_size);
746 * By now, we know that we're going to be able to successfully
747 * perform the add. Now cross call over to the CPU of interest to
748 * actually add our cyclic.
753 arg.cyx_flags = flags;
755 be->cyb_xcall(bar, cpu->cyp_cpu, (cyc_func_t)cyclic_add_xcall, &arg);
/* cyclic_add_xcall() recorded the allocated index in the arg block. */
757 return (arg.cyx_ndx);
/*
 * Cross-call target for cyclic_remove_here(): runs on the target CPU with
 * the per-CPU spin mutex held.  Optionally reports the removed cyclic's
 * timing (for juggling), frees the cyclic, linearly scans the heap for its
 * index, swaps it with the last heap element, re-heapifies, and disables
 * or reprograms the backend as needed.
 * NOTE(review): excerpt has elided lines; comments cover visible code only.
 */
761 cyclic_remove_xcall(cyc_xcallarg_t *arg)
763 cyc_cpu_t *cpu = arg->cyx_cpu;
764 cyc_backend_t *be = cpu->cyp_backend;
765 cyb_arg_t bar = be->cyb_arg;
766 cyc_index_t ndx = arg->cyx_ndx, nelems = cpu->cyp_nelems, i;
767 cyc_index_t *heap = cpu->cyp_heap, last;
772 /* Disable preemption and interrupts. */
773 mtx_lock_spin(&cpu->cyp_mtx);
774 cyclic = &cpu->cyp_cyclics[ndx];
777 * Grab the current expiration time. If this cyclic is being
778 * removed as part of a juggling operation, the expiration time
779 * will be used when the cyclic is added to the new CPU.
781 if (arg->cyx_when != NULL) {
782 arg->cyx_when->cyt_when = cyclic->cy_expire;
783 arg->cyx_when->cyt_interval = cyclic->cy_interval;
787 * Now set the flags to CYF_FREE. We don't need a membar_enter()
788 * between zeroing pend and setting the flags because we're at
789 * CY_HIGH_LEVEL (that is, the zeroing of pend and the setting
790 * of cy_flags appear atomic to softints).
792 cyclic->cy_flags = CYF_FREE;
/*
 * Linear scan for the heap slot holding "ndx"; cyclics carry no
 * backpointer into the heap (see the file-header discussion).
 */
794 for (i = 0; i < nelems; i++) {
800 panic("attempt to remove non-existent cyclic");
802 cpu->cyp_nelems = --nelems;
806 * If we just removed the last element, then we need to
807 * disable the backend on this CPU.
809 be->cyb_disable(bar);
814 * If we just removed the last element of the heap, then
815 * we don't have to downheap.
821 * Swap the last element of the heap with the one we want to
822 * remove, and downheap (this has the implicit effect of putting
823 * the newly freed element on the free list).
825 heap[i] = (last = heap[nelems]);
829 cyclic_downheap(cpu, 0);
831 if (cyclic_upheap(cpu, i) == 0) {
833 * The upheap didn't propagate to the root; if it
834 * didn't propagate at all, we need to downheap.
836 if (heap[i] == last) {
837 cyclic_downheap(cpu, i);
844 * We're here because we changed the root; we need to reprogram
847 cyclic = &cpu->cyp_cyclics[heap[0]];
850 be->cyb_reprogram(bar, cyclic->cy_expire);
852 mtx_unlock_spin(&cpu->cyp_mtx);
/*
 * Remove the cyclic at index "ndx" from the CPU described by "cpu" by
 * cross-calling onto that CPU (cyclic_remove_xcall).  "when", if non-NULL,
 * receives the removed cyclic's timing for re-adding elsewhere (juggling).
 * Caller must hold cpu_lock; "wait" must be CY_WAIT or CY_NOWAIT.
 * NOTE(review): excerpt has elided lines; comments cover visible code only.
 */
856 cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait)
858 cyc_backend_t *be = cpu->cyp_backend;
861 ASSERT(MUTEX_HELD(&cpu_lock));
862 ASSERT(wait == CY_WAIT || wait == CY_NOWAIT);
869 be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
870 (cyc_func_t)cyclic_remove_xcall, &arg);
/*
 * Per-CPU setup: allocate the cyc_cpu structure, minimal (one-entry) heap
 * and cyclics arrays, and the per-CPU spin mutex; lazily create the global
 * cyc_id_t cache; copy the registered backend and let it configure itself
 * for this CPU.  Caller must hold cpu_lock.
 * NOTE(review): excerpt has elided lines; comments cover visible code only.
 */
876 cyclic_configure(cpu_t *c)
878 cyc_cpu_t *cpu = malloc(sizeof(cyc_cpu_t), M_CYCLIC, M_ZERO | M_WAITOK);
879 cyc_backend_t *nbe = malloc(sizeof(cyc_backend_t), M_CYCLIC, M_ZERO | M_WAITOK);
881 ASSERT(MUTEX_HELD(&cpu_lock));
/* First CPU to be configured creates the shared cyc_id_t cache. */
883 if (cyclic_id_cache == NULL)
884 cyclic_id_cache = kmem_cache_create("cyclic_id_cache",
885 sizeof (cyc_id_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
/* Start with single-entry arrays; cyclic_expand() grows them on demand. */
890 cpu->cyp_heap = malloc(sizeof(cyc_index_t), M_CYCLIC, M_ZERO | M_WAITOK);
891 cpu->cyp_cyclics = malloc(sizeof(cyclic_t), M_CYCLIC, M_ZERO | M_WAITOK);
892 cpu->cyp_cyclics->cy_flags = CYF_FREE;
894 mtx_init(&cpu->cyp_mtx, "cyclic cpu", NULL, MTX_SPIN);
897 * Setup the backend for this CPU.
899 bcopy(&cyclic_backend, nbe, sizeof (cyc_backend_t));
900 if (nbe->cyb_configure != NULL)
901 nbe->cyb_arg = nbe->cyb_configure(c);
902 cpu->cyp_backend = nbe;
905 * On platforms where stray interrupts may be taken during startup,
906 * the CPU's cpu_cyclic pointer serves as an indicator that the
907 * cyclic subsystem for this CPU is prepared to field interrupts.
/*
 * Per-CPU teardown: detach the cyc_cpu from the cpu_t (so interrupts stop
 * being fielded), let the backend unconfigure itself, destroy the per-CPU
 * mutex, and free the heap and cyclics arrays.  Caller must hold cpu_lock.
 * NOTE(review): excerpt has elided lines; comments cover visible code only.
 */
915 cyclic_unconfigure(cpu_t *c)
917 cyc_cpu_t *cpu = c->cpu_cyclic;
918 cyc_backend_t *be = cpu->cyp_backend;
919 cyb_arg_t bar = be->cyb_arg;
921 ASSERT(MUTEX_HELD(&cpu_lock));
/* Clear the "ready for interrupts" indicator first. */
923 c->cpu_cyclic = NULL;
926 * Let the backend know that the CPU is being yanked, and free up
927 * the backend structure.
929 if (be->cyb_unconfigure != NULL)
930 be->cyb_unconfigure(bar);
932 cpu->cyp_backend = NULL;
934 mtx_destroy(&cpu->cyp_mtx);
936 /* Finally, clean up our remaining dynamic structures. */
937 free(cpu->cyp_cyclics, M_CYCLIC);
938 free(cpu->cyp_heap, M_CYCLIC);
/*
 * Start the omnipresent cyclic "idp" on CPU "cpu": call the consumer's
 * online handler (which must fill in the handler and time structures),
 * add the resulting per-CPU cyclic via cyclic_add_here(), and link a new
 * cyc_omni_cpu_t onto the cyclic's per-CPU list.  Caller holds cpu_lock.
 * NOTE(review): excerpt has elided lines; comments cover visible code only.
 */
943 cyclic_omni_start(cyc_id_t *idp, cyc_cpu_t *cpu)
945 cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
946 cyc_omni_cpu_t *ocpu = malloc(sizeof(cyc_omni_cpu_t), M_CYCLIC , M_WAITOK);
950 ASSERT(MUTEX_HELD(&cpu_lock));
/* cyi_cpu == NULL identifies an omnipresent cyclic. */
951 ASSERT(idp->cyi_cpu == NULL);
/* Poison the in/out structures so we can assert the handler filled them. */
953 hdlr.cyh_func = NULL;
957 when.cyt_interval = 0;
959 omni->cyo_online(omni->cyo_arg, cpu->cyp_cpu, &hdlr, &when);
961 ASSERT(hdlr.cyh_func != NULL);
962 ASSERT(when.cyt_when >= 0 && when.cyt_interval > 0);
965 ocpu->cyo_arg = hdlr.cyh_arg;
966 ocpu->cyo_ndx = cyclic_add_here(cpu, &hdlr, &when, 0);
/* Push onto the head of this cyclic's per-CPU list. */
967 ocpu->cyo_next = idp->cyi_omni_list;
968 idp->cyi_omni_list = ocpu;
/*
 * Stop the omnipresent cyclic "idp" on CPU "cpu": find and unlink the
 * matching cyc_omni_cpu_t, remove the underlying per-CPU cyclic, invoke
 * the consumer's offline handler (if any), and free the tracking
 * structure.  Caller holds cpu_lock.
 * NOTE(review): excerpt has elided lines; comments cover visible code only.
 */
972 cyclic_omni_stop(cyc_id_t *idp, cyc_cpu_t *cpu)
974 cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
975 cyc_omni_cpu_t *ocpu = idp->cyi_omni_list, *prev = NULL;
977 ASSERT(MUTEX_HELD(&cpu_lock));
978 ASSERT(idp->cyi_cpu == NULL);
979 ASSERT(ocpu != NULL);
/* Walk the singly-linked per-CPU list looking for this CPU's entry. */
981 while (ocpu != NULL && ocpu->cyo_cpu != cpu) {
983 ocpu = ocpu->cyo_next;
987 * We _must_ have found a cyc_omni_cpu which corresponds to this
988 * CPU -- the definition of an omnipresent cyclic is that it runs
989 * on all online CPUs.
991 ASSERT(ocpu != NULL);
/* Unlink: either from the list head or from the predecessor. */
994 idp->cyi_omni_list = ocpu->cyo_next;
996 prev->cyo_next = ocpu->cyo_next;
999 (void) cyclic_remove_here(ocpu->cyo_cpu, ocpu->cyo_ndx, NULL, CY_WAIT);
1002 * The cyclic has been removed from this CPU; time to call the
1003 * omnipresent offline handler.
1005 if (omni->cyo_offline != NULL)
1006 omni->cyo_offline(omni->cyo_arg, cpu->cyp_cpu, ocpu->cyo_arg);
1008 free(ocpu, M_CYCLIC);
1016 ASSERT(MUTEX_HELD(&cpu_lock));
1018 idp = kmem_cache_alloc(cyclic_id_cache, KM_SLEEP);
1021 * The cyi_cpu field of the cyc_id_t structure tracks the CPU
1022 * associated with the cyclic. If and only if this field is NULL, the
1023 * cyc_id_t is an omnipresent cyclic. Note that cyi_omni_list may be
1024 * NULL for an omnipresent cyclic while the cyclic is being created
1027 idp->cyi_cpu = NULL;
1030 idp->cyi_next = cyclic_id_head;
1031 idp->cyi_prev = NULL;
1032 idp->cyi_omni_list = NULL;
1034 if (cyclic_id_head != NULL) {
1035 ASSERT(cyclic_id_head->cyi_prev == NULL);
1036 cyclic_id_head->cyi_prev = idp;
1039 cyclic_id_head = idp;
1045 * cyclic_id_t cyclic_add(cyc_handler_t *, cyc_time_t *)
1049 * cyclic_add() will create an unbound cyclic with the specified handler and
1050 * interval. The cyclic will run on a CPU which both has interrupts enabled
1051 * and is in the system CPU partition.
1053 * Arguments and notes
1055 * As its first argument, cyclic_add() takes a cyc_handler, which has the
1056 * following members:
1058 * cyc_func_t cyh_func <-- Cyclic handler
1059 * void *cyh_arg <-- Argument to cyclic handler
1061 * In addition to a cyc_handler, cyclic_add() takes a cyc_time, which
1062 * has the following members:
1064 * hrtime_t cyt_when <-- Absolute time, in nanoseconds since boot, at
1065 * which to start firing
1066 * hrtime_t cyt_interval <-- Length of interval, in nanoseconds
1068 * gethrtime() is the time source for nanoseconds since boot. If cyt_when
1069 * is set to 0, the cyclic will start to fire when cyt_interval next
1070 * divides the number of nanoseconds since boot.
1072 * The cyt_interval field _must_ be filled in by the caller; one-shots are
1073 * _not_ explicitly supported by the cyclic subsystem (cyclic_add() will
1074 * assert that cyt_interval is non-zero). The maximum value for either
1075 * field is INT64_MAX; the caller is responsible for assuring that
1076 * cyt_when + cyt_interval <= INT64_MAX. Neither field may be negative.
1078 * For an arbitrary time t in the future, the cyclic handler is guaranteed
1079 * to have been called (t - cyt_when) / cyt_interval times. This will
1080 * be true even if interrupts have been disabled for periods greater than
1081 * cyt_interval nanoseconds. In order to compensate for such periods,
1082 * the cyclic handler may be called a finite number of times with an
1083 * arbitrarily small interval.
1085 * The cyclic subsystem will not enforce any lower bound on the interval;
1086 * if the interval is less than the time required to process an interrupt,
1087 * the CPU will wedge. It's the responsibility of the caller to assure that
1088 * either the value of the interval is sane, or that its caller has
1089 * sufficient privilege to deny service (i.e. its caller is root).
1093 * cyclic_add() returns a cyclic_id_t, which is guaranteed to be a value
1094 * other than CYCLIC_NONE. cyclic_add() cannot fail.
1098 * cpu_lock must be held by the caller, and the caller must not be in
1099 * interrupt context. cyclic_add() will perform a KM_SLEEP kernel
1100 * memory allocation, so the usual rules (e.g. p_lock cannot be held)
1101 * apply. A cyclic may be added even in the presence of CPUs that have
1102 * not been configured with respect to the cyclic subsystem, but only
1103 * configured CPUs will be eligible to run the new cyclic.
1105 * Cyclic handler's context
1107 * Cyclic handlers will be executed in the interrupt context corresponding
1108 * to the specified level (i.e. either high, lock or low level). The
1109 * usual context rules apply.
1111 * A cyclic handler may not grab ANY locks held by the caller of any of
1112 * cyclic_add() or cyclic_remove(); the implementation of these functions
1113 * may require blocking on cyclic handler completion.
1114 * Moreover, cyclic handlers may not make any call back into the cyclic
/*
 * Public entry point (contract documented in the comment block above):
 * allocate a cyc_id_t, bind it to the current CPU's cyc_cpu, and add the
 * cyclic there.  Returns the cyc_id_t pointer cast to cyclic_id_t; never
 * fails.  Caller must hold cpu_lock.
 * NOTE(review): excerpt has elided lines; comments cover visible code only.
 */
1118 cyclic_add(cyc_handler_t *hdlr, cyc_time_t *when)
1120 cyc_id_t *idp = cyclic_new_id();
/* In this FreeBSD port the cyclic is bound to the calling CPU. */
1121 solaris_cpu_t *c = &solaris_cpu[curcpu];
1123 ASSERT(MUTEX_HELD(&cpu_lock));
1124 ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);
1126 idp->cyi_cpu = c->cpu_cyclic;
1127 idp->cyi_ndx = cyclic_add_here(idp->cyi_cpu, hdlr, when, 0);
1129 return ((uintptr_t)idp);
1133 * cyclic_id_t cyclic_add_omni(cyc_omni_handler_t *)
1137 * cyclic_add_omni() will create an omnipresent cyclic with the specified
1138 * online and offline handlers. Omnipresent cyclics run on all online
1139 * CPUs, including CPUs which have unbound interrupts disabled.
1143 * As its only argument, cyclic_add_omni() takes a cyc_omni_handler, which
1144 * has the following members:
1146 * void (*cyo_online)() <-- Online handler
1147 * void (*cyo_offline)() <-- Offline handler
1148 * void *cyo_arg <-- Argument to be passed to on/offline handlers
1152 * The cyo_online member is a pointer to a function which has the following
1155 * void * <-- Argument (cyo_arg)
1156 * cpu_t * <-- Pointer to CPU about to be onlined
1157 * cyc_handler_t * <-- Pointer to cyc_handler_t; must be filled in
1158 * by omni online handler
1159 * cyc_time_t * <-- Pointer to cyc_time_t; must be filled in by
1160 * omni online handler
1162 * The omni cyclic online handler is always called _before_ the omni
1163 * cyclic begins to fire on the specified CPU. As the above argument
1164 * description implies, the online handler must fill in the two structures
1165 * passed to it: the cyc_handler_t and the cyc_time_t. These are the
1166 * same two structures passed to cyclic_add(), outlined above. This
1167 * allows the omni cyclic to have maximum flexibility; different CPUs may
1170 * (a) have different intervals
1171 * (b) be explicitly in or out of phase with one another
1172 * (c) have different handlers
1173 * (d) have different handler arguments
1174 * (e) fire at different levels
1176 * Of these, (e) seems somewhat dubious, but is nonetheless allowed.
1178 * The omni online handler is called in the same context as cyclic_add(),
1179 * and has the same liberties: omni online handlers may perform KM_SLEEP
1180 * kernel memory allocations, and may grab locks which are also acquired
1181 * by cyclic handlers. However, omni cyclic online handlers may _not_
1182 * call back into the cyclic subsystem, and should be generally careful
1183 * about calling into arbitrary kernel subsystems.
1187 * The cyo_offline member is a pointer to a function which has the following
1190 * void * <-- Argument (cyo_arg)
1191 * cpu_t * <-- Pointer to CPU about to be offlined
1192 * void * <-- CPU's cyclic argument (that is, value
1193 * to which cyh_arg member of the cyc_handler_t
1194 * was set in the omni online handler)
1196 * The omni cyclic offline handler is always called _after_ the omni
1197 * cyclic has ceased firing on the specified CPU. Its purpose is to
1198 * allow cleanup of any resources dynamically allocated in the omni cyclic
1199 * online handler. The context of the offline handler is identical to
1200 * that of the online handler; the same constraints and liberties apply.
1202 * The offline handler is optional; it may be NULL.
1206 * cyclic_add_omni() returns a cyclic_id_t, which is guaranteed to be a
1207 * value other than CYCLIC_NONE. cyclic_add_omni() cannot fail.
1211 * The caller's context is identical to that of cyclic_add(), specified
/*
 * cyclic_add_omni() — create an omnipresent cyclic that runs on every
 * configured CPU.  Copies the caller's handler triple into the id and
 * starts the omni cyclic on each CPU whose cyclic state exists, per the
 * interface documentation above.  Caller holds cpu_lock (asserted).
 *
 * NOTE(review): sampled excerpt — the local declarations, the per-CPU
 * "for" loop header, and the "continue" after the NULL check are not
 * visible here; code lines are kept byte-identical.
 */
1215 cyclic_add_omni(cyc_omni_handler_t *omni)
1217 cyc_id_t *idp = cyclic_new_id();
1222 ASSERT(MUTEX_HELD(&cpu_lock));
/* The online handler is mandatory; the offline handler may be NULL. */
1223 ASSERT(omni != NULL && omni->cyo_online != NULL);
/* Struct copy: the id keeps its own copy of the handler triple, so the
 * caller's cyc_omni_handler_t need not outlive this call. */
1225 idp->cyi_omni_hdlr = *omni;
/* (loop over all CPU slots — header sampled away) */
1228 c = &solaris_cpu[i];
/* Skip CPUs not yet configured with respect to the cyclic subsystem. */
1229 if ((cpu = c->cpu_cyclic) == NULL)
1231 cyclic_omni_start(idp, cpu);
1235 * We must have found at least one online CPU on which to run
/* Omni ids are distinguished by a non-NULL cyi_omni_list and a NULL
 * cyi_cpu — cyclic_remove() relies on this. */
1238 ASSERT(idp->cyi_omni_list != NULL);
1239 ASSERT(idp->cyi_cpu == NULL);
1241 return ((uintptr_t)idp);
1245 * void cyclic_remove(cyclic_id_t)
1249 * cyclic_remove() will remove the specified cyclic from the system.
1251 * Arguments and notes
1253 * The only argument is a cyclic_id returned from either cyclic_add() or
1254 * cyclic_add_omni().
1256 * By the time cyclic_remove() returns, the caller is guaranteed that the
1257 * removed cyclic handler has completed execution (this is the same
1258 * semantic that untimeout() provides). As a result, cyclic_remove() may
1259 * need to block, waiting for the removed cyclic to complete execution.
1260 * This leads to an important constraint on the caller: no lock may be
1261 * held across cyclic_remove() that also may be acquired by a cyclic
1266 * None; cyclic_remove() always succeeds.
1270 * cpu_lock must be held by the caller, and the caller must not be in
1271 * interrupt context. The caller may not hold any locks which are also
1272 * grabbed by any cyclic handler. See "Arguments and notes", above.
/*
 * cyclic_remove() — remove a cyclic (regular or omni) from the system.
 * Blocks until the handler has completed (CY_WAIT), unlinks the id from
 * the global doubly-linked id list, and frees it back to the id cache.
 * Caller holds cpu_lock and may not hold any lock a cyclic handler
 * grabs — see the interface comment above.
 *
 * NOTE(review): sampled excerpt — the "if (cpu != NULL) { … } else
 * { … }" scaffolding and list-splice branches are not fully visible;
 * code lines are kept byte-identical.
 */
1275 cyclic_remove(cyclic_id_t id)
/* The opaque id is simply the cyc_id_t pointer handed out by
 * cyclic_add()/cyclic_add_omni(). */
1277 cyc_id_t *idp = (cyc_id_t *)id;
1278 cyc_id_t *prev = idp->cyi_prev, *next = idp->cyi_next;
1279 cyc_cpu_t *cpu = idp->cyi_cpu;
1281 ASSERT(MUTEX_HELD(&cpu_lock));
/* Regular cyclic: CY_WAIT blocks until the handler is done executing,
 * giving the untimeout()-style completion guarantee documented above. */
1284 (void) cyclic_remove_here(cpu, idp->cyi_ndx, NULL, CY_WAIT);
/* Omni cyclic (cyi_cpu == NULL): stop it on every CPU it is running on,
 * draining cyi_omni_list one entry at a time. */
1286 ASSERT(idp->cyi_omni_list != NULL);
1287 while (idp->cyi_omni_list != NULL)
1288 cyclic_omni_stop(idp, idp->cyi_omni_list->cyo_cpu);
/* Unlink from the global id list: interior node vs. list head. */
1292 ASSERT(cyclic_id_head != idp);
1293 prev->cyi_next = next;
1295 ASSERT(cyclic_id_head == idp);
1296 cyclic_id_head = next;
1300 next->cyi_prev = prev;
1302 kmem_cache_free(cyclic_id_cache, idp);
/*
 * cyclic_init() — initialize the cyclic subsystem with the given
 * backend, then configure the boot CPU.  Caller holds cpu_lock.
 *
 * NOTE(review): sampled excerpt — braces and comment delimiters of the
 * original are not visible; code lines are kept byte-identical.
 */
1306 cyclic_init(cyc_backend_t *be)
1308 ASSERT(MUTEX_HELD(&cpu_lock));
1311 * Copy the passed cyc_backend into the backend template. This must
1312 * be done before the CPU can be configured.
1314 bcopy(be, &cyclic_backend, sizeof (cyc_backend_t));
/* Configure the current (boot) CPU; the remaining CPUs are configured
 * later by cyclic_mp_init(). */
1316 cyclic_configure(&solaris_cpu[curcpu]);
1320 * It is assumed that cyclic_mp_init() is called some time after cyclic
1321 * init (and therefore, after cpu0 has been initialized). We grab cpu_lock,
1322 * find the already initialized CPU, and initialize every other CPU with the
/*
 * cyclic_mp_init() — configure every CPU not already set up by
 * cyclic_init() (i.e. every CPU except the boot CPU).  Takes cpu_lock
 * itself, unlike the functions above which assert it held.
 *
 * NOTE(review): sampled excerpt — the "for" loop header iterating the
 * CPU slots is not visible; code lines are kept byte-identical.
 */
1326 cyclic_mp_init(void)
1331 mutex_enter(&cpu_lock);
/* (loop over all CPU slots — header sampled away) */
1334 c = &solaris_cpu[i];
/* Only configure CPUs that have no cyclic state yet; the boot CPU was
 * handled in cyclic_init(). */
1335 if (c->cpu_cyclic == NULL)
1336 cyclic_configure(c);
1339 mutex_exit(&cpu_lock);
/*
 * NOTE(review): body fragment — presumably the teardown loop of
 * cyclic_uninit(); the function header and loop scaffolding were
 * sampled out of this excerpt.  TODO confirm against the full file.
 * For each CPU slot with cyclic state, unconfigure it, then destroy
 * the id cache if it was ever created.
 */
1349 c = &solaris_cpu[id];
/* Skip CPUs that were never configured. */
1350 if (c->cpu_cyclic == NULL)
1352 cyclic_unconfigure(c);
/* The id cache is created lazily, so it may legitimately be NULL here. */
1355 if (cyclic_id_cache != NULL)
1356 kmem_cache_destroy(cyclic_id_cache);
1359 #include "cyclic_machdep.c"
1362 * Cyclic subsystem initialisation.
/*
 * cyclic_load() — SYSINIT hook: bring up the machine-dependent backend
 * (from the cyclic_machdep.c included above) under cpu_lock.  The dummy
 * argument is the standard unused SYSINIT cookie.
 *
 * NOTE(review): sampled excerpt — return-type line and braces are not
 * visible; code lines are kept byte-identical.
 */
1365 cyclic_load(void *dummy)
1367 mutex_enter(&cpu_lock);
1369 /* Initialise the machine-dependent backend. */
1370 cyclic_machdep_init();
1372 mutex_exit(&cpu_lock);
/* Register at SI_SUB_CYCLIC so the subsystem is up before consumers
 * (e.g. DTrace profile providers) that depend on it. */
1375 SYSINIT(cyclic_register, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_load, NULL);
/*
 * NOTE(review): body fragment — presumably cyclic_unload(), the
 * SYSUNINIT counterpart of cyclic_load() above; its header line was
 * sampled out of this excerpt.  Tears down the machine-dependent
 * backend under cpu_lock.
 */
1380 mutex_enter(&cpu_lock);
1382 /* Uninitialise the machine-dependent backend. */
1383 cyclic_machdep_uninit();
1385 mutex_exit(&cpu_lock);
1388 SYSUNINIT(cyclic_unregister, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_unload, NULL);
/*
 * cyclic_modevent() — kernel module event handler for the cyclic
 * module.  NOTE(review): only the signature survives in this excerpt;
 * the switch on 'type' (MOD_LOAD/MOD_UNLOAD/...) was sampled out.
 */
1392 cyclic_modevent(module_t mod __unused, int type, void *data __unused)
/* Register as a device module, export version 1, and require the
 * opensolaris compatibility module to be present. */
1414 DEV_MODULE(cyclic, cyclic_modevent, NULL);
1415 MODULE_VERSION(cyclic, 1);
1416 MODULE_DEPEND(cyclic, opensolaris, 1, 1, 1);