4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Portions Copyright 2008 John Birrell <jb@freebsd.org>
26 * This is a simplified version of the cyclic timer subsystem from
27 * OpenSolaris. In the FreeBSD version, we don't use interrupt levels.
31 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
32 * Use is subject to license terms.
36 * The Cyclic Subsystem
37 * --------------------
41 * Historically, most computer architectures have specified interval-based
42 * timer parts (e.g. SPARCstation's counter/timer; Intel's i8254). While
43 * these parts deal in relative (i.e. not absolute) time values, they are
44 * typically used by the operating system to implement the abstraction of
45 * absolute time. As a result, these parts cannot typically be reprogrammed
46 * without introducing error in the system's notion of time.
48 * Starting in about 1994, chip architectures began specifying high resolution
49 * timestamp registers. As of this writing (1999), all major chip families
50 * (UltraSPARC, PentiumPro, MIPS, PowerPC, Alpha) have high resolution
51 * timestamp registers, and two (UltraSPARC and MIPS) have added the capacity
52 * to interrupt based on timestamp values. These timestamp-compare registers
53 * present a time-based interrupt source which can be reprogrammed arbitrarily
54 * often without introducing error. Given the low cost of implementing such a
55 * timestamp-compare register (and the tangible benefit of eliminating
56 * discrete timer parts), it is reasonable to expect that future chip
57 * architectures will adopt this feature.
59 * The cyclic subsystem has been designed to take advantage of chip
60 * architectures with the capacity to interrupt based on absolute, high
61 * resolution values of time.
65 * The cyclic subsystem is a low-level kernel subsystem designed to provide
66 * arbitrarily high resolution, per-CPU interval timers (to avoid colliding
67 * with existing terms, we dub such an interval timer a "cyclic").
68 * Alternatively, a cyclic may be specified to be "omnipresent", denoting
69 * firing on all online CPUs.
71 * Cyclic Subsystem Interface Overview
72 * -----------------------------------
74 * The cyclic subsystem has interfaces with the kernel at-large, with other
75 * kernel subsystems (e.g. the processor management subsystem, the checkpoint
76 * resume subsystem) and with the platform (the cyclic backend). Each
77 * of these interfaces is given a brief synopsis here, and is described
78 * in full above the interface's implementation.
80 * The following diagram displays the cyclic subsystem's interfaces to
81 * other kernel components. The arrows denote a "calls" relationship, with
82 * the large arrow indicating the cyclic subsystem's consumer interface.
83 * Each arrow is labeled with the section in which the corresponding
84 * interface is described.
 *                        Kernel at-large consumers
 *                        -----------++------------
 *                                    ||
 *                                    ||
 *                                   \||/
 *                                    \/
 *                         +---------------------+
 *                         |                     |
 *                         |  Cyclic subsystem   |<-----------  Other kernel subsystems
 *                         |                     |
 *                         +---------------------+
 *                                    ||
 *                                    ||
 *                                   \||/
 *                                    \/
 *                         +---------------------+
 *                         |   Cyclic backend    |
 *                         | (platform specific) |
 *                         +---------------------+
110 * Kernel At-Large Interfaces
112 * cyclic_add() <-- Creates a cyclic
113 * cyclic_add_omni() <-- Creates an omnipresent cyclic
114 * cyclic_remove() <-- Removes a cyclic
 * Backend Interfaces
 *
 118 * cyclic_init() <-- Initializes the cyclic subsystem
119 * cyclic_fire() <-- Interrupt entry point
121 * The backend-supplied interfaces (through the cyc_backend structure) are
122 * documented in detail in <sys/cyclic_impl.h>
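 *
 * As a rough sketch, inferred only from how this file invokes the backend
 * (the authoritative definition lives in <sys/cyclic_impl.h>), the backend
 * supplies something like:
 *
 *	typedef struct cyc_backend {
 *		cyb_arg_t (*cyb_configure)(cpu_t *);
 *		void (*cyb_unconfigure)(cyb_arg_t);
 *		void (*cyb_enable)(cyb_arg_t);
 *		void (*cyb_disable)(cyb_arg_t);
 *		void (*cyb_reprogram)(cyb_arg_t, hrtime_t);
 *		void (*cyb_xcall)(cyb_arg_t, cpu_t *, cyc_func_t, void *);
 *		cyb_arg_t cyb_arg;
 *	} cyc_backend_t;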
125 * Cyclic Subsystem Implementation Overview
126 * ----------------------------------------
128 * The cyclic subsystem is designed to minimize interference between cyclics
129 * on different CPUs. Thus, all of the cyclic subsystem's data structures
130 * hang off of a per-CPU structure, cyc_cpu.
132 * Each cyc_cpu has a power-of-two sized array of cyclic structures (the
133 * cyp_cyclics member of the cyc_cpu structure). If cyclic_add() is called
134 * and there does not exist a free slot in the cyp_cyclics array, the size of
135 * the array will be doubled. The array will never shrink. Cyclics are
 136 * referred to by their index in the cyp_cyclics array, which is of type
 * cyc_index_t.
139 * The cyclics are kept sorted by expiration time in the cyc_cpu's heap. The
140 * heap is keyed by cyclic expiration time, with parents expiring earlier
141 * than their children.
145 * The heap is managed primarily by cyclic_fire(). Upon entry, cyclic_fire()
146 * compares the root cyclic's expiration time to the current time. If the
147 * expiration time is in the past, cyclic_expire() is called on the root
148 * cyclic. Upon return from cyclic_expire(), the cyclic's new expiration time
149 * is derived by adding its interval to its old expiration time, and a
150 * downheap operation is performed. After the downheap, cyclic_fire()
151 * examines the (potentially changed) root cyclic, repeating the
152 * cyclic_expire()/add interval/cyclic_downheap() sequence until the root
153 * cyclic has an expiration time in the future. This expiration time
154 * (guaranteed to be the earliest in the heap) is then communicated to the
155 * backend via cyb_reprogram. Optimal backends will next call cyclic_fire()
156 * shortly after the root cyclic's expiration time.
158 * To allow efficient, deterministic downheap operations, we implement the
159 * heap as an array (the cyp_heap member of the cyc_cpu structure), with each
160 * element containing an index into the CPU's cyp_cyclics array.
162 * The heap is laid out in the array according to the following:
164 * 1. The root of the heap is always in the 0th element of the heap array
165 * 2. The left and right children of the nth element are element
166 * (((n + 1) << 1) - 1) and element ((n + 1) << 1), respectively.
 168 * This layout is standard (see, e.g., Cormen et al.'s Introduction to Algorithms); the proof
169 * that these constraints correctly lay out a heap (or indeed, any binary
170 * tree) is trivial and left to the reader.
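 *
 * Consistent with these constraints (a sketch only; the actual macro
 * definitions used later in this file live in the implementation headers),
 * the index arithmetic is:
 *
 *	#define CYC_HEAP_PARENT(ndx)	(((ndx) - 1) >> 1)
 *	#define CYC_HEAP_LEFT(ndx)	((((ndx) + 1) << 1) - 1)
 *	#define CYC_HEAP_RIGHT(ndx)	(((ndx) + 1) << 1)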
172 * To see the heap by example, assume our cyclics array has the following
173 * members (at time t):
175 * cy_handler cy_expire
176 * ---------------------------------------------
177 * [ 0] clock() t+10000000
178 * [ 1] deadman() t+1000000000
179 * [ 2] clock_highres_fire() t+100
180 * [ 3] clock_highres_fire() t+1000
181 * [ 4] clock_highres_fire() t+500
186 * The heap array could be:
188 * [0] [1] [2] [3] [4] [5] [6] [7]
189 * +-----+-----+-----+-----+-----+-----+-----+-----+
191 * | 2 | 3 | 4 | 0 | 1 | x | x | x |
193 * +-----+-----+-----+-----+-----+-----+-----+-----+
195 * Graphically, this array corresponds to the following (excuse the ASCII art):
 *                               t+100
 *            +------------------+------------------+
 *            |                                     |
 *          t+1000                                t+500
 *    +---------+--------+
 *    |                  |
 * t+10000000      t+1000000000
205 * Note that the heap is laid out by layer: all nodes at a given depth are
206 * stored in consecutive elements of the array. Moreover, layers of
207 * consecutive depths are in adjacent element ranges. This property
208 * guarantees high locality of reference during downheap operations.
209 * Specifically, we are guaranteed that we can downheap to a depth of
211 * lg (cache_line_size / sizeof (cyc_index_t))
213 * nodes with at most one cache miss. On UltraSPARC (64 byte e-cache line
214 * size), this corresponds to a depth of four nodes. Thus, if there are
215 * fewer than sixteen cyclics in the heap, downheaps on UltraSPARC miss at
216 * most once in the e-cache.
218 * Downheaps are required to compare siblings as they proceed down the
219 * heap. For downheaps proceeding beyond the one-cache-miss depth, every
220 * access to a left child could potentially miss in the cache. However,
 * if we assume
 *
 223 * (cache_line_size / sizeof (cyc_index_t)) > 2,
225 * then all siblings are guaranteed to be on the same cache line. Thus, the
226 * miss on the left child will guarantee a hit on the right child; downheaps
227 * will incur at most one cache miss per layer beyond the one-cache-miss
228 * depth. The total number of cache misses for heap management during a
229 * downheap operation is thus bounded by
231 * lg (n) - lg (cache_line_size / sizeof (cyc_index_t))
233 * Traditional pointer-based heaps are implemented without regard to
234 * locality. Downheaps can thus incur two cache misses per layer (one for
235 * each child), but at most one cache miss at the root. This yields a bound
 * of
 *
 *	2 * lg (n) - 1
 *
 240 * on the total cache misses.
242 * This difference may seem theoretically trivial (the difference is, after
243 * all, constant), but can become substantial in practice -- especially for
244 * caches with very large cache lines and high miss penalties (e.g. TLBs).
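 *
 * As a purely illustrative comparison, take the UltraSPARC example above
 * (sixteen cyc_index_t entries per 64-byte e-cache line, so
 * lg (cache_line_size / sizeof (cyc_index_t)) = 4) and a heap of n = 1024
 * cyclics: the array-based layout bounds a downheap at lg (1024) - 4 = 6
 * misses, while the pointer-based bound is 2 * lg (1024) - 1 = 19.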
246 * Heaps must always be full, balanced trees. Heap management must therefore
247 * track the next point-of-insertion into the heap. In pointer-based heaps,
248 * recomputing this point takes O(lg (n)). Given the layout of the
249 * array-based implementation, however, the next point-of-insertion is
252 * heap[number_of_elements]
 254 * We exploit this property by implementing the free-list in the unused
255 * heap elements. Heap insertion, therefore, consists only of filling in
256 * the cyclic at cyp_cyclics[cyp_heap[number_of_elements]], incrementing
257 * the number of elements, and performing an upheap. Heap deletion consists
258 * of decrementing the number of elements, swapping the to-be-deleted element
259 * with the element at cyp_heap[number_of_elements], and downheaping.
261 * Filling in more details in our earlier example:
263 * +--- free list head
267 * [0] [1] [2] [3] [4] [5] [6] [7]
268 * +-----+-----+-----+-----+-----+-----+-----+-----+
270 * | 2 | 3 | 4 | 0 | 1 | 5 | 6 | 7 |
272 * +-----+-----+-----+-----+-----+-----+-----+-----+
274 * To insert into this heap, we would just need to fill in the cyclic at
 275 * cyp_cyclics[5], bump the number of elements (from 5 to 6) and perform
 * an upheap.
278 * If we wanted to remove, say, cyp_cyclics[3], we would first scan for it
279 * in the cyp_heap, and discover it at cyp_heap[1]. We would then decrement
280 * the number of elements (from 5 to 4), swap cyp_heap[1] with cyp_heap[4],
281 * and perform a downheap from cyp_heap[1]. The linear scan is required
282 * because the cyclic does not keep a backpointer into the heap. This makes
 283 * heap manipulation (e.g. downheaps) faster at the expense of removal
 * operations.
288 * As alluded to above, cyclic_expire() is called by cyclic_fire() to expire
289 * a cyclic. Cyclic subsystem consumers are guaranteed that for an arbitrary
290 * time t in the future, their cyclic handler will have been called
 291 * (t - cyt_when) / cyt_interval times. cyclic_expire() simply needs to call
 * the handler.
296 * All of the discussion thus far has assumed a static number of cyclics.
297 * Obviously, static limitations are not practical; we need the capacity
298 * to resize our data structures dynamically.
300 * We resize our data structures lazily, and only on a per-CPU basis.
301 * The size of the data structures always doubles and never shrinks. We
302 * serialize adds (and thus resizes) on cpu_lock; we never need to deal
303 * with concurrent resizes. Resizes should be rare; they may induce jitter
 304 * on the CPU being resized, but should not affect cyclic operation on other
 * CPUs.
 307 * Two key cyc_cpu data structures need to be resized: the cyclics array
 308 * and the heap array. Resizing is relatively straightforward:
310 * 1. The new, larger arrays are allocated in cyclic_expand() (called
311 * from cyclic_add()).
312 * 2. The contents of the old arrays are copied into the new arrays.
313 * 3. The old cyclics array is bzero()'d
314 * 4. The pointers are updated.
318 * Cyclic removals should be rare. To simplify the implementation (and to
319 * allow optimization for the cyclic_fire()/cyclic_expire()
320 * path), we force removals and adds to serialize on cpu_lock.
323 #include <sys/cdefs.h>
324 #include <sys/param.h>
325 #include <sys/conf.h>
326 #include <sys/kernel.h>
327 #include <sys/lock.h>
329 #include <sys/cyclic_impl.h>
330 #include <sys/module.h>
331 #include <sys/systm.h>
332 #include <sys/atomic.h>
333 #include <sys/kmem.h>
334 #include <sys/cmn_err.h>
335 #include <sys/dtrace_bsd.h>
336 #include <machine/cpu.h>
338 static kmem_cache_t *cyclic_id_cache;
339 static cyc_id_t *cyclic_id_head;
340 static cyc_backend_t cyclic_backend;
342 MALLOC_DEFINE(M_CYCLIC, "cyclic", "Cyclic timer subsystem");
345 * Returns 1 if the upheap propagated to the root, 0 if it did not. This
 346 * allows the caller to reprogram the backend only when the root has been
 * modified.
350 cyclic_upheap(cyc_cpu_t *cpu, cyc_index_t ndx)
354 cyc_index_t heap_parent, heap_current = ndx;
355 cyc_index_t parent, current;
357 if (heap_current == 0)
360 heap = cpu->cyp_heap;
361 cyclics = cpu->cyp_cyclics;
362 heap_parent = CYC_HEAP_PARENT(heap_current);
365 current = heap[heap_current];
366 parent = heap[heap_parent];
 369 * We have an expiration time later than our parent; we're done.
372 if (cyclics[current].cy_expire >= cyclics[parent].cy_expire)
376 * We need to swap with our parent, and continue up the heap.
378 heap[heap_parent] = current;
379 heap[heap_current] = parent;
382 * If we just reached the root, we're done.
384 if (heap_parent == 0)
387 heap_current = heap_parent;
388 heap_parent = CYC_HEAP_PARENT(heap_current);
393 cyclic_downheap(cyc_cpu_t *cpu, cyc_index_t ndx)
395 cyclic_t *cyclics = cpu->cyp_cyclics;
396 cyc_index_t *heap = cpu->cyp_heap;
398 cyc_index_t heap_left, heap_right, heap_me = ndx;
399 cyc_index_t left, right, me;
400 cyc_index_t nelems = cpu->cyp_nelems;
 404 * If we don't have a left child (i.e., we're a leaf), we're done.
407 if ((heap_left = CYC_HEAP_LEFT(heap_me)) >= nelems)
410 left = heap[heap_left];
413 heap_right = CYC_HEAP_RIGHT(heap_me);
416 * Even if we don't have a right child, we still need to compare
417 * our expiration time against that of our left child.
419 if (heap_right >= nelems)
422 right = heap[heap_right];
425 * We have both a left and a right child. We need to compare
 426 * the expiration times of the children to determine which expires earlier.
429 if (cyclics[right].cy_expire < cyclics[left].cy_expire) {
431 * Our right child is the earlier of our children.
432 * We'll now compare our expiration time to its; if
433 * ours is the earlier, we're done.
435 if (cyclics[me].cy_expire <= cyclics[right].cy_expire)
439 * Our right child expires earlier than we do; swap
440 * with our right child, and descend right.
442 heap[heap_right] = me;
443 heap[heap_me] = right;
444 heap_me = heap_right;
450 * Our left child is the earlier of our children (or we have
451 * no right child). We'll now compare our expiration time
452 * to its; if ours is the earlier, we're done.
454 if (cyclics[me].cy_expire <= cyclics[left].cy_expire)
458 * Our left child expires earlier than we do; swap with our
459 * left child, and descend left.
461 heap[heap_left] = me;
462 heap[heap_me] = left;
468 cyclic_expire(cyc_cpu_t *cpu, cyc_index_t ndx, cyclic_t *cyclic)
470 cyc_func_t handler = cyclic->cy_handler;
471 void *arg = cyclic->cy_arg;
477 cyclic_enable_xcall(void *v)
479 cyc_xcallarg_t *argp = v;
480 cyc_cpu_t *cpu = argp->cyx_cpu;
481 cyc_backend_t *be = cpu->cyp_backend;
483 be->cyb_enable(be->cyb_arg);
487 cyclic_enable(cyc_cpu_t *cpu)
489 cyc_backend_t *be = cpu->cyp_backend;
494 /* Cross call to the target CPU */
495 be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, cyclic_enable_xcall, &arg);
499 cyclic_disable_xcall(void *v)
501 cyc_xcallarg_t *argp = v;
502 cyc_cpu_t *cpu = argp->cyx_cpu;
503 cyc_backend_t *be = cpu->cyp_backend;
505 be->cyb_disable(be->cyb_arg);
509 cyclic_disable(cyc_cpu_t *cpu)
511 cyc_backend_t *be = cpu->cyp_backend;
516 /* Cross call to the target CPU */
517 be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, cyclic_disable_xcall, &arg);
521 cyclic_reprogram_xcall(void *v)
523 cyc_xcallarg_t *argp = v;
524 cyc_cpu_t *cpu = argp->cyx_cpu;
525 cyc_backend_t *be = cpu->cyp_backend;
527 be->cyb_reprogram(be->cyb_arg, argp->cyx_exp);
531 cyclic_reprogram(cyc_cpu_t *cpu, hrtime_t exp)
533 cyc_backend_t *be = cpu->cyp_backend;
539 /* Cross call to the target CPU */
540 be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, cyclic_reprogram_xcall, &arg);
544 * cyclic_fire(cpu_t *)
548 * cyclic_fire() is the cyclic subsystem's interrupt handler.
549 * Called by the cyclic backend.
551 * Arguments and notes
553 * The only argument is the CPU on which the interrupt is executing;
554 * backends must call into cyclic_fire() on the specified CPU.
556 * cyclic_fire() may be called spuriously without ill effect. Optimal
557 * backends will call into cyclic_fire() at or shortly after the time
558 * requested via cyb_reprogram(). However, calling cyclic_fire()
559 * arbitrarily late will only manifest latency bubbles; the correctness
560 * of the cyclic subsystem does not rely on the timeliness of the backend.
562 * cyclic_fire() is wait-free; it will not block or spin.
570 cyclic_fire(cpu_t *c)
572 cyc_cpu_t *cpu = c->cpu_cyclic;
574 mtx_lock_spin(&cpu->cyp_mtx);
576 cyc_index_t *heap = cpu->cyp_heap;
577 cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
578 hrtime_t now = gethrtime();
581 if (cpu->cyp_nelems == 0) {
582 /* This is a spurious fire. */
583 mtx_unlock_spin(&cpu->cyp_mtx);
588 cyc_index_t ndx = heap[0];
590 cyclic = &cyclics[ndx];
592 ASSERT(!(cyclic->cy_flags & CYF_FREE));
594 if ((exp = cyclic->cy_expire) > now)
597 cyclic_expire(cpu, ndx, cyclic);
600 * If this cyclic will be set to next expire in the distant
601 * past, we have one of two situations:
603 * a) This is the first firing of a cyclic which had
604 * cy_expire set to 0.
606 * b) We are tragically late for a cyclic -- most likely
607 * due to being in the debugger.
609 * In either case, we set the new expiration time to be the
 610 * next interval boundary. This assures that the
611 * expiration time modulo the interval is invariant.
613 * We arbitrarily define "distant" to be one second (one second
614 * is chosen because it's shorter than any foray to the
 615 * debugger while still being longer than any legitimate
 * stray firing).
618 exp += cyclic->cy_interval;
620 if (now - exp > NANOSEC) {
621 hrtime_t interval = cyclic->cy_interval;
623 exp += ((now - exp) / interval + 1) * interval;
626 cyclic->cy_expire = exp;
627 cyclic_downheap(cpu, 0);
631 * Now we have a cyclic in the root slot which isn't in the past;
632 * reprogram the interrupt source.
634 cyclic_reprogram(cpu, exp);
636 mtx_unlock_spin(&cpu->cyp_mtx);
 640 * cyclic_expand() grows the CPU's cyclics and heap arrays when there is
 * no free slot left for a new cyclic.
644 cyclic_expand(cyc_cpu_t *cpu)
646 cyc_index_t new_size, old_size, i;
647 cyc_index_t *new_heap, *old_heap;
648 cyclic_t *new_cyclics, *old_cyclics;
650 ASSERT(MUTEX_HELD(&cpu_lock));
652 if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0)
653 new_size = CY_DEFAULT_PERCPU;
656 * Check that the new_size is a power of 2.
658 ASSERT(((new_size - 1) & new_size) == 0);
660 /* Unlock the mutex while allocating memory so we can wait... */
661 mtx_unlock_spin(&cpu->cyp_mtx);
663 new_heap = malloc(sizeof(cyc_index_t) * new_size, M_CYCLIC, M_WAITOK);
664 new_cyclics = malloc(sizeof(cyclic_t) * new_size, M_CYCLIC, M_ZERO | M_WAITOK);
666 /* Grab the lock again now we've got the memory... */
667 mtx_lock_spin(&cpu->cyp_mtx);
669 /* Check if another thread beat us while the mutex was unlocked. */
670 if (old_size != cpu->cyp_size) {
671 /* Oh well, he won. */
672 mtx_unlock_spin(&cpu->cyp_mtx);
674 free(new_heap, M_CYCLIC);
675 free(new_cyclics, M_CYCLIC);
677 mtx_lock_spin(&cpu->cyp_mtx);
681 old_heap = cpu->cyp_heap;
682 old_cyclics = cpu->cyp_cyclics;
684 bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * old_size);
685 bcopy(old_cyclics, new_cyclics, sizeof (cyclic_t) * old_size);
688 * Set up the free list, and set all of the new cyclics to be CYF_FREE.
690 for (i = old_size; i < new_size; i++) {
692 new_cyclics[i].cy_flags = CYF_FREE;
696 * We can go ahead and plow the value of cyp_heap and cyp_cyclics;
697 * cyclic_expand() has kept a copy.
699 cpu->cyp_heap = new_heap;
700 cpu->cyp_cyclics = new_cyclics;
701 cpu->cyp_size = new_size;
703 if (old_cyclics != NULL) {
704 ASSERT(old_heap != NULL);
705 ASSERT(old_size != 0);
706 mtx_unlock_spin(&cpu->cyp_mtx);
708 free(old_cyclics, M_CYCLIC);
709 free(old_heap, M_CYCLIC);
711 mtx_lock_spin(&cpu->cyp_mtx);
716 cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr,
717 cyc_time_t *when, uint16_t flags)
719 cyc_index_t ndx, nelems;
722 ASSERT(MUTEX_HELD(&cpu_lock));
724 mtx_lock_spin(&cpu->cyp_mtx);
726 ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE));
727 ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);
729 while (cpu->cyp_nelems == cpu->cyp_size)
732 ASSERT(cpu->cyp_nelems < cpu->cyp_size);
734 nelems = cpu->cyp_nelems++;
738 * If this is the first element, we need to enable the
739 * backend on this CPU.
743 ndx = cpu->cyp_heap[nelems];
744 cyclic = &cpu->cyp_cyclics[ndx];
746 ASSERT(cyclic->cy_flags == CYF_FREE);
747 cyclic->cy_interval = when->cyt_interval;
749 if (when->cyt_when == 0)
750 cyclic->cy_expire = gethrtime() + cyclic->cy_interval;
752 cyclic->cy_expire = when->cyt_when;
754 cyclic->cy_handler = hdlr->cyh_func;
755 cyclic->cy_arg = hdlr->cyh_arg;
756 cyclic->cy_flags = flags;
758 if (cyclic_upheap(cpu, nelems)) {
759 hrtime_t exp = cyclic->cy_expire;
762 * If our upheap propagated to the root, we need to
763 * reprogram the interrupt source.
765 cyclic_reprogram(cpu, exp);
768 mtx_unlock_spin(&cpu->cyp_mtx);
775 cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait)
777 cyc_index_t nelems, i;
779 cyc_index_t *heap, last;
781 ASSERT(MUTEX_HELD(&cpu_lock));
782 ASSERT(wait == CY_WAIT || wait == CY_NOWAIT);
784 mtx_lock_spin(&cpu->cyp_mtx);
786 heap = cpu->cyp_heap;
788 nelems = cpu->cyp_nelems;
790 cyclic = &cpu->cyp_cyclics[ndx];
793 * Grab the current expiration time. If this cyclic is being
794 * removed as part of a juggling operation, the expiration time
795 * will be used when the cyclic is added to the new CPU.
798 when->cyt_when = cyclic->cy_expire;
799 when->cyt_interval = cyclic->cy_interval;
802 cyclic->cy_flags = CYF_FREE;
804 for (i = 0; i < nelems; i++) {
810 panic("attempt to remove non-existent cyclic");
812 cpu->cyp_nelems = --nelems;
816 * If we just removed the last element, then we need to
817 * disable the backend on this CPU.
823 * If we just removed the last element of the heap, then
824 * we don't have to downheap.
829 * Swap the last element of the heap with the one we want to
830 * remove, and downheap (this has the implicit effect of putting
831 * the newly freed element on the free list).
833 heap[i] = (last = heap[nelems]);
837 cyclic_downheap(cpu, 0);
839 if (cyclic_upheap(cpu, i) == 0) {
841 * The upheap didn't propagate to the root; if it
842 * didn't propagate at all, we need to downheap.
845 cyclic_downheap(cpu, i);
 851 * We're here because we changed the root; we need to reprogram
 * the interrupt source.
854 cyclic = &cpu->cyp_cyclics[heap[0]];
857 cyclic_reprogram(cpu, cyclic->cy_expire);
860 mtx_unlock_spin(&cpu->cyp_mtx);
866 cyclic_configure(cpu_t *c)
868 cyc_cpu_t *cpu = malloc(sizeof(cyc_cpu_t), M_CYCLIC, M_ZERO | M_WAITOK);
869 cyc_backend_t *nbe = malloc(sizeof(cyc_backend_t), M_CYCLIC, M_ZERO | M_WAITOK);
871 ASSERT(MUTEX_HELD(&cpu_lock));
873 if (cyclic_id_cache == NULL)
874 cyclic_id_cache = kmem_cache_create("cyclic_id_cache",
875 sizeof (cyc_id_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
880 cpu->cyp_heap = malloc(sizeof(cyc_index_t), M_CYCLIC, M_ZERO | M_WAITOK);
881 cpu->cyp_cyclics = malloc(sizeof(cyclic_t), M_CYCLIC, M_ZERO | M_WAITOK);
882 cpu->cyp_cyclics->cy_flags = CYF_FREE;
884 mtx_init(&cpu->cyp_mtx, "cyclic cpu", NULL, MTX_SPIN);
887 * Setup the backend for this CPU.
889 bcopy(&cyclic_backend, nbe, sizeof (cyc_backend_t));
890 if (nbe->cyb_configure != NULL)
891 nbe->cyb_arg = nbe->cyb_configure(c);
892 cpu->cyp_backend = nbe;
895 * On platforms where stray interrupts may be taken during startup,
896 * the CPU's cpu_cyclic pointer serves as an indicator that the
897 * cyclic subsystem for this CPU is prepared to field interrupts.
905 cyclic_unconfigure(cpu_t *c)
907 cyc_cpu_t *cpu = c->cpu_cyclic;
908 cyc_backend_t *be = cpu->cyp_backend;
909 cyb_arg_t bar = be->cyb_arg;
911 ASSERT(MUTEX_HELD(&cpu_lock));
913 c->cpu_cyclic = NULL;
916 * Let the backend know that the CPU is being yanked, and free up
917 * the backend structure.
919 if (be->cyb_unconfigure != NULL)
920 be->cyb_unconfigure(bar);
922 cpu->cyp_backend = NULL;
924 mtx_destroy(&cpu->cyp_mtx);
926 /* Finally, clean up our remaining dynamic structures. */
927 free(cpu->cyp_cyclics, M_CYCLIC);
928 free(cpu->cyp_heap, M_CYCLIC);
933 cyclic_omni_start(cyc_id_t *idp, cyc_cpu_t *cpu)
935 cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
 936 cyc_omni_cpu_t *ocpu = malloc(sizeof(cyc_omni_cpu_t), M_CYCLIC, M_WAITOK);
940 ASSERT(MUTEX_HELD(&cpu_lock));
941 ASSERT(idp->cyi_cpu == NULL);
943 hdlr.cyh_func = NULL;
947 when.cyt_interval = 0;
949 omni->cyo_online(omni->cyo_arg, cpu->cyp_cpu, &hdlr, &when);
951 ASSERT(hdlr.cyh_func != NULL);
952 ASSERT(when.cyt_when >= 0 && when.cyt_interval > 0);
955 ocpu->cyo_arg = hdlr.cyh_arg;
956 ocpu->cyo_ndx = cyclic_add_here(cpu, &hdlr, &when, 0);
957 ocpu->cyo_next = idp->cyi_omni_list;
958 idp->cyi_omni_list = ocpu;
962 cyclic_omni_stop(cyc_id_t *idp, cyc_cpu_t *cpu)
964 cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
965 cyc_omni_cpu_t *ocpu = idp->cyi_omni_list, *prev = NULL;
967 ASSERT(MUTEX_HELD(&cpu_lock));
968 ASSERT(idp->cyi_cpu == NULL);
969 ASSERT(ocpu != NULL);
971 while (ocpu != NULL && ocpu->cyo_cpu != cpu) {
973 ocpu = ocpu->cyo_next;
 977 * We _must_ have found a cyc_omni_cpu which corresponds to this
978 * CPU -- the definition of an omnipresent cyclic is that it runs
979 * on all online CPUs.
981 ASSERT(ocpu != NULL);
984 idp->cyi_omni_list = ocpu->cyo_next;
986 prev->cyo_next = ocpu->cyo_next;
989 (void) cyclic_remove_here(ocpu->cyo_cpu, ocpu->cyo_ndx, NULL, CY_WAIT);
992 * The cyclic has been removed from this CPU; time to call the
993 * omnipresent offline handler.
995 if (omni->cyo_offline != NULL)
996 omni->cyo_offline(omni->cyo_arg, cpu->cyp_cpu, ocpu->cyo_arg);
998 free(ocpu, M_CYCLIC);
1006 ASSERT(MUTEX_HELD(&cpu_lock));
1008 idp = kmem_cache_alloc(cyclic_id_cache, KM_SLEEP);
1011 * The cyi_cpu field of the cyc_id_t structure tracks the CPU
1012 * associated with the cyclic. If and only if this field is NULL, the
1013 * cyc_id_t is an omnipresent cyclic. Note that cyi_omni_list may be
1014 * NULL for an omnipresent cyclic while the cyclic is being created
1017 idp->cyi_cpu = NULL;
1020 idp->cyi_next = cyclic_id_head;
1021 idp->cyi_prev = NULL;
1022 idp->cyi_omni_list = NULL;
1024 if (cyclic_id_head != NULL) {
1025 ASSERT(cyclic_id_head->cyi_prev == NULL);
1026 cyclic_id_head->cyi_prev = idp;
1029 cyclic_id_head = idp;
1035 * cyclic_id_t cyclic_add(cyc_handler_t *, cyc_time_t *)
1039 * cyclic_add() will create an unbound cyclic with the specified handler and
1040 * interval. The cyclic will run on a CPU which both has interrupts enabled
1041 * and is in the system CPU partition.
1043 * Arguments and notes
1045 * As its first argument, cyclic_add() takes a cyc_handler, which has the
1046 * following members:
1048 * cyc_func_t cyh_func <-- Cyclic handler
1049 * void *cyh_arg <-- Argument to cyclic handler
1051 * In addition to a cyc_handler, cyclic_add() takes a cyc_time, which
1052 * has the following members:
1054 * hrtime_t cyt_when <-- Absolute time, in nanoseconds since boot, at
1055 * which to start firing
1056 * hrtime_t cyt_interval <-- Length of interval, in nanoseconds
1058 * gethrtime() is the time source for nanoseconds since boot. If cyt_when
1059 * is set to 0, the cyclic will start to fire when cyt_interval next
1060 * divides the number of nanoseconds since boot.
1062 * The cyt_interval field _must_ be filled in by the caller; one-shots are
1063 * _not_ explicitly supported by the cyclic subsystem (cyclic_add() will
1064 * assert that cyt_interval is non-zero). The maximum value for either
1065 * field is INT64_MAX; the caller is responsible for assuring that
1066 * cyt_when + cyt_interval <= INT64_MAX. Neither field may be negative.
1068 * For an arbitrary time t in the future, the cyclic handler is guaranteed
1069 * to have been called (t - cyt_when) / cyt_interval times. This will
1070 * be true even if interrupts have been disabled for periods greater than
1071 * cyt_interval nanoseconds. In order to compensate for such periods,
1072 * the cyclic handler may be called a finite number of times with an
1073 * arbitrarily small interval.
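 *
 * To illustrate with made-up numbers: with cyt_when = T and
 * cyt_interval = 10000000 (10ms), a consumer observing the system at
 * time T + 95ms is guaranteed that its handler has been called at least
 * (95ms / 10ms) = 9 times, even if some of those calls were bunched
 * together after a period of disabled interrupts.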
1075 * The cyclic subsystem will not enforce any lower bound on the interval;
1076 * if the interval is less than the time required to process an interrupt,
1077 * the CPU will wedge. It's the responsibility of the caller to assure that
1078 * either the value of the interval is sane, or that its caller has
1079 * sufficient privilege to deny service (i.e. its caller is root).
1083 * cyclic_add() returns a cyclic_id_t, which is guaranteed to be a value
1084 * other than CYCLIC_NONE. cyclic_add() cannot fail.
1088 * cpu_lock must be held by the caller, and the caller must not be in
1089 * interrupt context. cyclic_add() will perform a KM_SLEEP kernel
1090 * memory allocation, so the usual rules (e.g. p_lock cannot be held)
1091 * apply. A cyclic may be added even in the presence of CPUs that have
1092 * not been configured with respect to the cyclic subsystem, but only
1093 * configured CPUs will be eligible to run the new cyclic.
1095 * Cyclic handler's context
 1097 * Cyclic handlers are executed in interrupt context, from the backend's
 1098 * timer interrupt (this simplified port does not use the Solaris interrupt
 1099 * levels). The usual interrupt-context rules apply.
1101 * A cyclic handler may not grab ANY locks held by the caller of any of
1102 * cyclic_add() or cyclic_remove(); the implementation of these functions
1103 * may require blocking on cyclic handler completion.
 1104 * Moreover, cyclic handlers may not make any call back into the cyclic
 * subsystem.
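 *
 * A minimal usage sketch follows; the handler, counter and 10ms interval
 * are hypothetical, and whether 10ms is a sane interval is platform
 * dependent (see the interval caveat above):
 *
 *	static uint64_t my_ticks;
 *
 *	static void
 *	my_tick(void *arg)
 *	{
 *		my_ticks++;
 *	}
 *
 *	cyc_handler_t hdlr;
 *	cyc_time_t when;
 *	cyclic_id_t id;
 *
 *	hdlr.cyh_func = my_tick;
 *	hdlr.cyh_arg = NULL;
 *	when.cyt_when = 0;
 *	when.cyt_interval = 10000000;
 *
 *	mutex_enter(&cpu_lock);
 *	id = cyclic_add(&hdlr, &when);
 *	mutex_exit(&cpu_lock);
 *
 * and later, once the consumer is finished:
 *
 *	mutex_enter(&cpu_lock);
 *	cyclic_remove(id);
 *	mutex_exit(&cpu_lock);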
1108 cyclic_add(cyc_handler_t *hdlr, cyc_time_t *when)
1110 cyc_id_t *idp = cyclic_new_id();
1111 solaris_cpu_t *c = &solaris_cpu[curcpu];
1113 ASSERT(MUTEX_HELD(&cpu_lock));
1114 ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);
1116 idp->cyi_cpu = c->cpu_cyclic;
1117 idp->cyi_ndx = cyclic_add_here(idp->cyi_cpu, hdlr, when, 0);
1119 return ((uintptr_t)idp);
1123 * cyclic_id_t cyclic_add_omni(cyc_omni_handler_t *)
1127 * cyclic_add_omni() will create an omnipresent cyclic with the specified
1128 * online and offline handlers. Omnipresent cyclics run on all online
1129 * CPUs, including CPUs which have unbound interrupts disabled.
1133 * As its only argument, cyclic_add_omni() takes a cyc_omni_handler, which
1134 * has the following members:
1136 * void (*cyo_online)() <-- Online handler
1137 * void (*cyo_offline)() <-- Offline handler
1138 * void *cyo_arg <-- Argument to be passed to on/offline handlers
1142 * The cyo_online member is a pointer to a function which has the following
1145 * void * <-- Argument (cyo_arg)
1146 * cpu_t * <-- Pointer to CPU about to be onlined
1147 * cyc_handler_t * <-- Pointer to cyc_handler_t; must be filled in
1148 * by omni online handler
1149 * cyc_time_t * <-- Pointer to cyc_time_t; must be filled in by
1150 * omni online handler
1152 * The omni cyclic online handler is always called _before_ the omni
1153 * cyclic begins to fire on the specified CPU. As the above argument
1154 * description implies, the online handler must fill in the two structures
1155 * passed to it: the cyc_handler_t and the cyc_time_t. These are the
1156 * same two structures passed to cyclic_add(), outlined above. This
1157 * allows the omni cyclic to have maximum flexibility; different CPUs may
1160 * (a) have different intervals
1161 * (b) be explicitly in or out of phase with one another
1162 * (c) have different handlers
1163 * (d) have different handler arguments
1164 * (e) fire at different levels
1166 * Of these, (e) seems somewhat dubious, but is nonetheless allowed.
1168 * The omni online handler is called in the same context as cyclic_add(),
1169 * and has the same liberties: omni online handlers may perform KM_SLEEP
1170 * kernel memory allocations, and may grab locks which are also acquired
1171 * by cyclic handlers. However, omni cyclic online handlers may _not_
1172 * call back into the cyclic subsystem, and should be generally careful
1173 * about calling into arbitrary kernel subsystems.
1177 * The cyo_offline member is a pointer to a function which has the following
1180 * void * <-- Argument (cyo_arg)
1181 * cpu_t * <-- Pointer to CPU about to be offlined
1182 * void * <-- CPU's cyclic argument (that is, value
1183 * to which cyh_arg member of the cyc_handler_t
1184 * was set in the omni online handler)
1186 * The omni cyclic offline handler is always called _after_ the omni
1187 * cyclic has ceased firing on the specified CPU. Its purpose is to
1188 * allow cleanup of any resources dynamically allocated in the omni cyclic
1189 * online handler. The context of the offline handler is identical to
1190 * that of the online handler; the same constraints and liberties apply.
1192 * The offline handler is optional; it may be NULL.
1196 * cyclic_add_omni() returns a cyclic_id_t, which is guaranteed to be a
1197 * value other than CYCLIC_NONE. cyclic_add_omni() cannot fail.
 1201 * The caller's context is identical to that of cyclic_add(), specified
 * above.
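 *
 * A minimal sketch of an omnipresent consumer (the handler names and the
 * 10ms interval are hypothetical; error handling is omitted):
 *
 *	static void
 *	my_tick(void *counter)
 *	{
 *		(*(uint64_t *)counter)++;
 *	}
 *
 *	static void
 *	my_online(void *arg, cpu_t *c, cyc_handler_t *hdlr, cyc_time_t *t)
 *	{
 *		hdlr->cyh_func = my_tick;
 *		hdlr->cyh_arg = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
 *		t->cyt_when = 0;
 *		t->cyt_interval = 10000000;
 *	}
 *
 *	static void
 *	my_offline(void *arg, cpu_t *c, void *counter)
 *	{
 *		kmem_free(counter, sizeof (uint64_t));
 *	}
 *
 *	cyc_omni_handler_t omni;
 *	cyclic_id_t id;
 *
 *	omni.cyo_online = my_online;
 *	omni.cyo_offline = my_offline;
 *	omni.cyo_arg = NULL;
 *
 *	mutex_enter(&cpu_lock);
 *	id = cyclic_add_omni(&omni);
 *	mutex_exit(&cpu_lock);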
1205 cyclic_add_omni(cyc_omni_handler_t *omni)
1207 cyc_id_t *idp = cyclic_new_id();
1212 ASSERT(MUTEX_HELD(&cpu_lock));
1213 ASSERT(omni != NULL && omni->cyo_online != NULL);
1215 idp->cyi_omni_hdlr = *omni;
1217 for (i = 0; i < MAXCPU; i++) {
1218 if (pcpu_find(i) == NULL)
1221 c = &solaris_cpu[i];
1223 if ((cpu = c->cpu_cyclic) == NULL)
1226 cyclic_omni_start(idp, cpu);
 1230 * We must have found at least one online CPU on which to run
 * this omnipresent cyclic.
1233 ASSERT(idp->cyi_omni_list != NULL);
1234 ASSERT(idp->cyi_cpu == NULL);
1236 return ((uintptr_t)idp);
1240 * void cyclic_remove(cyclic_id_t)
1244 * cyclic_remove() will remove the specified cyclic from the system.
1246 * Arguments and notes
1248 * The only argument is a cyclic_id returned from either cyclic_add() or
1249 * cyclic_add_omni().
1251 * By the time cyclic_remove() returns, the caller is guaranteed that the
1252 * removed cyclic handler has completed execution (this is the same
1253 * semantic that untimeout() provides). As a result, cyclic_remove() may
1254 * need to block, waiting for the removed cyclic to complete execution.
1255 * This leads to an important constraint on the caller: no lock may be
 1256 * held across cyclic_remove() that also may be acquired by a cyclic
 * handler.
1261 * None; cyclic_remove() always succeeds.
1265 * cpu_lock must be held by the caller, and the caller must not be in
1266 * interrupt context. The caller may not hold any locks which are also
1267 * grabbed by any cyclic handler. See "Arguments and notes", above.
1270 cyclic_remove(cyclic_id_t id)
1272 cyc_id_t *idp = (cyc_id_t *)id;
1273 cyc_id_t *prev = idp->cyi_prev, *next = idp->cyi_next;
1274 cyc_cpu_t *cpu = idp->cyi_cpu;
1276 ASSERT(MUTEX_HELD(&cpu_lock));
1279 (void) cyclic_remove_here(cpu, idp->cyi_ndx, NULL, CY_WAIT);
1281 ASSERT(idp->cyi_omni_list != NULL);
1282 while (idp->cyi_omni_list != NULL)
1283 cyclic_omni_stop(idp, idp->cyi_omni_list->cyo_cpu);
1287 ASSERT(cyclic_id_head != idp);
1288 prev->cyi_next = next;
1290 ASSERT(cyclic_id_head == idp);
1291 cyclic_id_head = next;
1295 next->cyi_prev = prev;
1297 kmem_cache_free(cyclic_id_cache, idp);
1301 cyclic_init(cyc_backend_t *be)
1303 ASSERT(MUTEX_HELD(&cpu_lock));
1306 * Copy the passed cyc_backend into the backend template. This must
1307 * be done before the CPU can be configured.
1309 bcopy(be, &cyclic_backend, sizeof (cyc_backend_t));
1311 cyclic_configure(&solaris_cpu[curcpu]);
1315 * It is assumed that cyclic_mp_init() is called some time after cyclic
1316 * init (and therefore, after cpu0 has been initialized). We grab cpu_lock,
 1317 * find the already initialized CPU, and initialize every other CPU with the
 * same backend.
1321 cyclic_mp_init(void)
1326 mutex_enter(&cpu_lock);
1328 for (i = 0; i <= mp_maxid; i++) {
1329 if (pcpu_find(i) == NULL)
1332 c = &solaris_cpu[i];
1334 if (c->cpu_cyclic == NULL)
1335 cyclic_configure(c);
1338 mutex_exit(&cpu_lock);
1347 for (id = 0; id <= mp_maxid; id++) {
1348 if (pcpu_find(id) == NULL)
1351 c = &solaris_cpu[id];
1353 if (c->cpu_cyclic == NULL)
1356 cyclic_unconfigure(c);
1359 if (cyclic_id_cache != NULL)
1360 kmem_cache_destroy(cyclic_id_cache);
1363 #include "cyclic_machdep.c"
1366 * Cyclic subsystem initialisation.
1369 cyclic_load(void *dummy)
1371 mutex_enter(&cpu_lock);
1373 /* Initialise the machine-dependent backend. */
1374 cyclic_machdep_init();
1376 mutex_exit(&cpu_lock);
1379 SYSINIT(cyclic_register, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_load, NULL);
1384 mutex_enter(&cpu_lock);
1386 /* Uninitialise the machine-dependent backend. */
1387 cyclic_machdep_uninit();
1389 mutex_exit(&cpu_lock);
1392 SYSUNINIT(cyclic_unregister, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_unload, NULL);
1396 cyclic_modevent(module_t mod __unused, int type, void *data __unused)
1418 DEV_MODULE(cyclic, cyclic_modevent, NULL);
1419 MODULE_VERSION(cyclic, 1);
1420 MODULE_DEPEND(cyclic, opensolaris, 1, 1, 1);