/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif
/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */
struct	intr_event *clk_intr_event;
struct	intr_event *tty_intr_event;
struct	proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int	intr_event_schedule_thread(struct intr_event *ie,
		    struct intr_thread *ithd);
static int	intr_filter_loop(struct intr_event *ie,
		    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
		    struct intr_handler *ih);
#else
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
		    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);
/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	/* Cases mapping the remaining INTR_TYPE_* values to PI_* levels. */
	case INTR_TYPE_MISC:
		pri = PI_DULL;	/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}
/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}
/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}
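
/*
 * For example (hypothetical names): an event named "irq10" with handlers
 * "em0" and "uhci0" ends up with the full name "irq10: em0 uhci0".  Each
 * handler whose name no longer fits contributes a trailing '+', and once
 * even the '+'s run out of room the last one is turned into a '*'.
 */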
int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}
/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
	lwpid_t id;
	int error;
	cpuset_t mask;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);
	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);
	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		CPU_ZERO(&mask);
		if (cpu == NOCPU)
			CPU_COPY(cpuset_root, &mask);
		else
			CPU_SET(cpu, &mask);
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		error = cpuset_setthread(id, &mask);
		if (error)
			return (error);
	} else
		mtx_unlock(&ie->ie_lock);
	error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			CPU_ZERO(&mask);
			if (ie->ie_cpu == NOCPU)
				CPU_COPY(cpuset_root, &mask);
			else
				CPU_SET(ie->ie_cpu, &mask);
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			(void)cpuset_setthread(id, &mask);
		} else
			mtx_unlock(&ie->ie_lock);
		return (error);
	}

	mtx_lock(&ie->ie_lock);
	ie->ie_cpu = cpu;
	mtx_unlock(&ie->ie_lock);

	return (error);
}
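
/*
 * Usage sketch (hypothetical, not from this file; compiled out): bind an
 * event's ithread and primary interrupt context to CPU 2, then undo the
 * binding again with NOCPU.
 */
#if 0
static int
example_bind(struct intr_event *ie)
{
	int error;

	error = intr_event_bind(ie, 2);		/* bind to CPU 2 */
	if (error != 0)
		return (error);
	return (intr_event_bind(ie, NOCPU));	/* unbind */
}
#endif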
static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}
int
intr_setaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	u_char cpu;
	int n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = (u_char)n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	return (intr_event_bind(ie, cpu));
}
int
intr_getaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	CPU_ZERO(mask);
	mtx_lock(&ie->ie_lock);
	if (ie->ie_cpu == NOCPU)
		CPU_COPY(cpuset_root, mask);
	else
		CPU_SET(ie->ie_cpu, mask);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}
#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif
static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}
#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it); /* XXX - do we really need this?!?!? */
	} else { /* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif
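
/*
 * Usage sketch (hypothetical softc and names; compiled out): register a
 * filter plus a threaded handler on an event, the same pattern the MI code
 * behind bus_setup_intr(9) uses.
 */
#if 0
static void *example_cookie;

static int
example_filter(void *arg)
{
	/* Primary interrupt context: may not sleep or use sleep mutexes. */
	return (FILTER_SCHEDULE_THREAD);	/* hand off to the ithread */
}

static void
example_handler(void *arg)
{
	/* Ithread context: runs at the priority passed to add_handler. */
}

static int
example_attach(struct intr_event *ie, void *sc)
{
	return (intr_event_add_handler(ie, "example", example_filter,
	    example_handler, sc, intr_priority(INTR_TYPE_NET),
	    INTR_TYPE_NET | INTR_MPSAFE, &example_cookie));
}
#endif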
/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = index(ih->ih_name, ':');
	if (start == NULL)
		start = index(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
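
/*
 * For example (hypothetical name): describing the handler "em0" with
 * "rxq0" rewrites its name to "em0:rxq0"; a later call replaces whatever
 * follows the colon rather than appending a second description.
 */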
/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}
/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		while (ithd->it_flags & IT_WAIT)
			msleep_spin(ithd, &td->td_lock, "isync", 0);
	}
	thread_unlock(td);
}
#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ie->ie_thread->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}
static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		it->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}
static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif
/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, u_char cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
	void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct thread *td;
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	if (error)
		return (error);
	if (pri == SWI_CLOCK) {
		td = ie->ie_thread->it_thread;
		thread_lock(td);
		td->td_flags |= TDF_NOLOAD;
		thread_unlock(td);
	}
	return (0);
}
/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}
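
/*
 * Usage sketch (hypothetical names; compiled out): create a private
 * software interrupt and schedule it later, e.g. from a hardware
 * interrupt filter or a timeout routine.
 */
#if 0
static struct intr_event *example_sie;
static void *example_swi_cookie;

static void
example_swi(void *arg)
{
	/* Runs in a swi ithread at PI_SWI(SWI_TQ) priority. */
}

static void
example_swi_setup(void)
{
	if (swi_add(&example_sie, "example", example_swi, NULL, SWI_TQ,
	    INTR_MPSAFE, &example_swi_cookie) != 0)
		panic("could not add example swi");
}

static void
example_swi_trigger(void)
{
	/* Sets ih_need and wakes the swi ithread via swi_sched(). */
	swi_sched(example_swi_cookie, 0);
}
#endif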
/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}
#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	    ih->ih_name, ih->ih_flags);

	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif
/*
 * This is a public function for use by drivers that mux interrupt
 * handlers for child devices from their interrupt handler.
 */
void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (ih->ih_need == 0)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}
static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}
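
/*
 * The storm threshold above can be tuned at runtime via the
 * hw.intr_storm_threshold sysctl (or as a boot-time tunable of the same
 * name); setting it to 0 disables storm protection entirely, since the
 * check above only fires for a non-zero threshold.
 */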
#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int wake;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & (IT_DEAD | IT_WAIT))) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		if (ithd->it_flags & IT_WAIT) {
			wake = 1;
			ithd->it_flags &= ~IT_WAIT;
		}
		thread_unlock(td);
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}
/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (e.g. i386) pass a frame to some
 *                              handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int error, ret, thread;

	td = curthread;

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	thread = 0;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_filter == NULL) {
			thread = 1;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));

		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = 1;
		}
	}
	td->td_intr_frame = oldframe;

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		error = intr_event_schedule_thread(ie);
#ifndef XEN
		KASSERT(error == 0, ("bad stray interrupt"));
#else
		if (error != 0)
			log(LOG_WARNING, "bad stray interrupt");
#endif
	}
	critical_exit();
	td->td_intr_nesting_level--;
	return (0);
}
#else
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;
	int wake;

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			if (priv)
				priv_ithread_execute_handler(p, ih);
			else
				ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & (IT_DEAD | IT_WAIT))) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		if (ithd->it_flags & IT_WAIT) {
			wake = 1;
			ithd->it_flags &= ~IT_WAIT;
		}
		thread_unlock(td);
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}
/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:              No filter recognized the event, and no
 *                              filter-less handler is registered on this
 *                              line.
 * o FILTER_HANDLED:            A filter claimed the event and served it.
 * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 *                              least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 *                              scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */
static int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			thread_only = 1;
			continue;
		}
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));
		if (ret & FILTER_STRAY)
			continue;
		else {
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}
/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (e.g. i386) pass a frame to some
 *                              handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct trapframe *oldframe;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;
	thread = intr_filter_loop(ie, frame, &ithd);
	if (thread & FILTER_HANDLED) {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	} else {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	}
	td->td_intr_frame = oldframe;
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif
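
/*
 * Filter return-value sketch (hypothetical device and helpers; compiled
 * out): a filter claims its device's interrupts in primary context and
 * asks for ithread service only when there is deferred work.
 */
#if 0
static int
example_status_filter(void *arg)
{
	struct example_softc *sc = arg;		/* hypothetical softc */
	uint32_t status;

	status = example_read_status(sc);	/* hypothetical register read */
	if (status == 0)
		return (FILTER_STRAY);		/* not our interrupt */
	example_ack(sc, status);		/* hypothetical interrupt ack */
	if (status & EXAMPLE_NEEDS_THREAD)
		return (FILTER_HANDLED | FILTER_SCHEDULE_THREAD);
	return (FILTER_HANDLED);
}
#endif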
#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	/* Cases printing symbolic names for the PI_* levels. */
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}
/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}
/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */
/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);
/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;
	u_int j;

	cp = intrnames;
	j = 0;
	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
	    i++, j++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif
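
/*
 * From the DDB prompt, "show intr" lists the events above (the 'v'
 * modifier adds per-handler detail and 'a' includes handler-less events),
 * and "show intrcnt" prints the non-zero interrupt counters.
 */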