2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
35 #include <sys/rtprio.h>
36 #include <sys/systm.h>
37 #include <sys/interrupt.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
41 #include <sys/limits.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
46 #include <sys/random.h>
47 #include <sys/resourcevar.h>
48 #include <sys/sched.h>
50 #include <sys/sysctl.h>
51 #include <sys/unistd.h>
52 #include <sys/vmmeter.h>
53 #include <machine/atomic.h>
54 #include <machine/cpu.h>
55 #include <machine/md_var.h>
56 #include <machine/stdarg.h>
59 #include <ddb/db_sym.h>
63 * Describe an interrupt thread. There is one of these per interrupt event.
66 struct intr_event *it_event;
67 struct thread *it_thread; /* Kernel thread. */
68 int it_flags; /* (j) IT_* flags. */
69 int it_need; /* Needs service. */
72 /* Interrupt thread flags kept in it_flags */
73 #define IT_DEAD 0x000001 /* Thread is waiting to exit. */
80 struct intr_event *clk_intr_event;
81 struct intr_event *tty_intr_event;
84 struct proc *intrproc;
86 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
88 static int intr_storm_threshold = 1000;
89 TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
90 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
91 &intr_storm_threshold, 0,
92 "Number of consecutive interrupts before storm protection is enabled");
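/*
 * Example (administrative sketch, not code from this file): the storm
 * threshold is both a loader tunable and a read/write sysctl, so it can
 * be adjusted without rebuilding the kernel:
 *
 *	hw.intr_storm_threshold=2000		# in /boot/loader.conf
 *	# sysctl hw.intr_storm_threshold=0	# at run time; 0 disables
 *						# the storm check entirely
 */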
93 static TAILQ_HEAD(, intr_event) event_list =
94 TAILQ_HEAD_INITIALIZER(event_list);
96 static void intr_event_update(struct intr_event *ie);
98 static struct intr_thread *ithread_create(const char *name,
99 struct intr_handler *ih);
101 static struct intr_thread *ithread_create(const char *name);
103 static void ithread_destroy(struct intr_thread *ithread);
104 static void ithread_execute_handlers(struct proc *p,
105 struct intr_event *ie);
107 static void priv_ithread_execute_handler(struct proc *p,
108 struct intr_handler *ih);
110 static void ithread_loop(void *);
111 static void ithread_update(struct intr_thread *ithd);
112 static void start_softintr(void *);
114 /* Map an interrupt type to an ithread priority. */
116 intr_priority(enum intr_type flags)
120 flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
121 INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
128 * XXX We need to refine this. BSD/OS distinguishes
129 * between tape and disk priorities.
137 pri = PI_DISK; /* XXX or PI_CAM? */
139 case INTR_TYPE_AV: /* Audio/video */
146 pri = PI_DULL; /* don't care */
149 /* We didn't specify an interrupt level. */
150 panic("intr_priority: no interrupt type in flags");
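/*
 * Illustration of the mapping above (the PI_* classes come from
 * <sys/priority.h>); for example, a serial driver registering with
 * INTR_TYPE_TTY ends up with an ithread priority of PI_TTY:
 *
 *	pri = intr_priority(INTR_TYPE_TTY | INTR_MPSAFE);	(yields PI_TTY)
 */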
157 * Update an ithread based on the associated intr_event.
160 ithread_update(struct intr_thread *ithd)
162 struct intr_event *ie;
167 td = ithd->it_thread;
169 /* Determine the overall priority of this event. */
170 if (TAILQ_EMPTY(&ie->ie_handlers))
173 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
175 /* Update name and priority. */
176 strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
183 * Regenerate the full name of an interrupt event and update its priority.
186 intr_event_update(struct intr_event *ie)
188 struct intr_handler *ih;
192 /* Start off with no entropy and just the name of the event. */
193 mtx_assert(&ie->ie_lock, MA_OWNED);
194 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
195 ie->ie_flags &= ~IE_ENTROPY;
199 /* Run through all the handlers updating values. */
200 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
201 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
202 sizeof(ie->ie_fullname)) {
203 strcat(ie->ie_fullname, " ");
204 strcat(ie->ie_fullname, ih->ih_name);
208 if (ih->ih_flags & IH_ENTROPY)
209 ie->ie_flags |= IE_ENTROPY;
213 * If the handler names were too long, add +'s to indicate missing
214 * names. If we run out of room and still have +'s to add, change
215 * the last character from a + to a *.
217 last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
218 while (missed-- > 0) {
219 if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
226 strcat(ie->ie_fullname, " +");
229 strcat(ie->ie_fullname, "+");
233	 * If this event has an ithread, update its priority and
236 if (ie->ie_thread != NULL)
237 ithread_update(ie->ie_thread);
238 CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
242 intr_event_create(struct intr_event **event, void *source, int flags,
243 void (*disable)(void *), void (*enable)(void *), void (*eoi)(void *),
244 int (*assign_cpu)(void *, u_char), const char *fmt, ...)
246 struct intr_event *ie;
249 /* The only valid flag during creation is IE_SOFT. */
250 if ((flags & ~IE_SOFT) != 0)
252 ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
253 ie->ie_source = source;
254 ie->ie_disable = disable;
255 ie->ie_enable = enable;
257 ie->ie_assign_cpu = assign_cpu;
258 ie->ie_flags = flags;
260 TAILQ_INIT(&ie->ie_handlers);
261 mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
264 vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
266 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
267 mtx_pool_lock(mtxpool_sleep, &event_list);
268 TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
269 mtx_pool_unlock(mtxpool_sleep, &event_list);
272 CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
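/*
 * Example (hypothetical MD interrupt code; "isrc", "vector" and the
 * callback names are placeholders): creating an event for a hardware
 * interrupt source with the signature above:
 *
 *	error = intr_event_create(&isrc->is_event, isrc, 0,
 *	    my_disable_source, my_enable_source, my_eoi_source,
 *	    my_assign_cpu, "irq%d:", vector);
 *	if (error != 0)
 *		panic("could not create event for irq%d: %d", vector, error);
 */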
277 * Bind an interrupt event to the specified CPU. Note that not all
278 * platforms support binding an interrupt to a CPU. For those
279 * platforms this request will fail. For supported platforms, any
280 * associated ithreads as well as the primary interrupt context will
281	 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
282 * the interrupt event.
285 intr_event_bind(struct intr_event *ie, u_char cpu)
290 /* Need a CPU to bind to. */
291 if (cpu != NOCPU && CPU_ABSENT(cpu))
294 if (ie->ie_assign_cpu == NULL)
297 /* Don't allow a bind request if the interrupt is already bound. */
298 mtx_lock(&ie->ie_lock);
299 if (ie->ie_cpu != NOCPU && cpu != NOCPU) {
300 mtx_unlock(&ie->ie_lock);
303 mtx_unlock(&ie->ie_lock);
305 error = ie->ie_assign_cpu(ie->ie_source, cpu);
308 mtx_lock(&ie->ie_lock);
309 if (ie->ie_thread != NULL)
310 td = ie->ie_thread->it_thread;
318 mtx_unlock(&ie->ie_lock);
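/*
 * Example: bind an event (its primary interrupt context and any ithread)
 * to CPU 2, and later remove the binding again.  The call fails on
 * platforms that do not provide an ie_assign_cpu method (see the check
 * above).
 *
 *	error = intr_event_bind(ie, 2);
 *	...
 *	error = intr_event_bind(ie, NOCPU);	(unbind)
 */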
323 intr_event_destroy(struct intr_event *ie)
326 mtx_lock(&ie->ie_lock);
327 if (!TAILQ_EMPTY(&ie->ie_handlers)) {
328 mtx_unlock(&ie->ie_lock);
331 mtx_pool_lock(mtxpool_sleep, &event_list);
332 TAILQ_REMOVE(&event_list, ie, ie_list);
333 mtx_pool_unlock(mtxpool_sleep, &event_list);
335 if (ie->ie_thread != NULL) {
336 ithread_destroy(ie->ie_thread);
337 ie->ie_thread = NULL;
340 mtx_unlock(&ie->ie_lock);
341 mtx_destroy(&ie->ie_lock);
347 static struct intr_thread *
348 ithread_create(const char *name)
350 struct intr_thread *ithd;
354 ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
356 error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
357 &td, RFSTOPPED | RFHIGHPID,
358 0, "intr", "%s", name);
360		panic("kproc_kthread_add() failed with %d", error);
362 sched_class(td, PRI_ITHD);
365 td->td_pflags |= TDP_ITHREAD;
366 ithd->it_thread = td;
367 CTR2(KTR_INTR, "%s: created %s", __func__, name);
371 static struct intr_thread *
372 ithread_create(const char *name, struct intr_handler *ih)
374 struct intr_thread *ithd;
378 ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
380 error = kproc_kthread_add(ithread_loop, ih, &intrproc,
381 &td, RFSTOPPED | RFHIGHPID,
382 0, "intr", "%s", name);
384		panic("kproc_kthread_add() failed with %d", error);
386 sched_class(td, PRI_ITHD);
389 td->td_pflags |= TDP_ITHREAD;
390 ithd->it_thread = td;
391 CTR2(KTR_INTR, "%s: created %s", __func__, name);
397 ithread_destroy(struct intr_thread *ithread)
401 CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
402 td = ithread->it_thread;
404 ithread->it_flags |= IT_DEAD;
405 if (TD_AWAITING_INTR(td)) {
407 sched_add(td, SRQ_INTR);
414 intr_event_add_handler(struct intr_event *ie, const char *name,
415 driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
416 enum intr_type flags, void **cookiep)
418 struct intr_handler *ih, *temp_ih;
419 struct intr_thread *it;
421 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
424 /* Allocate and populate an interrupt handler structure. */
425 ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
426 ih->ih_filter = filter;
427 ih->ih_handler = handler;
428 ih->ih_argument = arg;
432 if (flags & INTR_EXCL)
433 ih->ih_flags = IH_EXCLUSIVE;
434 if (flags & INTR_MPSAFE)
435 ih->ih_flags |= IH_MPSAFE;
436 if (flags & INTR_ENTROPY)
437 ih->ih_flags |= IH_ENTROPY;
439	/* We can only have one exclusive handler in an event. */
440 mtx_lock(&ie->ie_lock);
441 if (!TAILQ_EMPTY(&ie->ie_handlers)) {
442 if ((flags & INTR_EXCL) ||
443 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
444 mtx_unlock(&ie->ie_lock);
450 /* Add the new handler to the event in priority order. */
451 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
452 if (temp_ih->ih_pri > ih->ih_pri)
456 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
458 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
459 intr_event_update(ie);
461 /* Create a thread if we need one. */
462 while (ie->ie_thread == NULL && handler != NULL) {
463 if (ie->ie_flags & IE_ADDING_THREAD)
464 msleep(ie, &ie->ie_lock, 0, "ithread", 0);
466 ie->ie_flags |= IE_ADDING_THREAD;
467 mtx_unlock(&ie->ie_lock);
468 it = ithread_create("intr: newborn");
469 mtx_lock(&ie->ie_lock);
470 ie->ie_flags &= ~IE_ADDING_THREAD;
477 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
479 mtx_unlock(&ie->ie_lock);
487 intr_event_add_handler(struct intr_event *ie, const char *name,
488 driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
489 enum intr_type flags, void **cookiep)
491 struct intr_handler *ih, *temp_ih;
492 struct intr_thread *it;
494 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
497 /* Allocate and populate an interrupt handler structure. */
498 ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
499 ih->ih_filter = filter;
500 ih->ih_handler = handler;
501 ih->ih_argument = arg;
505 if (flags & INTR_EXCL)
506 ih->ih_flags = IH_EXCLUSIVE;
507 if (flags & INTR_MPSAFE)
508 ih->ih_flags |= IH_MPSAFE;
509 if (flags & INTR_ENTROPY)
510 ih->ih_flags |= IH_ENTROPY;
512	/* We can only have one exclusive handler in an event. */
513 mtx_lock(&ie->ie_lock);
514 if (!TAILQ_EMPTY(&ie->ie_handlers)) {
515 if ((flags & INTR_EXCL) ||
516 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
517 mtx_unlock(&ie->ie_lock);
523 /* Add the new handler to the event in priority order. */
524 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
525 if (temp_ih->ih_pri > ih->ih_pri)
529 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
531 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
532 intr_event_update(ie);
534 /* For filtered handlers, create a private ithread to run on. */
535 if (filter != NULL && handler != NULL) {
536 mtx_unlock(&ie->ie_lock);
537 it = ithread_create("intr: newborn", ih);
538 mtx_lock(&ie->ie_lock);
541		ithread_update(it);	/* XXX - do we really need this?!?!? */
542 } else { /* Create the global per-event thread if we need one. */
543 while (ie->ie_thread == NULL && handler != NULL) {
544 if (ie->ie_flags & IE_ADDING_THREAD)
545 msleep(ie, &ie->ie_lock, 0, "ithread", 0);
547 ie->ie_flags |= IE_ADDING_THREAD;
548 mtx_unlock(&ie->ie_lock);
549 it = ithread_create("intr: newborn", ih);
550 mtx_lock(&ie->ie_lock);
551 ie->ie_flags &= ~IE_ADDING_THREAD;
559 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
561 mtx_unlock(&ie->ie_lock);
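/*
 * Example (hypothetical driver; "sc", "foo_filter" and "foo_intr" are
 * placeholder names): drivers normally reach intr_event_add_handler()
 * indirectly through bus_setup_intr(9), supplying a filter, a threaded
 * handler, or both:
 *
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_NET | INTR_MPSAFE, foo_filter, foo_intr, sc,
 *	    &sc->irq_cookie);
 *	if (error != 0)
 *		device_printf(dev, "could not set up interrupt handler\n");
 */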
570 * Return the ie_source field from the intr_event an intr_handler is
574 intr_handler_source(void *cookie)
576 struct intr_handler *ih;
577 struct intr_event *ie;
579 ih = (struct intr_handler *)cookie;
584 ("interrupt handler \"%s\" has a NULL interrupt event",
586 return (ie->ie_source);
591 intr_event_remove_handler(void *cookie)
593 struct intr_handler *handler = (struct intr_handler *)cookie;
594 struct intr_event *ie;
596 struct intr_handler *ih;
604 ie = handler->ih_event;
606 ("interrupt handler \"%s\" has a NULL interrupt event",
608 mtx_lock(&ie->ie_lock);
609 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
612 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
615 mtx_unlock(&ie->ie_lock);
616 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
617 ih->ih_name, ie->ie_name);
621 * If there is no ithread, then just remove the handler and return.
622 * XXX: Note that an INTR_FAST handler might be running on another
625 if (ie->ie_thread == NULL) {
626 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
627 mtx_unlock(&ie->ie_lock);
628 free(handler, M_ITHREAD);
633 * If the interrupt thread is already running, then just mark this
634 * handler as being dead and let the ithread do the actual removal.
636 * During a cold boot while cold is set, msleep() does not sleep,
637 * so we have to remove the handler here rather than letting the
640 thread_lock(ie->ie_thread->it_thread);
641 if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
642 handler->ih_flags |= IH_DEAD;
645 * Ensure that the thread will process the handler list
646 * again and remove this handler if it has already passed
649 ie->ie_thread->it_need = 1;
651 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
652 thread_unlock(ie->ie_thread->it_thread);
653 while (handler->ih_flags & IH_DEAD)
654 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
655 intr_event_update(ie);
658	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
659 * this could lead to races of stale data when servicing an
663 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
664 if (!(ih->ih_flags & IH_FAST)) {
670 ithread_destroy(ie->ie_thread);
671 ie->ie_thread = NULL;
674 mtx_unlock(&ie->ie_lock);
675 free(handler, M_ITHREAD);
680 intr_event_schedule_thread(struct intr_event *ie)
682 struct intr_entropy entropy;
683 struct intr_thread *it;
689 * If no ithread or no handlers, then we have a stray interrupt.
691 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
692 ie->ie_thread == NULL)
701 * If any of the handlers for this ithread claim to be good
702 * sources of entropy, then gather some.
704 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
705 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
706 p->p_pid, td->td_name);
707 entropy.event = (uintptr_t)ie;
709 random_harvest(&entropy, sizeof(entropy), 2, 0,
713 KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
716 * Set it_need to tell the thread to keep running if it is already
717 * running. Then, lock the thread and see if we actually need to
718 * put it on the runqueue.
722 if (TD_AWAITING_INTR(td)) {
723 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
726 sched_add(td, SRQ_INTR);
728 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
729 __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
737 intr_event_remove_handler(void *cookie)
739 struct intr_handler *handler = (struct intr_handler *)cookie;
740 struct intr_event *ie;
741 struct intr_thread *it;
743 struct intr_handler *ih;
751 ie = handler->ih_event;
753 ("interrupt handler \"%s\" has a NULL interrupt event",
755 mtx_lock(&ie->ie_lock);
756 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
759 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
762 mtx_unlock(&ie->ie_lock);
763 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
764 ih->ih_name, ie->ie_name);
768 * If there are no ithreads (per event and per handler), then
769 * just remove the handler and return.
770 * XXX: Note that an INTR_FAST handler might be running on another CPU!
772 if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
773 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
774 mtx_unlock(&ie->ie_lock);
775 free(handler, M_ITHREAD);
779 /* Private or global ithread? */
780 it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
782 * If the interrupt thread is already running, then just mark this
783 * handler as being dead and let the ithread do the actual removal.
785 * During a cold boot while cold is set, msleep() does not sleep,
786 * so we have to remove the handler here rather than letting the
789 thread_lock(it->it_thread);
790 if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
791 handler->ih_flags |= IH_DEAD;
794 * Ensure that the thread will process the handler list
795 * again and remove this handler if it has already passed
800 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
801 thread_unlock(it->it_thread);
802 while (handler->ih_flags & IH_DEAD)
803 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
805 * At this point, the handler has been disconnected from the event,
806 * so we can kill the private ithread if any.
808 if (handler->ih_thread) {
809 ithread_destroy(handler->ih_thread);
810 handler->ih_thread = NULL;
812 intr_event_update(ie);
815	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
816 * this could lead to races of stale data when servicing an
820 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
821 if (handler != NULL) {
827 ithread_destroy(ie->ie_thread);
828 ie->ie_thread = NULL;
831 mtx_unlock(&ie->ie_lock);
832 free(handler, M_ITHREAD);
837 intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
839 struct intr_entropy entropy;
845 * If no ithread or no handlers, then we have a stray interrupt.
847 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
855 * If any of the handlers for this ithread claim to be good
856 * sources of entropy, then gather some.
858 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
859 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
860 p->p_pid, td->td_name);
861 entropy.event = (uintptr_t)ie;
863 random_harvest(&entropy, sizeof(entropy), 2, 0,
867 KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
870 * Set it_need to tell the thread to keep running if it is already
871 * running. Then, lock the thread and see if we actually need to
872 * put it on the runqueue.
876 if (TD_AWAITING_INTR(td)) {
877 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
880 sched_add(td, SRQ_INTR);
882 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
883 __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
892 * Add a software interrupt handler to a specified event. If a given event
893 * is not specified, then a new event is created.
896 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
897 void *arg, int pri, enum intr_type flags, void **cookiep)
899 struct intr_event *ie;
902 if (flags & INTR_ENTROPY)
905 ie = (eventp != NULL) ? *eventp : NULL;
908 if (!(ie->ie_flags & IE_SOFT))
911 error = intr_event_create(&ie, NULL, IE_SOFT,
912 NULL, NULL, NULL, NULL, "swi%d:", pri);
918 return (intr_event_add_handler(ie, name, NULL, handler, arg,
919 (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
923 * Schedule a software interrupt thread.
926 swi_sched(void *cookie, int flags)
928 struct intr_handler *ih = (struct intr_handler *)cookie;
929 struct intr_event *ie = ih->ih_event;
932 CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
936 * Set ih_need for this handler so that if the ithread is already
937 * running it will execute this handler on the next pass. Otherwise,
938 * it will execute it the next time it runs.
940 atomic_store_rel_int(&ih->ih_need, 1);
942 if (!(flags & SWI_DELAY)) {
943 PCPU_INC(cnt.v_soft);
945 error = intr_event_schedule_thread(ie, ie->ie_thread);
947 error = intr_event_schedule_thread(ie);
949 KASSERT(error == 0, ("stray software interrupt"));
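/*
 * Example (hypothetical; "foo_softintr", "foo_sc" and the cookie are
 * placeholder names): register a soft interrupt handler once and schedule
 * it whenever there is deferred work to do:
 *
 *	static void *foo_swi_cookie;
 *
 *	error = swi_add(NULL, "foo", foo_softintr, &foo_sc, SWI_TQ,
 *	    INTR_MPSAFE, &foo_swi_cookie);
 *	...
 *	swi_sched(foo_swi_cookie, 0);
 */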
954 * Remove a software interrupt handler. Currently this code does not
955 * remove the associated interrupt event if it becomes empty. Calling code
956 * may do so manually via intr_event_destroy(), but that's not really
957 * an optimal interface.
960 swi_remove(void *cookie)
963 return (intr_event_remove_handler(cookie));
968 priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
970 struct intr_event *ie;
974 * If this handler is marked for death, remove it from
975 * the list of handlers and wake up the sleeper.
977 if (ih->ih_flags & IH_DEAD) {
978 mtx_lock(&ie->ie_lock);
979 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
980 ih->ih_flags &= ~IH_DEAD;
982 mtx_unlock(&ie->ie_lock);
986 /* Execute this handler. */
987 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
988 __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
989 ih->ih_name, ih->ih_flags);
991 if (!(ih->ih_flags & IH_MPSAFE))
993 ih->ih_handler(ih->ih_argument);
994 if (!(ih->ih_flags & IH_MPSAFE))
1000 ithread_execute_handlers(struct proc *p, struct intr_event *ie)
1002 struct intr_handler *ih, *ihn;
1004 /* Interrupt handlers should not sleep. */
1005 if (!(ie->ie_flags & IE_SOFT))
1006 THREAD_NO_SLEEPING();
1007 TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
1010 * If this handler is marked for death, remove it from
1011 * the list of handlers and wake up the sleeper.
1013 if (ih->ih_flags & IH_DEAD) {
1014 mtx_lock(&ie->ie_lock);
1015 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
1016 ih->ih_flags &= ~IH_DEAD;
1018 mtx_unlock(&ie->ie_lock);
1022		/* Skip filter-only handlers. */
1023 if (ih->ih_handler == NULL)
1027 * For software interrupt threads, we only execute
1028 * handlers that have their need flag set. Hardware
1029 * interrupt threads always invoke all of their handlers.
1031 if (ie->ie_flags & IE_SOFT) {
1035 atomic_store_rel_int(&ih->ih_need, 0);
1038 /* Execute this handler. */
1039 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
1040 __func__, p->p_pid, (void *)ih->ih_handler,
1041 ih->ih_argument, ih->ih_name, ih->ih_flags);
1043 if (!(ih->ih_flags & IH_MPSAFE))
1045 ih->ih_handler(ih->ih_argument);
1046 if (!(ih->ih_flags & IH_MPSAFE))
1049 if (!(ie->ie_flags & IE_SOFT))
1050 THREAD_SLEEPING_OK();
1053 * Interrupt storm handling:
1055 * If this interrupt source is currently storming, then throttle
1056 * it to only fire the handler once per clock tick.
1058 * If this interrupt source is not currently storming, but the
1059 * number of back to back interrupts exceeds the storm threshold,
1060 * then enter storming mode.
1062 if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
1063 !(ie->ie_flags & IE_SOFT)) {
1064 /* Report the message only once every second. */
1065 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
1067 "interrupt storm detected on \"%s\"; throttling interrupt source\n",
1075 * Now that all the handlers have had a chance to run, reenable
1076 * the interrupt source.
1078 if (ie->ie_enable != NULL)
1079 ie->ie_enable(ie->ie_source);
1084 * This is the main code for interrupt threads.
1087 ithread_loop(void *arg)
1089 struct intr_thread *ithd;
1090 struct intr_event *ie;
1097 ithd = (struct intr_thread *)arg;
1098 KASSERT(ithd->it_thread == td,
1099 ("%s: ithread and proc linkage out of sync", __func__));
1100 ie = ithd->it_event;
1105 * As long as we have interrupts outstanding, go through the
1106 * list of handlers, giving each one a go at it.
1110 * If we are an orphaned thread, then just die.
1112 if (ithd->it_flags & IT_DEAD) {
1113 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
1114 p->p_pid, td->td_name);
1115 free(ithd, M_ITHREAD);
1120 * Service interrupts. If another interrupt arrives while
1121 * we are running, it will set it_need to note that we
1122 * should make another pass.
1124 while (ithd->it_need) {
1126 * This might need a full read and write barrier
1127 * to make sure that this write posts before any
1128 * of the memory or device accesses in the
1131 atomic_store_rel_int(&ithd->it_need, 0);
1132 ithread_execute_handlers(p, ie);
1134 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
1135 mtx_assert(&Giant, MA_NOTOWNED);
1138 * Processed all our interrupts. Now get the sched
1139 * lock. This may take a while and it_need may get
1140 * set again, so we have to check it again.
1143 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
1146 mi_switch(SW_VOL, NULL);
1151 * Ensure we are bound to the correct CPU. We can't
1152 * move ithreads until SMP is running however, so just
1153		 * leave interrupts on the boot CPU during boot.
1155 if (ie->ie_cpu != cpu && smp_started) {
1160 sched_bind(td, cpu);
1168 * This is the main code for interrupt threads.
1171 ithread_loop(void *arg)
1173 struct intr_thread *ithd;
1174 struct intr_handler *ih;
1175 struct intr_event *ie;
1183 ih = (struct intr_handler *)arg;
1184 priv = (ih->ih_thread != NULL) ? 1 : 0;
1185 ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
1186 KASSERT(ithd->it_thread == td,
1187 ("%s: ithread and proc linkage out of sync", __func__));
1188 ie = ithd->it_event;
1193 * As long as we have interrupts outstanding, go through the
1194 * list of handlers, giving each one a go at it.
1198 * If we are an orphaned thread, then just die.
1200 if (ithd->it_flags & IT_DEAD) {
1201 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
1202 p->p_pid, td->td_name);
1203 free(ithd, M_ITHREAD);
1208 * Service interrupts. If another interrupt arrives while
1209 * we are running, it will set it_need to note that we
1210 * should make another pass.
1212 while (ithd->it_need) {
1214 * This might need a full read and write barrier
1215 * to make sure that this write posts before any
1216 * of the memory or device accesses in the
1219 atomic_store_rel_int(&ithd->it_need, 0);
1221 priv_ithread_execute_handler(p, ih);
1223 ithread_execute_handlers(p, ie);
1225 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
1226 mtx_assert(&Giant, MA_NOTOWNED);
1229 * Processed all our interrupts. Now get the sched
1230 * lock. This may take a while and it_need may get
1231 * set again, so we have to check it again.
1234 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
1237 mi_switch(SW_VOL, NULL);
1242 * Ensure we are bound to the correct CPU. We can't
1243 * move ithreads until SMP is running however, so just
1244		 * leave interrupts on the boot CPU during boot.
1246 if (!priv && ie->ie_cpu != cpu && smp_started) {
1251 sched_bind(td, cpu);
1259 * Main loop for interrupt filter.
1261 * Some architectures (i386, amd64 and arm) require the optional frame
1262 * parameter, and use it as the main argument for fast handler execution
1263 * when ih_argument == NULL.
1266 * o FILTER_STRAY: No filter recognized the event, and no
1267 * filter-less handler is registered on this
1269 * o FILTER_HANDLED: A filter claimed the event and served it.
1270 * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at
1271 * least one filter-less handler on this line.
1272 * o FILTER_HANDLED |
1273 * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for
1274 * scheduling the per-handler ithread.
1276 * In case an ithread has to be scheduled, in *ithd there will be a
1277 * pointer to a struct intr_thread containing the thread to be
1282 intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
1283 struct intr_thread **ithd)
1285 struct intr_handler *ih;
1287 int ret, thread_only;
1291 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
1293 * Execute fast interrupt handlers directly.
1294 * To support clock handlers, if a handler registers
1295 * with a NULL argument, then we pass it a pointer to
1296 * a trapframe as its argument.
1298 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
1300 CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
1301 ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
1303 if (ih->ih_filter != NULL)
1304 ret = ih->ih_filter(arg);
1310 if (ret & FILTER_STRAY)
1313 *ithd = ih->ih_thread;
1319 * No filters handled the interrupt and we have at least
1320 * one handler without a filter. In this case, we schedule
1321 * all of the filter-less handlers to run in the ithread.
1324 *ithd = ie->ie_thread;
1325 return (FILTER_SCHEDULE_THREAD);
1327 return (FILTER_STRAY);
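/*
 * Example (hypothetical driver filter; "foo_softc", its register offsets
 * and flag bits are placeholders): a filter typically reads the device's
 * interrupt status and tells this loop what to do next.
 *
 *	static int
 *	foo_filter(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *		uint32_t status;
 *
 *		status = bus_read_4(sc->mem_res, FOO_INTR_STATUS);
 *		if (status == 0)
 *			return (FILTER_STRAY);
 *		bus_write_4(sc->mem_res, FOO_INTR_ACK, status);
 *		if (status & FOO_NEEDS_THREAD)
 *			return (FILTER_SCHEDULE_THREAD);
 *		return (FILTER_HANDLED);
 *	}
 */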
1331 * Main interrupt handling body.
1334 * o ie: the event connected to this interrupt.
1335 *  o frame:	some archs (e.g., i386) pass a frame to some
1336 *		handlers as their main argument.
1338 * o 0: everything ok.
1339 * o EINVAL: stray interrupt.
1342 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
1344 struct intr_thread *ithd;
1351 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
1354 td->td_intr_nesting_level++;
1357 thread = intr_filter_loop(ie, frame, &ithd);
1360 * If the interrupt was fully served, send it an EOI but leave
1361 * it unmasked. Otherwise, mask the source as well as sending
1364 if (thread & FILTER_HANDLED) {
1365 if (ie->ie_eoi != NULL)
1366 ie->ie_eoi(ie->ie_source);
1368 if (ie->ie_disable != NULL)
1369 ie->ie_disable(ie->ie_source);
1373 /* Interrupt storm logic */
1374 if (thread & FILTER_STRAY) {
1376 if (ie->ie_count < intr_storm_threshold)
1377 printf("Interrupt stray detection not present\n");
1380 /* Schedule an ithread if needed. */
1381 if (thread & FILTER_SCHEDULE_THREAD) {
1382 if (intr_event_schedule_thread(ie, ithd) != 0)
1383 panic("%s: impossible stray interrupt", __func__);
1385 td->td_intr_nesting_level--;
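/*
 * Example (machine-dependent sketch; "isrc", "vector" and "frame" are
 * placeholders): low-level interrupt dispatch looks up the event for the
 * vector and hands it to intr_event_handle() together with the trapframe:
 *
 *	if (intr_event_handle(isrc->is_event, frame) != 0)
 *		log(LOG_ERR, "stray irq on vector %d\n", vector);
 */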
1392 * Dump details about an interrupt handler
1395 db_dump_intrhand(struct intr_handler *ih)
1399 db_printf("\t%-10s ", ih->ih_name);
1400 switch (ih->ih_pri) {
1425 if (ih->ih_pri >= PI_SOFT)
1428 db_printf("%4u", ih->ih_pri);
1432 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
1433 db_printf("(%p)", ih->ih_argument);
1435 (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
1439 if (ih->ih_flags & IH_EXCLUSIVE) {
1445 if (ih->ih_flags & IH_ENTROPY) {
1448 db_printf("ENTROPY");
1451 if (ih->ih_flags & IH_DEAD) {
1457 if (ih->ih_flags & IH_MPSAFE) {
1460 db_printf("MPSAFE");
1474 * Dump details about an event.
1477 db_dump_intr_event(struct intr_event *ie, int handlers)
1479 struct intr_handler *ih;
1480 struct intr_thread *it;
1483 db_printf("%s ", ie->ie_fullname);
1486 db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
1488 db_printf("(no thread)");
1489 if (ie->ie_cpu != NOCPU)
1490 db_printf(" (CPU %d)", ie->ie_cpu);
1491 if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
1492 (it != NULL && it->it_need)) {
1495 if (ie->ie_flags & IE_SOFT) {
1499 if (ie->ie_flags & IE_ENTROPY) {
1502 db_printf("ENTROPY");
1505 if (ie->ie_flags & IE_ADDING_THREAD) {
1508 db_printf("ADDING_THREAD");
1511 if (it != NULL && it->it_need) {
1521 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
1522 db_dump_intrhand(ih);
1526 * Dump data about interrupt handlers
1528 DB_SHOW_COMMAND(intr, db_show_intr)
1530 struct intr_event *ie;
1533 verbose = index(modif, 'v') != NULL;
1534 all = index(modif, 'a') != NULL;
1535 TAILQ_FOREACH(ie, &event_list, ie_list) {
1536 if (!all && TAILQ_EMPTY(&ie->ie_handlers))
1538 db_dump_intr_event(ie, verbose);
1546 * Start standard software interrupt threads
1549 start_softintr(void *dummy)
1553 if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK,
1554 INTR_MPSAFE, &softclock_ih) ||
1555 swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
1556 panic("died while creating standard software ithreads");
1558 p = clk_intr_event->ie_thread->it_thread->td_proc;
1560 p->p_flag |= P_NOLOAD;
1563 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
1567 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
1568 * The data for this is machine dependent, and the declarations are in machine
1569 * dependent code. The layout of intrnames and intrcnt however is machine
1572 * We do not know the length of intrcnt and intrnames at compile time, so
1573 * calculate things at run time.
1576 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
1578 return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
1582 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
1583 NULL, 0, sysctl_intrnames, "", "Interrupt Names");
1586 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
1588 return (sysctl_handle_opaque(oidp, intrcnt,
1589 (char *)eintrcnt - (char *)intrcnt, req));
1592 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
1593 NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
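/*
 * Example (userland, illustrative): vmstat(8) and systat(1) consume these
 * oids; a program can fetch the raw counters itself with sysctl(3):
 *
 *	size_t len;
 *
 *	if (sysctlbyname("hw.intrcnt", NULL, &len, NULL, 0) == 0) {
 *		u_long *counts = malloc(len);
 *		sysctlbyname("hw.intrcnt", counts, &len, NULL, 0);
 *	}
 */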
1597 * DDB command to dump the interrupt statistics.
1599 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
1605 for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
1609 db_printf("%s\t%lu\n", cp, *i);
1610 cp += strlen(cp) + 1;