/*-
 * Copyright (c) 2015-2016 Svatopluk Kraus
 * Copyright (c) 2015-2016 Michal Meloun
 * All rights reserved.
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * Copyright (c) 2021 Jessica Clarke <jrtc27@FreeBSD.org>
 *
 * Portions of this software were developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

/*
 *	New-style Interrupt Framework
 *
 *  TODO: - add support for disconnected PICs.
 *        - support enabling IPIs (PPIs) on other CPUs once they are started.
 *        - complete support for removable PICs.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef IOMMU
#include <dev/iommu/iommu_msi.h>
#endif

#include "pic_if.h"
#include "msi_if.h"

#define	INTRNAME_LEN	(2 * MAXCOMLEN + 1)

#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
    printf(fmt, ##args); } while (0)
#else
#define debugf(fmt, args...)
#endif

MALLOC_DECLARE(M_INTRNG);
MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");

/* Main interrupt handler called from assembler -> 'hidden' for C code. */
void intr_irq_handler(struct trapframe *tf);

/* Root interrupt controller stuff. */
device_t intr_irq_root_dev;
static intr_irq_filter_t *irq_root_filter;
static void *irq_root_arg;

struct intr_pic_child {
        SLIST_ENTRY(intr_pic_child)	 pc_next;
        struct intr_pic			*pc_pic;
        intr_child_irq_filter_t		*pc_filter;
        void				*pc_filter_arg;
        uintptr_t			 pc_start;
        uintptr_t			 pc_length;
};

/* Interrupt controller definition. */
struct intr_pic {
        SLIST_ENTRY(intr_pic)	pic_next;
        intptr_t		pic_xref;	/* hardware identification */
        device_t		pic_dev;
/* Only one of FLAG_PIC or FLAG_MSI may be set */
#define	FLAG_PIC	(1 << 0)
#define	FLAG_MSI	(1 << 1)
#define	FLAG_TYPE_MASK	(FLAG_PIC | FLAG_MSI)
        u_int			pic_flags;

        struct mtx		pic_child_lock;
        SLIST_HEAD(, intr_pic_child) pic_children;
};

#ifdef SMP
#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)

struct intr_ipi {
        intr_ipi_handler_t	*ii_handler;
        void			*ii_handler_arg;
        struct intr_irqsrc	*ii_isrc;
        char			ii_name[INTR_IPI_NAMELEN];
        u_long			*ii_count;
};

static device_t intr_ipi_dev;
static u_int intr_ipi_dev_priority;
static bool intr_ipi_dev_frozen;
#endif

static struct mtx pic_list_lock;
static SLIST_HEAD(, intr_pic) pic_list;

static struct intr_pic *pic_lookup(device_t dev, intptr_t xref, int flags);

/* Interrupt source definition. */
static struct mtx isrc_table_lock;
static struct intr_irqsrc **irq_sources;
static u_int irq_next_free;

#ifdef SMP
#ifdef EARLY_AP_STARTUP
static bool irq_assign_cpu = true;
#else
static bool irq_assign_cpu = false;
#endif

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];
#endif

u_int intr_nirq = NIRQ;
SYSCTL_UINT(_machdep, OID_AUTO, nirq, CTLFLAG_RDTUN, &intr_nirq, 0,
    "Number of IRQs");

/* Data for MI statistics reporting. */
u_long *intrcnt;
char *intrnames;
size_t sintrcnt;
size_t sintrnames;
int nintrcnt;
static bitstr_t *intrcnt_bitmap;

static struct intr_irqsrc *intr_map_get_isrc(u_int res_id);
static void intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc);
static struct intr_map_data *intr_map_get_map_data(u_int res_id);
static void intr_map_copy_map_data(u_int res_id, device_t *dev, intptr_t *xref,
    struct intr_map_data **data);

/*
 *  Interrupt framework initialization routine.
 */
static void
intr_irq_init(void *dummy __unused)
{

        SLIST_INIT(&pic_list);
        mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);

        mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);

        /*
         * - 2 counters for each I/O interrupt.
         * - mp_maxid + 1 counters for each IPI for SMP.
         */
        nintrcnt = intr_nirq * 2;
#ifdef SMP
        nintrcnt += INTR_IPI_COUNT * (mp_maxid + 1);
#endif

        intrcnt = mallocarray(nintrcnt, sizeof(u_long), M_INTRNG,
            M_WAITOK | M_ZERO);
        intrnames = mallocarray(nintrcnt, INTRNAME_LEN, M_INTRNG,
            M_WAITOK | M_ZERO);
        sintrcnt = nintrcnt * sizeof(u_long);
        sintrnames = nintrcnt * INTRNAME_LEN;

        /* Allocate the bitmap tracking counter allocations. */
        intrcnt_bitmap = bit_alloc(nintrcnt, M_INTRNG, M_WAITOK | M_ZERO);

        irq_sources = mallocarray(intr_nirq, sizeof(struct intr_irqsrc *),
            M_INTRNG, M_WAITOK | M_ZERO);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);

static void
intrcnt_setname(const char *name, int index)
{

        snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
            INTRNAME_LEN - 1, name);
}

/*
 *  Update name for interrupt source with interrupt event.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

        /* QQQ: What about stray counter name? */
        mtx_assert(&isrc_table_lock, MA_OWNED);
        intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}

/*
 *  Virtualization for interrupt source interrupt counter increment.
 */
static inline void
isrc_increment_count(struct intr_irqsrc *isrc)
{

        if (isrc->isrc_flags & INTR_ISRCF_PPI)
                atomic_add_long(&isrc->isrc_count[0], 1);
        else
                isrc->isrc_count[0]++;
}

/*
 *  Virtualization for interrupt source interrupt stray counter increment.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

        isrc->isrc_count[1]++;
}

/*
 *  Virtualization for interrupt source interrupt name update.
 */
static void
isrc_update_name(struct intr_irqsrc *isrc, const char *name)
{
        char str[INTRNAME_LEN];

        mtx_assert(&isrc_table_lock, MA_OWNED);

        if (name != NULL) {
                snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
                intrcnt_setname(str, isrc->isrc_index);
                snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
                    name);
                intrcnt_setname(str, isrc->isrc_index + 1);
        } else {
                snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
                intrcnt_setname(str, isrc->isrc_index);
                snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
                intrcnt_setname(str, isrc->isrc_index + 1);
        }
}

/*
 *  Virtualization for interrupt source interrupt counters setup.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
        int index;

        mtx_assert(&isrc_table_lock, MA_OWNED);

        /*
         * Allocate two counter values, the second tracking "stray" interrupts.
         */
        bit_ffc_area(intrcnt_bitmap, nintrcnt, 2, &index);
        if (index == -1)
                panic("Failed to allocate 2 counters. Array exhausted?");
        bit_nset(intrcnt_bitmap, index, index + 1);
        isrc->isrc_index = index;
        isrc->isrc_count = &intrcnt[index];
        isrc_update_name(isrc, NULL);
}

/*
 *  Virtualization for interrupt source interrupt counters release.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{
        int idx = isrc->isrc_index;

        mtx_assert(&isrc_table_lock, MA_OWNED);

        bit_nclear(intrcnt_bitmap, idx, idx + 1);
}

/*
 *  Main interrupt dispatch handler. It is called straight
 *  from the assembler, where the CPU interrupt is serviced.
 */
void
intr_irq_handler(struct trapframe *tf)
{
        struct trapframe *oldframe;
        struct thread *td;

        KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

        kasan_mark(tf, sizeof(*tf), sizeof(*tf), 0);
        kmsan_mark(tf, sizeof(*tf), KMSAN_STATE_INITED);

        VM_CNT_INC(v_intr);
        critical_enter();
        td = curthread;

        oldframe = td->td_intr_frame;
        td->td_intr_frame = tf;
        irq_root_filter(irq_root_arg);
        td->td_intr_frame = oldframe;
        critical_exit();
#ifdef HWPMC_HOOKS
        if (pmc_hook && TRAPF_USERMODE(tf) &&
            (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
                pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
#endif
}

static int
intr_child_irq_handler(struct intr_pic *parent, uintptr_t irq)
{
        struct intr_pic_child *child;
        bool found;

        found = false;
        mtx_lock_spin(&parent->pic_child_lock);
        SLIST_FOREACH(child, &parent->pic_children, pc_next) {
                if (child->pc_start <= irq &&
                    irq < (child->pc_start + child->pc_length)) {
                        found = true;
                        break;
                }
        }
        mtx_unlock_spin(&parent->pic_child_lock);

        if (found)
                return (child->pc_filter(child->pc_filter_arg, irq));

        return (FILTER_STRAY);
}

/*
 *  Interrupt controller dispatch function for interrupts. It should
 *  be called straight from the interrupt controller, once the associated
 *  interrupt source is learned.
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

        KASSERT(isrc != NULL, ("%s: no source", __func__));

        if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
                isrc_increment_count(isrc);

#ifdef INTR_SOLO
        if (isrc->isrc_filter != NULL) {
                int error;

                error = isrc->isrc_filter(isrc->isrc_arg, tf);
                PIC_POST_FILTER(isrc->isrc_dev, isrc);
                if (error == FILTER_HANDLED)
                        return (0);
        } else
#endif
        if (isrc->isrc_event != NULL) {
                if (intr_event_handle(isrc->isrc_event, tf) == 0)
                        return (0);
        }

        if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
                isrc_increment_straycount(isrc);
        return (EINVAL);
}

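/*
 * Example (an illustrative sketch only, not part of this file): a leaf PIC
 * driver's dispatch filter would typically decode the pending hardware
 * interrupt and hand the matching source to the framework roughly like
 * this, as the GIC drivers do:
 *
 *	static int
 *	my_pic_intr(void *arg)
 *	{
 *		struct my_pic_softc *sc = arg;
 *		u_int hwirq = my_pic_read_pending(sc);
 *
 *		if (intr_isrc_dispatch(&sc->isrcs[hwirq].isrc,
 *		    curthread->td_intr_frame) != 0) {
 *			my_pic_mask(sc, hwirq);
 *			device_printf(sc->dev, "stray irq %u\n", hwirq);
 *		}
 *		return (FILTER_HANDLED);
 *	}
 *
 * The "my_pic_*" helpers and the softc layout are hypothetical names used
 * for illustration; curthread->td_intr_frame is valid here because
 * intr_irq_handler() set it before calling the root filter.
 */
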
/*
 *  Allocate a unique interrupt number (resource handle) for an interrupt
 *  source.
 *
 *  There could be various strategies for allocating free interrupt numbers
 *  (resource handles) for new interrupt sources.
 *
 *  1. Handles are always allocated forward, so handles are not recycled
 *     immediately. However, if only one free handle is left, it is reused
 *     constantly...
 */
static inline int
isrc_alloc_irq(struct intr_irqsrc *isrc)
{
        u_int irq;

        mtx_assert(&isrc_table_lock, MA_OWNED);

        if (irq_next_free >= intr_nirq)
                return (ENOSPC);

        for (irq = irq_next_free; irq < intr_nirq; irq++) {
                if (irq_sources[irq] == NULL)
                        goto found;
        }
        for (irq = 0; irq < irq_next_free; irq++) {
                if (irq_sources[irq] == NULL)
                        goto found;
        }

        irq_next_free = intr_nirq;
        return (ENOSPC);
found:
        isrc->isrc_irq = irq;
        irq_sources[irq] = isrc;

        irq_next_free = irq + 1;
        if (irq_next_free >= intr_nirq)
                irq_next_free = 0;
        return (0);
}

/*
 *  Free unique interrupt number (resource handle) from interrupt source.
 */
static inline int
isrc_free_irq(struct intr_irqsrc *isrc)
{

        mtx_assert(&isrc_table_lock, MA_OWNED);

        if (isrc->isrc_irq >= intr_nirq)
                return (EINVAL);
        if (irq_sources[isrc->isrc_irq] != isrc)
                return (EINVAL);

        irq_sources[isrc->isrc_irq] = NULL;
        isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */

        /*
         * If we are recovering from the state where the irq_sources table is
         * full, then the following allocation should check the entire table.
         * This will ensure maximum separation of allocation order from release
         * order.
         */
        if (irq_next_free >= intr_nirq)
                irq_next_free = 0;

        return (0);
}

/*
 *  Initialize interrupt source and register it into global interrupt table.
 */
int
intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
    const char *fmt, ...)
{
        int error;
        va_list ap;

        bzero(isrc, sizeof(struct intr_irqsrc));
        isrc->isrc_dev = dev;
        isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
        isrc->isrc_flags = flags;

        va_start(ap, fmt);
        vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
        va_end(ap);

        mtx_lock(&isrc_table_lock);
        error = isrc_alloc_irq(isrc);
        if (error != 0) {
                mtx_unlock(&isrc_table_lock);
                return (error);
        }
        /*
         * Setup interrupt counters, but not for IPI sources. Those are setup
         * later and only for used ones (up to INTR_IPI_COUNT) to not exhaust
         * our intrcnt table.
         */
        if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
                isrc_setup_counters(isrc);
        mtx_unlock(&isrc_table_lock);
        return (0);
}

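/*
 * Example (a sketch under assumed driver conventions): a PIC driver usually
 * embeds struct intr_irqsrc at the start of its per-interrupt structure and
 * registers each source from its attach routine:
 *
 *	struct my_pic_irqsrc {
 *		struct intr_irqsrc	isrc;		must be first
 *		u_int			hwirq;
 *	};
 *
 *	error = intr_isrc_register(&sc->isrcs[i].isrc, sc->dev, 0,
 *	    "%s,i%u", device_get_nameunit(sc->dev), i);
 *
 * Keeping the intr_irqsrc as the leading member lets the framework and the
 * driver share the same pointer; "my_pic_irqsrc" and "hwirq" are
 * illustrative names, not part of this file.
 */
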
/*
 *  Deregister interrupt source from global interrupt table.
 */
int
intr_isrc_deregister(struct intr_irqsrc *isrc)
{
        int error;

        mtx_lock(&isrc_table_lock);
        if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
                isrc_release_counters(isrc);
        error = isrc_free_irq(isrc);
        mtx_unlock(&isrc_table_lock);
        return (error);
}

#ifdef SMP
/*
 *  A support function for a PIC to decide whether the provided ISRC should
 *  be inited on the given CPU. The logic of the INTR_ISRCF_BOUND flag and
 *  the isrc_cpu member of struct intr_irqsrc is the following:
 *
 *     If INTR_ISRCF_BOUND is set, the ISRC should be inited only on CPUs
 *     set in isrc_cpu. If not, the ISRC should be inited on every CPU and
 *     isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct.
 */
bool
intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
{

        if (isrc->isrc_handlers == 0)
                return (false);
        if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0)
                return (false);
        if (isrc->isrc_flags & INTR_ISRCF_BOUND)
                return (CPU_ISSET(cpu, &isrc->isrc_cpu));

        CPU_SET(cpu, &isrc->isrc_cpu);
        return (true);
}
#endif

#ifdef INTR_SOLO
/*
 *  Setup filter into interrupt source.
 */
static int
isrc_setup_filter(struct intr_irqsrc *isrc, const char *name,
    intr_irq_filter_t *filter, void *arg, void **cookiep)
{

        if (filter == NULL)
                return (EINVAL);

        mtx_lock(&isrc_table_lock);
        /*
         * Make sure that we do not mix the two ways
         * how we handle interrupt sources.
         */
        if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
                mtx_unlock(&isrc_table_lock);
                return (EBUSY);
        }
        isrc->isrc_filter = filter;
        isrc->isrc_arg = arg;
        isrc_update_name(isrc, name);
        mtx_unlock(&isrc_table_lock);

        *cookiep = isrc;
        return (0);
}
#endif

/*
 *  Interrupt source pre_ithread method for MI interrupt framework.
 */
static void
intr_isrc_pre_ithread(void *arg)
{
        struct intr_irqsrc *isrc = arg;

        PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 *  Interrupt source post_ithread method for MI interrupt framework.
 */
static void
intr_isrc_post_ithread(void *arg)
{
        struct intr_irqsrc *isrc = arg;

        PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 *  Interrupt source post_filter method for MI interrupt framework.
 */
static void
intr_isrc_post_filter(void *arg)
{
        struct intr_irqsrc *isrc = arg;

        PIC_POST_FILTER(isrc->isrc_dev, isrc);
}

/*
 *  Interrupt source assign_cpu method for MI interrupt framework.
 */
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
        struct intr_irqsrc *isrc = arg;
        int error;

        mtx_lock(&isrc_table_lock);
        if (cpu == NOCPU) {
                CPU_ZERO(&isrc->isrc_cpu);
                isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
        } else {
                CPU_SETOF(cpu, &isrc->isrc_cpu);
                isrc->isrc_flags |= INTR_ISRCF_BOUND;
        }

        /*
         * In the NOCPU case, it's up to the PIC to either leave the ISRC on
         * the same CPU or re-balance it to another CPU or enable it on more
         * CPUs. However, the PIC is expected to change isrc_cpu appropriately
         * to keep us well informed if the call is successful.
         */
        if (irq_assign_cpu) {
                error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
                if (error) {
                        CPU_ZERO(&isrc->isrc_cpu);
                        mtx_unlock(&isrc_table_lock);
                        return (error);
                }
        }
        mtx_unlock(&isrc_table_lock);
        return (0);
#else
        return (EOPNOTSUPP);
#endif
}

/*
 *  Create interrupt event for interrupt source.
 */
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
        struct intr_event *ie;
        int error;

        error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
            intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
            intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
        if (error)
                return (error);

        mtx_lock(&isrc_table_lock);
        /*
         * Make sure that we do not mix the two ways
         * how we handle interrupt sources. Let the contested event win.
         */
#ifdef INTR_SOLO
        if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
#else
        if (isrc->isrc_event != NULL) {
#endif
                mtx_unlock(&isrc_table_lock);
                intr_event_destroy(ie);
                return (isrc->isrc_event != NULL ? EBUSY : 0);
        }
        isrc->isrc_event = ie;
        mtx_unlock(&isrc_table_lock);

        return (0);
}

/*
 *  Destroy interrupt event for interrupt source.
 */
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
        struct intr_event *ie;

        mtx_lock(&isrc_table_lock);
        ie = isrc->isrc_event;
        isrc->isrc_event = NULL;
        mtx_unlock(&isrc_table_lock);

        if (ie != NULL)
                intr_event_destroy(ie);
}

/*
 *  Add handler to interrupt source.
 */
static int
isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, void **cookiep)
{
        int error;

        if (isrc->isrc_event == NULL) {
                error = isrc_event_create(isrc);
                if (error)
                        return (error);
        }

        error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
            arg, intr_priority(flags), flags, cookiep);
        if (error == 0) {
                mtx_lock(&isrc_table_lock);
                intrcnt_updatename(isrc);
                mtx_unlock(&isrc_table_lock);
        }

        return (error);
}

/*
 *  Lookup interrupt controller locked.
 */
static inline struct intr_pic *
pic_lookup_locked(device_t dev, intptr_t xref, int flags)
{
        struct intr_pic *pic;

        mtx_assert(&pic_list_lock, MA_OWNED);

        if (dev == NULL && xref == 0)
                return (NULL);

        /* Note that pic->pic_dev is never NULL on registered PIC. */
        SLIST_FOREACH(pic, &pic_list, pic_next) {
                if ((pic->pic_flags & FLAG_TYPE_MASK) !=
                    (flags & FLAG_TYPE_MASK))
                        continue;

                if (dev == NULL) {
                        if (xref == pic->pic_xref)
                                return (pic);
                } else if (xref == 0 || pic->pic_xref == 0) {
                        if (dev == pic->pic_dev)
                                return (pic);
                } else if (xref == pic->pic_xref && dev == pic->pic_dev)
                        return (pic);
        }
        return (NULL);
}

/*
 *  Lookup interrupt controller.
 */
static struct intr_pic *
pic_lookup(device_t dev, intptr_t xref, int flags)
{
        struct intr_pic *pic;

        mtx_lock(&pic_list_lock);
        pic = pic_lookup_locked(dev, xref, flags);
        mtx_unlock(&pic_list_lock);

        return (pic);
}

/*
 *  Create interrupt controller.
 */
static struct intr_pic *
pic_create(device_t dev, intptr_t xref, int flags)
{
        struct intr_pic *pic;

        mtx_lock(&pic_list_lock);
        pic = pic_lookup_locked(dev, xref, flags);
        if (pic != NULL) {
                mtx_unlock(&pic_list_lock);
                return (pic);
        }
        pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
        if (pic == NULL) {
                mtx_unlock(&pic_list_lock);
                return (NULL);
        }
        pic->pic_xref = xref;
        pic->pic_dev = dev;
        pic->pic_flags = flags;
        mtx_init(&pic->pic_child_lock, "pic child lock", NULL, MTX_SPIN);
        SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
        mtx_unlock(&pic_list_lock);

        return (pic);
}

/*
 *  Destroy interrupt controller.
 */
static void
pic_destroy(device_t dev, intptr_t xref, int flags)
{
        struct intr_pic *pic;

        mtx_lock(&pic_list_lock);
        pic = pic_lookup_locked(dev, xref, flags);
        if (pic == NULL) {
                mtx_unlock(&pic_list_lock);
                return;
        }
        SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
        mtx_unlock(&pic_list_lock);

        free(pic, M_INTRNG);
}

/*
 *  Register interrupt controller.
 */
struct intr_pic *
intr_pic_register(device_t dev, intptr_t xref)
{
        struct intr_pic *pic;

        if (dev == NULL)
                return (NULL);
        pic = pic_create(dev, xref, FLAG_PIC);
        if (pic == NULL)
                return (NULL);

        debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
            device_get_nameunit(dev), dev, (uintmax_t)xref);
        return (pic);
}

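/*
 * Example (sketch): a controller driver registers itself from its attach
 * method, typically using its FDT node as the xref:
 *
 *	intptr_t xref = OF_xref_from_node(ofw_bus_get_node(dev));
 *
 *	if (intr_pic_register(dev, xref) == NULL)
 *		return (ENXIO);
 *
 * The OFW calls are the usual FDT idiom; other buses derive the xref
 * differently.
 */
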
/*
 *  Unregister interrupt controller.
 */
int
intr_pic_deregister(device_t dev, intptr_t xref)
{

        panic("%s: not implemented", __func__);
}

/*
 *  Mark interrupt controller (itself) as a root one.
 *
 *  Note that only an interrupt controller can really know its position
 *  in the interrupt controller's tree. So the root PIC must claim itself
 *  as a root.
 *
 *  In the FDT case, according to ePAPR approved version 1.1 from 08 April
 *  2011, page 30:
 *    "The root of the interrupt tree is determined when traversal
 *     of the interrupt tree reaches an interrupt controller node without
 *     an interrupts property and thus no explicit interrupt parent."
 */
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
    void *arg)
{
        struct intr_pic *pic;

        pic = pic_lookup(dev, xref, FLAG_PIC);
        if (pic == NULL) {
                device_printf(dev, "not registered\n");
                return (EINVAL);
        }

        KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC,
            ("%s: Found a non-PIC controller: %s", __func__,
             device_get_name(pic->pic_dev)));

        if (filter == NULL) {
                device_printf(dev, "filter missing\n");
                return (EINVAL);
        }

        /*
         * Only one interrupt controller can be on the root for now.
         * Note that we further suppose that there is no threaded interrupt
         * routine (handler) on the root. See intr_irq_handler().
         */
        if (intr_irq_root_dev != NULL) {
                device_printf(dev, "another root already set\n");
                return (EBUSY);
        }

        intr_irq_root_dev = dev;
        irq_root_filter = filter;
        irq_root_arg = arg;

        debugf("irq root set to %s\n", device_get_nameunit(dev));
        return (0);
}

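/*
 * Example (sketch): a root controller claims itself right after
 * registration, passing the filter that intr_irq_handler() will invoke:
 *
 *	error = intr_pic_claim_root(dev, xref, my_pic_intr, sc);
 *
 * where "my_pic_intr" is the driver's intr_irq_filter_t dispatch routine
 * (an assumed name; see the dispatch example near intr_isrc_dispatch()).
 */
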
/*
 *  Add a handler to manage a sub range of a parent's interrupts.
 */
struct intr_pic *
intr_pic_add_handler(device_t parent, struct intr_pic *pic,
    intr_child_irq_filter_t *filter, void *arg, uintptr_t start,
    uintptr_t length)
{
        struct intr_pic *parent_pic;
        struct intr_pic_child *newchild;
#ifdef INVARIANTS
        struct intr_pic_child *child;
#endif

        /* Find the parent PIC */
        parent_pic = pic_lookup(parent, 0, FLAG_PIC);
        if (parent_pic == NULL)
                return (NULL);

        newchild = malloc(sizeof(*newchild), M_INTRNG, M_WAITOK | M_ZERO);
        newchild->pc_pic = pic;
        newchild->pc_filter = filter;
        newchild->pc_filter_arg = arg;
        newchild->pc_start = start;
        newchild->pc_length = length;

        mtx_lock_spin(&parent_pic->pic_child_lock);
#ifdef INVARIANTS
        SLIST_FOREACH(child, &parent_pic->pic_children, pc_next) {
                KASSERT(child->pc_pic != pic, ("%s: Adding a child PIC twice",
                    __func__));
        }
#endif
        SLIST_INSERT_HEAD(&parent_pic->pic_children, newchild, pc_next);
        mtx_unlock_spin(&parent_pic->pic_child_lock);

        return (pic);
}

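/*
 * Example (sketch): a cascaded controller that owns IRQs [start, start +
 * length) of its parent registers a child filter instead of claiming root:
 *
 *	sc->pic = intr_pic_register(dev, xref);
 *	if (intr_pic_add_handler(parent_dev, sc->pic, my_child_filter, sc,
 *	    MY_FIRST_IRQ, MY_NIRQS) == NULL)
 *		return (ENXIO);
 *
 * The parent then routes those IRQs through intr_child_irq_handler() to
 * "my_child_filter"; the MY_* constants and names are illustrative.
 */
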
static int
intr_resolve_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
    struct intr_irqsrc **isrc)
{
        struct intr_pic *pic;
        struct intr_map_data_msi *msi;

        if (data == NULL)
                return (EINVAL);

        pic = pic_lookup(dev, xref,
            (data->type == INTR_MAP_DATA_MSI) ? FLAG_MSI : FLAG_PIC);
        if (pic == NULL)
                return (ESRCH);

        switch (data->type) {
        case INTR_MAP_DATA_MSI:
                KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
                    ("%s: Found a non-MSI controller: %s", __func__,
                     device_get_name(pic->pic_dev)));
                msi = (struct intr_map_data_msi *)data;
                *isrc = msi->isrc;
                return (*isrc == NULL ? EINVAL : 0);
        default:
                KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC,
                    ("%s: Found a non-PIC controller: %s", __func__,
                     device_get_name(pic->pic_dev)));
                return (PIC_MAP_INTR(pic->pic_dev, data, isrc));
        }
}

bool
intr_is_per_cpu(struct resource *res)
{
        u_int res_id;
        struct intr_irqsrc *isrc;

        res_id = (u_int)rman_get_start(res);
        isrc = intr_map_get_isrc(res_id);

        if (isrc == NULL)
                panic("Attempt to get isrc for non-active resource id: %u\n",
                    res_id);
        return ((isrc->isrc_flags & INTR_ISRCF_PPI) != 0);
}

int
intr_activate_irq(device_t dev, struct resource *res)
{
        device_t map_dev;
        intptr_t map_xref;
        struct intr_map_data *data;
        struct intr_irqsrc *isrc;
        u_int res_id;
        int error;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        res_id = (u_int)rman_get_start(res);
        if (intr_map_get_isrc(res_id) != NULL)
                panic("Attempt to double activation of resource id: %u\n",
                    res_id);
        intr_map_copy_map_data(res_id, &map_dev, &map_xref, &data);
        error = intr_resolve_irq(map_dev, map_xref, data, &isrc);
        if (error != 0) {
                free(data, M_INTRNG);
                /* XXX TODO DISCONNECTED PICs */
                /* if (error == EINVAL) return(0); */
                return (error);
        }
        intr_map_set_isrc(res_id, isrc);
        rman_set_virtual(res, data);
        return (PIC_ACTIVATE_INTR(isrc->isrc_dev, isrc, res, data));
}

int
intr_deactivate_irq(device_t dev, struct resource *res)
{
        struct intr_map_data *data;
        struct intr_irqsrc *isrc;
        u_int res_id;
        int error;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        res_id = (u_int)rman_get_start(res);
        isrc = intr_map_get_isrc(res_id);
        if (isrc == NULL)
                panic("Attempt to deactivate non-active resource id: %u\n",
                    res_id);

        data = rman_get_virtual(res);
        error = PIC_DEACTIVATE_INTR(isrc->isrc_dev, isrc, res, data);
        intr_map_set_isrc(res_id, NULL);
        rman_set_virtual(res, NULL);
        free(data, M_INTRNG);
        return (error);
}

int
intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
    driver_intr_t hand, void *arg, int flags, void **cookiep)
{
        int error;
        struct intr_map_data *data;
        struct intr_irqsrc *isrc;
        const char *name;
        u_int res_id;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        res_id = (u_int)rman_get_start(res);
        isrc = intr_map_get_isrc(res_id);
        if (isrc == NULL) {
                /* XXX TODO DISCONNECTED PICs */
                return (EINVAL);
        }

        data = rman_get_virtual(res);
        name = device_get_nameunit(dev);

#ifdef INTR_SOLO
        /*
         * Standard handling is done through the MI interrupt framework.
         * However, some interrupts could request solely their own special
         * handling. This non-standard handling can be used for interrupt
         * controllers without a handler (filter only), so in case interrupt
         * controllers are chained, the MI interrupt framework is called only
         * in the leaf controller.
         *
         * Note that the root interrupt controller routine is served as well,
         * however in intr_irq_handler(), i.e. the main system dispatch
         * routine.
         */
        if (flags & INTR_SOLO && hand != NULL) {
                debugf("irq %u cannot solo on %s\n", isrc->isrc_irq, name);
                return (EINVAL);
        }

        if (flags & INTR_SOLO) {
                error = isrc_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
                    arg, cookiep);
                debugf("irq %u setup filter error %d on %s\n", isrc->isrc_irq,
                    error, name);
        } else
#endif
                {
                error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
                    cookiep);
                debugf("irq %u add handler error %d on %s\n", isrc->isrc_irq,
                    error, name);
        }
        if (error != 0)
                return (error);

        mtx_lock(&isrc_table_lock);
        error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
        if (error == 0) {
                isrc->isrc_handlers++;
                if (isrc->isrc_handlers == 1)
                        PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
        }
        mtx_unlock(&isrc_table_lock);
        if (error != 0)
                intr_event_remove_handler(*cookiep);
        return (error);
}

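/*
 * Example (sketch): leaf device drivers do not call intr_setup_irq()
 * directly; they go through the bus methods, which end up here:
 *
 *	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *	    RF_ACTIVE);
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, my_intr, sc, &sc->intrhand);
 *
 * "my_intr" and the softc members are illustrative names.
 */
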
int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
        int error;
        struct intr_map_data *data;
        struct intr_irqsrc *isrc;
        u_int res_id;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        res_id = (u_int)rman_get_start(res);
        isrc = intr_map_get_isrc(res_id);
        if (isrc == NULL || isrc->isrc_handlers == 0)
                return (EINVAL);

        data = rman_get_virtual(res);

#ifdef INTR_SOLO
        if (isrc->isrc_filter != NULL) {
                if (isrc != cookie)
                        return (EINVAL);

                mtx_lock(&isrc_table_lock);
                isrc->isrc_filter = NULL;
                isrc->isrc_arg = NULL;
                isrc->isrc_handlers = 0;
                PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
                PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
                isrc_update_name(isrc, NULL);
                mtx_unlock(&isrc_table_lock);
                return (0);
        }
#endif
        if (isrc != intr_handler_source(cookie))
                return (EINVAL);

        error = intr_event_remove_handler(cookie);
        if (error == 0) {
                mtx_lock(&isrc_table_lock);
                isrc->isrc_handlers--;
                if (isrc->isrc_handlers == 0)
                        PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
                PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
                intrcnt_updatename(isrc);
                mtx_unlock(&isrc_table_lock);
        }
        return (error);
}

int
intr_describe_irq(device_t dev, struct resource *res, void *cookie,
    const char *descr)
{
        int error;
        struct intr_irqsrc *isrc;
        u_int res_id;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        res_id = (u_int)rman_get_start(res);
        isrc = intr_map_get_isrc(res_id);
        if (isrc == NULL || isrc->isrc_handlers == 0)
                return (EINVAL);
#ifdef INTR_SOLO
        if (isrc->isrc_filter != NULL) {
                if (isrc != cookie)
                        return (EINVAL);

                mtx_lock(&isrc_table_lock);
                isrc_update_name(isrc, descr);
                mtx_unlock(&isrc_table_lock);
                return (0);
        }
#endif
        error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
        if (error == 0) {
                mtx_lock(&isrc_table_lock);
                intrcnt_updatename(isrc);
                mtx_unlock(&isrc_table_lock);
        }
        return (error);
}

#ifdef SMP
int
intr_bind_irq(device_t dev, struct resource *res, int cpu)
{
        struct intr_irqsrc *isrc;
        u_int res_id;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        res_id = (u_int)rman_get_start(res);
        isrc = intr_map_get_isrc(res_id);
        if (isrc == NULL || isrc->isrc_handlers == 0)
                return (EINVAL);
#ifdef INTR_SOLO
        if (isrc->isrc_filter != NULL)
                return (intr_isrc_assign_cpu(isrc, cpu));
#endif
        return (intr_event_bind(isrc->isrc_event, cpu));
}

/*
 *  Return the CPU that the next interrupt source should use.
 *  For now just returns the next CPU according to round-robin.
 */
u_int
intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
{
        u_int cpu;

        KASSERT(!CPU_EMPTY(cpumask), ("%s: Empty CPU mask", __func__));
        if (!irq_assign_cpu || mp_ncpus == 1) {
                cpu = PCPU_GET(cpuid);

                if (CPU_ISSET(cpu, cpumask))
                        return (cpu);

                return (CPU_FFS(cpumask) - 1);
        }

        do {
                last_cpu++;
                if (last_cpu > mp_maxid)
                        last_cpu = 0;
        } while (!CPU_ISSET(last_cpu, cpumask));
        return (last_cpu);
}

#ifndef EARLY_AP_STARTUP
/*
 *  Distribute all the interrupt sources among the available
 *  CPUs once the APs have been launched.
 */
static void
intr_irq_shuffle(void *arg __unused)
{
        struct intr_irqsrc *isrc;
        u_int i;

        if (mp_ncpus == 1)
                return;

        mtx_lock(&isrc_table_lock);
        irq_assign_cpu = true;
        for (i = 0; i < intr_nirq; i++) {
                isrc = irq_sources[i];
                if (isrc == NULL || isrc->isrc_handlers == 0 ||
                    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
                        continue;

                if (isrc->isrc_event != NULL &&
                    isrc->isrc_flags & INTR_ISRCF_BOUND &&
                    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
                        panic("%s: CPU inconsistency", __func__);

                if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
                        CPU_ZERO(&isrc->isrc_cpu); /* start again */

                /*
                 * We are in a wicked position here if the following call fails
                 * for a bound ISRC. The best thing we can do is to clear
                 * isrc_cpu so the inconsistency with ie_cpu will be detectable.
                 */
                if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
                        CPU_ZERO(&isrc->isrc_cpu);
        }
        mtx_unlock(&isrc_table_lock);
}
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
#endif /* !EARLY_AP_STARTUP */

#else
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

        return (PCPU_GET(cpuid));
}
#endif /* SMP */

/*
 * Allocate memory for a new intr_map_data structure.
 * Initialize common fields.
 */
struct intr_map_data *
intr_alloc_map_data(enum intr_map_data_type type, size_t len, int flags)
{
        struct intr_map_data *data;

        data = malloc(len, M_INTRNG, flags);
        data->type = type;
        data->len = len;
        return (data);
}

void
intr_free_intr_map_data(struct intr_map_data *data)
{

        free(data, M_INTRNG);
}

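/*
 * Example (sketch): callers allocate a bus-specific structure that embeds
 * struct intr_map_data as its header; the FDT glue does roughly:
 *
 *	struct intr_map_data_fdt *fdt_data;
 *
 *	fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data(
 *	    INTR_MAP_DATA_FDT, sizeof(*fdt_data) +
 *	    ncells * sizeof(pcell_t), M_WAITOK | M_ZERO);
 *
 * "ncells" stands in for the interrupt specifier length; see the OFW bus
 * code for the real version.
 */
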
/*
 * Register an MSI/MSI-X interrupt controller.
 */
int
intr_msi_register(device_t dev, intptr_t xref)
{
        struct intr_pic *pic;

        if (dev == NULL)
                return (EINVAL);
        pic = pic_create(dev, xref, FLAG_MSI);
        if (pic == NULL)
                return (ENOMEM);

        debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
            device_get_nameunit(dev), dev, (uintmax_t)xref);
        return (0);
}

int
intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count,
    int maxcount, int *irqs)
{
        struct iommu_domain *domain;
        struct intr_irqsrc **isrc;
        struct intr_pic *pic;
        device_t pdev;
        struct intr_map_data_msi *msi;
        int err, i;

        pic = pic_lookup(NULL, xref, FLAG_MSI);
        if (pic == NULL)
                return (ESRCH);

        KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
            ("%s: Found a non-MSI controller: %s", __func__,
             device_get_name(pic->pic_dev)));

        /*
         * If this is the first time we have used this context, ask the
         * interrupt controller to map the memory the MSI source will need.
         */
        err = MSI_IOMMU_INIT(pic->pic_dev, child, &domain);
        if (err != 0)
                return (err);

        isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
        err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc);
        if (err != 0) {
                free(isrc, M_INTRNG);
                return (err);
        }

        for (i = 0; i < count; i++) {
                isrc[i]->isrc_iommu = domain;
                msi = (struct intr_map_data_msi *)intr_alloc_map_data(
                    INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
                msi->isrc = isrc[i];

                irqs[i] = intr_map_irq(pic->pic_dev, xref,
                    (struct intr_map_data *)msi);
        }
        free(isrc, M_INTRNG);

        return (err);
}

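/*
 * Example (sketch): this function is reached from the PCI bus front-end
 * rather than from device drivers; a driver simply does:
 *
 *	int count = 1;
 *
 *	if (pci_alloc_msi(dev, &count) == 0)
 *		... allocate SYS_RES_IRQ resources with rids starting at 1 ...
 *
 * and the pcib/MSI glue forwards the request here with the controller's
 * xref.
 */
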
int
intr_release_msi(device_t pci, device_t child, intptr_t xref, int count,
    int *irqs)
{
        struct intr_irqsrc **isrc;
        struct intr_pic *pic;
        struct intr_map_data_msi *msi;
        int i, err;

        pic = pic_lookup(NULL, xref, FLAG_MSI);
        if (pic == NULL)
                return (ESRCH);

        KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
            ("%s: Found a non-MSI controller: %s", __func__,
             device_get_name(pic->pic_dev)));

        isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);

        for (i = 0; i < count; i++) {
                msi = (struct intr_map_data_msi *)
                    intr_map_get_map_data(irqs[i]);
                KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
                    ("%s: irq %d map data is not MSI", __func__,
                    irqs[i]));
                isrc[i] = msi->isrc;
        }

        MSI_IOMMU_DEINIT(pic->pic_dev, child);

        err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc);

        for (i = 0; i < count; i++) {
                if (isrc[i] != NULL)
                        intr_unmap_irq(irqs[i]);
        }

        free(isrc, M_INTRNG);
        return (err);
}

int
intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq)
{
        struct iommu_domain *domain;
        struct intr_irqsrc *isrc;
        struct intr_pic *pic;
        device_t pdev;
        struct intr_map_data_msi *msi;
        int err;

        pic = pic_lookup(NULL, xref, FLAG_MSI);
        if (pic == NULL)
                return (ESRCH);

        KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
            ("%s: Found a non-MSI controller: %s", __func__,
             device_get_name(pic->pic_dev)));

        /*
         * If this is the first time we have used this context, ask the
         * interrupt controller to map the memory the MSI source will need.
         */
        err = MSI_IOMMU_INIT(pic->pic_dev, child, &domain);
        if (err != 0)
                return (err);

        err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc);
        if (err != 0)
                return (err);

        isrc->isrc_iommu = domain;
        msi = (struct intr_map_data_msi *)intr_alloc_map_data(
            INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
        msi->isrc = isrc;

        *irq = intr_map_irq(pic->pic_dev, xref, (struct intr_map_data *)msi);
        return (0);
}

int
intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq)
{
        struct intr_irqsrc *isrc;
        struct intr_pic *pic;
        struct intr_map_data_msi *msi;
        int err;

        pic = pic_lookup(NULL, xref, FLAG_MSI);
        if (pic == NULL)
                return (ESRCH);

        KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
            ("%s: Found a non-MSI controller: %s", __func__,
             device_get_name(pic->pic_dev)));

        msi = (struct intr_map_data_msi *)
            intr_map_get_map_data(irq);
        KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
            ("%s: irq %d map data is not MSI", __func__,
            irq));
        isrc = msi->isrc;
        if (isrc == NULL) {
                intr_unmap_irq(irq);
                return (EINVAL);
        }

        MSI_IOMMU_DEINIT(pic->pic_dev, child);

        err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc);
        intr_unmap_irq(irq);

        return (err);
}

int
intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq,
    uint64_t *addr, uint32_t *data)
{
        struct intr_irqsrc *isrc;
        struct intr_pic *pic;
        int err;

        pic = pic_lookup(NULL, xref, FLAG_MSI);
        if (pic == NULL)
                return (ESRCH);

        KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
            ("%s: Found a non-MSI controller: %s", __func__,
             device_get_name(pic->pic_dev)));

        isrc = intr_map_get_isrc(irq);
        if (isrc == NULL)
                return (EINVAL);

        err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data);

#ifdef IOMMU
        if (isrc->isrc_iommu != NULL)
                iommu_translate_msi(isrc->isrc_iommu, addr);
#endif

        return (err);
}

void dosoftints(void);
void
dosoftints(void)
{
}

#ifdef SMP
/*
 *  Init interrupt controller on another CPU.
 */
void
intr_pic_init_secondary(void)
{

        /*
         * QQQ: Only root PIC is aware of other CPUs ???
         */
        KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

        //mtx_lock(&isrc_table_lock);
        PIC_INIT_SECONDARY(intr_irq_root_dev);
        //mtx_unlock(&isrc_table_lock);
}
#endif

#ifdef DDB
DB_SHOW_COMMAND_FLAGS(irqs, db_show_irqs, DB_CMD_MEMSAFE)
{
        u_int i, irqsum;
        u_long num;
        struct intr_irqsrc *isrc;

        for (irqsum = 0, i = 0; i < intr_nirq; i++) {
                isrc = irq_sources[i];
                if (isrc == NULL)
                        continue;

                num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
                db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
                    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
                    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
                irqsum += num;
        }
        db_printf("irq total %u\n", irqsum);
}
#endif

/*
 * Interrupt mapping table functions.
 *
 * Please keep this part separate; it can be transformed into an
 * extension of standard resources.
 */
struct intr_map_entry
{
        device_t		dev;
        intptr_t		xref;
        struct intr_map_data	*map_data;
        struct intr_irqsrc	*isrc;
        /* XXX TODO DISCONNECTED PICs */
        /*int			flags;*/
};

/* XXX Convert irq_map[] to a dynamically expandable one. */
static struct intr_map_entry **irq_map;
static u_int irq_map_count;
static u_int irq_map_first_free_idx;
static struct mtx irq_map_lock;

static struct intr_irqsrc *
intr_map_get_isrc(u_int res_id)
{
        struct intr_irqsrc *isrc;

        isrc = NULL;
        mtx_lock(&irq_map_lock);
        if (res_id < irq_map_count && irq_map[res_id] != NULL)
                isrc = irq_map[res_id]->isrc;
        mtx_unlock(&irq_map_lock);
        return (isrc);
}

static void
intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc)
{

        mtx_lock(&irq_map_lock);
        if (res_id < irq_map_count && irq_map[res_id] != NULL)
                irq_map[res_id]->isrc = isrc;
        mtx_unlock(&irq_map_lock);
}

/*
 * Get a copy of intr_map_entry data
 */
static struct intr_map_data *
intr_map_get_map_data(u_int res_id)
{
        struct intr_map_data *data;

        data = NULL;
        mtx_lock(&irq_map_lock);
        if (res_id >= irq_map_count || irq_map[res_id] == NULL)
                panic("Attempt to copy invalid resource id: %u\n", res_id);
        data = irq_map[res_id]->map_data;
        mtx_unlock(&irq_map_lock);

        return (data);
}

/*
 * Get a copy of intr_map_entry data
 */
static void
intr_map_copy_map_data(u_int res_id, device_t *map_dev, intptr_t *map_xref,
    struct intr_map_data **data)
{
        size_t len;

        len = 0;
        mtx_lock(&irq_map_lock);
        if (res_id >= irq_map_count || irq_map[res_id] == NULL)
                panic("Attempt to copy invalid resource id: %u\n", res_id);
        if (irq_map[res_id]->map_data != NULL)
                len = irq_map[res_id]->map_data->len;
        mtx_unlock(&irq_map_lock);

        if (len == 0)
                *data = NULL;
        else
                *data = malloc(len, M_INTRNG, M_WAITOK | M_ZERO);
        mtx_lock(&irq_map_lock);
        if (irq_map[res_id] == NULL)
                panic("Attempt to copy invalid resource id: %u\n", res_id);
        if (len != 0) {
                if (len != irq_map[res_id]->map_data->len)
                        panic("Resource id: %u has changed.\n", res_id);
                memcpy(*data, irq_map[res_id]->map_data, len);
        }
        *map_dev = irq_map[res_id]->dev;
        *map_xref = irq_map[res_id]->xref;
        mtx_unlock(&irq_map_lock);
}

/*
 * Allocate and fill a new entry in the irq_map table.
 */
u_int
intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data)
{
        u_int i;
        struct intr_map_entry *entry;

        /* Prepare new entry first. */
        entry = malloc(sizeof(*entry), M_INTRNG, M_WAITOK | M_ZERO);

        entry->dev = dev;
        entry->xref = xref;
        entry->map_data = data;
        entry->isrc = NULL;

        mtx_lock(&irq_map_lock);
        for (i = irq_map_first_free_idx; i < irq_map_count; i++) {
                if (irq_map[i] == NULL) {
                        irq_map[i] = entry;
                        irq_map_first_free_idx = i + 1;
                        mtx_unlock(&irq_map_lock);
                        return (i);
                }
        }
        for (i = 0; i < irq_map_first_free_idx; i++) {
                if (irq_map[i] == NULL) {
                        irq_map[i] = entry;
                        irq_map_first_free_idx = i + 1;
                        mtx_unlock(&irq_map_lock);
                        return (i);
                }
        }
        mtx_unlock(&irq_map_lock);

        /* XXX Expand irq_map table */
        panic("IRQ mapping table is full.");
}

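/*
 * Example (sketch): bus code maps an interrupt specifier to a resource id
 * and hands that id to the resource list; FDT interrupt parsing does
 * roughly:
 *
 *	irq = intr_map_irq(NULL, iparent_xref,
 *	    (struct intr_map_data *)fdt_data);
 *	resource_list_add(rl, SYS_RES_IRQ, rid, irq, irq, 1);
 *
 * Resolution to a struct intr_irqsrc is deferred until
 * intr_activate_irq(); "iparent_xref" and "fdt_data" are illustrative.
 */
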
/*
 * Remove and free mapping entry.
 */
void
intr_unmap_irq(u_int res_id)
{
        struct intr_map_entry *entry;

        mtx_lock(&irq_map_lock);
        if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL))
                panic("Attempt to unmap invalid resource id: %u\n", res_id);
        entry = irq_map[res_id];
        irq_map[res_id] = NULL;
        irq_map_first_free_idx = res_id;
        mtx_unlock(&irq_map_lock);
        intr_free_intr_map_data(entry->map_data);
        free(entry, M_INTRNG);
}

/*
 * Clone mapping entry.
 */
u_int
intr_map_clone_irq(u_int old_res_id)
{
        device_t map_dev;
        intptr_t map_xref;
        struct intr_map_data *data;

        intr_map_copy_map_data(old_res_id, &map_dev, &map_xref, &data);
        return (intr_map_irq(map_dev, map_xref, data));
}

static void
intr_map_init(void *dummy __unused)
{

        mtx_init(&irq_map_lock, "intr map table", NULL, MTX_DEF);

        irq_map_count = 2 * intr_nirq;
        irq_map = mallocarray(irq_map_count, sizeof(struct intr_map_entry *),
            M_INTRNG, M_WAITOK | M_ZERO);
}
SYSINIT(intr_map_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_map_init, NULL);

#ifdef SMP
/* Virtualization for interrupt source IPI counter increment. */
static inline void
intr_ipi_increment_count(u_long *counter, u_int cpu)
{

        KASSERT(cpu < mp_maxid + 1, ("%s: too big cpu %u", __func__, cpu));
        counter[cpu]++;
}

/*
 *  Virtualization for interrupt source IPI counters setup.
 */
static u_long *
intr_ipi_setup_counters(const char *name)
{
        u_int index, i;
        char str[INTRNAME_LEN];

        mtx_lock(&isrc_table_lock);

        /*
         * We should never have a problem finding mp_maxid + 1 contiguous
         * counters, in practice. Interrupts will be allocated sequentially
         * during boot, so the array should fill from low to high index. Once
         * reserved, the IPI counters will never be released. Similarly, we
         * will not need to allocate more IPIs once the system is running.
         */
        bit_ffc_area(intrcnt_bitmap, nintrcnt, mp_maxid + 1, &index);
        if (index == -1)
                panic("Failed to allocate %d counters. Array exhausted?",
                    mp_maxid + 1);
        bit_nset(intrcnt_bitmap, index, index + mp_maxid);
        for (i = 0; i < mp_maxid + 1; i++) {
                snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
                intrcnt_setname(str, index + i);
        }
        mtx_unlock(&isrc_table_lock);
        return (&intrcnt[index]);
}

/*
 *  Lookup IPI source.
 */
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

        if (ipi >= INTR_IPI_COUNT)
                panic("%s: no such IPI %u", __func__, ipi);

        return (&ipi_sources[ipi]);
}

int
intr_ipi_pic_register(device_t dev, u_int priority)
{
        if (intr_ipi_dev_frozen) {
                device_printf(dev, "IPI device already frozen");
                return (EBUSY);
        }

        if (intr_ipi_dev == NULL || priority > intr_ipi_dev_priority) {
                intr_ipi_dev_priority = priority;
                intr_ipi_dev = dev;
        }

        return (0);
}

/*
 *  Setup IPI handler on interrupt controller.
 *
 *  Not SMP coherent.
 */
void
intr_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
        struct intr_irqsrc *isrc;
        struct intr_ipi *ii;
        int error;

        if (!intr_ipi_dev_frozen) {
                if (intr_ipi_dev == NULL)
                        panic("%s: no IPI PIC attached", __func__);

                intr_ipi_dev_frozen = true;
                device_printf(intr_ipi_dev, "using for IPIs\n");
        }

        KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

        error = PIC_IPI_SETUP(intr_ipi_dev, ipi, &isrc);
        if (error != 0)
                return;

        isrc->isrc_handlers++;

        ii = intr_ipi_lookup(ipi);
        KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

        ii->ii_handler = hand;
        ii->ii_handler_arg = arg;
        ii->ii_isrc = isrc;
        strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
        ii->ii_count = intr_ipi_setup_counters(name);

        PIC_ENABLE_INTR(intr_ipi_dev, isrc);
}

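/*
 * Example (sketch): machine-dependent SMP startup registers its IPI
 * handlers once, after the IPI PIC has attached, e.g.:
 *
 *	intr_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
 *	intr_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);
 *
 * where the handlers are intr_ipi_handler_t functions; this mirrors the
 * arm64 mp_machdep.c usage, shown here for illustration only.
 */
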
void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
        struct intr_ipi *ii;

        KASSERT(intr_ipi_dev_frozen,
            ("%s: IPI device not yet frozen", __func__));

        ii = intr_ipi_lookup(ipi);
        if (ii->ii_count == NULL)
                panic("%s: not setup IPI %u", __func__, ipi);

        /*
         * XXX: Surely needed on other architectures too? Either way should be
         * some kind of MI hook defined in an MD header, or the responsibility
         * of the MD caller if not widespread.
         */
#ifdef __aarch64__
        /*
         * Ensure that this CPU's stores will be visible to IPI
         * recipients before starting to send the interrupts.
         */
        dsb(ishst);
#endif

        PIC_IPI_SEND(intr_ipi_dev, ii->ii_isrc, cpus, ipi);
}

/*
 *  Interrupt controller dispatch function for IPIs. It should
 *  be called straight from the interrupt controller, once the associated
 *  interrupt source is learned, or from anybody who has an interrupt.
 */
void
intr_ipi_dispatch(u_int ipi)
{
        struct intr_ipi *ii;

        ii = intr_ipi_lookup(ipi);
        if (ii->ii_count == NULL)
                panic("%s: not setup IPI %u", __func__, ipi);

        intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

        ii->ii_handler(ii->ii_handler_arg);
}
#endif /* SMP */