/*-
 * Copyright (c) 2015-2016 Svatopluk Kraus
 * Copyright (c) 2015-2016 Michal Meloun
 * All rights reserved.
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * Copyright (c) 2021 Jessica Clarke <jrtc27@FreeBSD.org>
 *
 * Portions of this software were developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>

/*
 *	New-style Interrupt Framework
 *
 *	TODO: - add support for disconnected PICs.
 *	      - support enabling IPIs (PPIs) on CPUs that are already started.
 *	      - complete support for removable PICs.
 */

#include "opt_hwpmc_hooks.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>
#include <sys/pmckern.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#include <dev/iommu/iommu_msi.h>

#define	INTRNAME_LEN	(2*MAXCOMLEN + 1)

#ifdef DEBUG
#define	debugf(fmt, args...) do { printf("%s(): ", __func__);	\
    printf(fmt, ##args); } while (0)
#else
#define	debugf(fmt, args...)
#endif

MALLOC_DECLARE(M_INTRNG);
MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");

/* Main interrupt handler called from assembler -> 'hidden' for C code. */
void intr_irq_handler(struct trapframe *tf);

/* Root interrupt controller stuff. */
device_t intr_irq_root_dev;
static intr_irq_filter_t *irq_root_filter;
static void *irq_root_arg;

struct intr_pic_child {
	SLIST_ENTRY(intr_pic_child)	 pc_next;
	struct intr_pic			*pc_pic;
	intr_child_irq_filter_t		*pc_filter;
	void				*pc_filter_arg;
	uintptr_t			 pc_start;
	uintptr_t			 pc_length;
};

/* Interrupt controller definition. */
struct intr_pic {
	SLIST_ENTRY(intr_pic)	pic_next;
	intptr_t		pic_xref;	/* hardware identification */
	device_t		pic_dev;	/* device handle */

/* Only one of FLAG_PIC or FLAG_MSI may be set */
#define	FLAG_PIC	(1 << 0)
#define	FLAG_MSI	(1 << 1)
#define	FLAG_TYPE_MASK	(FLAG_PIC | FLAG_MSI)
	u_int			pic_flags;

	struct mtx		pic_child_lock;
	SLIST_HEAD(, intr_pic_child) pic_children;
};

#define	INTR_IPI_NAMELEN	(MAXCOMLEN + 1)

struct intr_ipi {
	intr_ipi_handler_t	*ii_handler;
	void			*ii_handler_arg;
	struct intr_irqsrc	*ii_isrc;
	char			 ii_name[INTR_IPI_NAMELEN];
	u_long			*ii_count;
};

static device_t intr_ipi_dev;
static u_int intr_ipi_dev_priority;
static bool intr_ipi_dev_frozen;

static struct mtx pic_list_lock;
static SLIST_HEAD(, intr_pic) pic_list;

static struct intr_pic *pic_lookup(device_t dev, intptr_t xref, int flags);

/* Interrupt source definition. */
static struct mtx isrc_table_lock;
static struct intr_irqsrc **irq_sources;
static u_int irq_next_free;

#ifdef EARLY_AP_STARTUP
static bool irq_assign_cpu = true;
#else
static bool irq_assign_cpu = false;
#endif

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

u_int intr_nirq = NIRQ;
SYSCTL_UINT(_machdep, OID_AUTO, nirq, CTLFLAG_RDTUN, &intr_nirq, 0,
    "Number of IRQs");

/* Data for MI statistics reporting. */
u_long *intrcnt;
char *intrnames;
size_t sintrcnt;
size_t sintrnames;
static u_int nintrcnt;
static bitstr_t *intrcnt_bitmap;

static struct intr_irqsrc *intr_map_get_isrc(u_int res_id);
static void intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc);
static struct intr_map_data * intr_map_get_map_data(u_int res_id);
static void intr_map_copy_map_data(u_int res_id, device_t *dev, intptr_t *xref,
    struct intr_map_data **data);

/*
 *  Interrupt framework initialization routine.
 */
static void
intr_irq_init(void *dummy __unused)
{

	SLIST_INIT(&pic_list);
	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);

	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);

	/*
	 * - 2 counters for each I/O interrupt.
	 * - mp_maxid + 1 counters for each IPI, for SMP.
	 */
	nintrcnt = intr_nirq * 2;
#ifdef SMP
	nintrcnt += INTR_IPI_COUNT * (mp_maxid + 1);
#endif

	intrcnt = mallocarray(nintrcnt, sizeof(u_long), M_INTRNG,
	    M_WAITOK | M_ZERO);
	intrnames = mallocarray(nintrcnt, INTRNAME_LEN, M_INTRNG,
	    M_WAITOK | M_ZERO);
	sintrcnt = nintrcnt * sizeof(u_long);
	sintrnames = nintrcnt * INTRNAME_LEN;

	/* Allocate the bitmap tracking counter allocations. */
	intrcnt_bitmap = bit_alloc(nintrcnt, M_INTRNG, M_WAITOK | M_ZERO);

	irq_sources = mallocarray(intr_nirq, sizeof(struct intr_irqsrc*),
	    M_INTRNG, M_WAITOK | M_ZERO);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);

static void
intrcnt_setname(const char *name, int index)
{

	snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
	    INTRNAME_LEN - 1, name);
}

/*
 *  Update name for interrupt source with interrupt event.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

	/* QQQ: What about stray counter name? */
	mtx_assert(&isrc_table_lock, MA_OWNED);
	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}

/*
 *  Virtualization for interrupt source interrupt counter increment.
 */
static inline void
isrc_increment_count(struct intr_irqsrc *isrc)
{

	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		atomic_add_long(&isrc->isrc_count[0], 1);
	else
		isrc->isrc_count[0]++;
}

/*
 *  Virtualization for interrupt source interrupt stray counter increment.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

	isrc->isrc_count[1]++;
}

/*
 *  Virtualization for interrupt source interrupt name update.
 */
static void
isrc_update_name(struct intr_irqsrc *isrc, const char *name)
{
	char str[INTRNAME_LEN];

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (name != NULL) {
		snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
		    name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	} else {
		snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	}
}

/*
 *  Virtualization for interrupt source interrupt counters setup.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
	int index;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	/*
	 * Allocate two counter values, the second tracking "stray" interrupts.
	 */
	bit_ffc_area(intrcnt_bitmap, nintrcnt, 2, &index);
	if (index == -1)
		panic("Failed to allocate 2 counters. Array exhausted?");
	bit_nset(intrcnt_bitmap, index, index + 1);
	isrc->isrc_index = index;
	isrc->isrc_count = &intrcnt[index];
	isrc_update_name(isrc, NULL);
}

/*
 *  Virtualization for interrupt source interrupt counters release.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{
	int idx = isrc->isrc_index;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	bit_nclear(intrcnt_bitmap, idx, idx + 1);
}

/*
 *  Main interrupt dispatch handler. It is called straight
 *  from the assembler, where the CPU interrupt is served.
 */
void
intr_irq_handler(struct trapframe *tf)
{
	struct trapframe * oldframe;
	struct thread * td;

	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

	kasan_mark(tf, sizeof(*tf), sizeof(*tf), 0);

	VM_CNT_INC(v_intr);
	critical_enter();
	td = curthread;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	irq_root_filter(irq_root_arg);
	td->td_intr_frame = oldframe;
	critical_exit();
#ifdef HWPMC_HOOKS
	if (pmc_hook && TRAPF_USERMODE(tf) &&
	    (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
		pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
#endif
}

int
intr_child_irq_handler(struct intr_pic *parent, uintptr_t irq)
{
	struct intr_pic_child *child;
	bool found;

	found = false;
	mtx_lock_spin(&parent->pic_child_lock);
	SLIST_FOREACH(child, &parent->pic_children, pc_next) {
		if (child->pc_start <= irq &&
		    irq < (child->pc_start + child->pc_length)) {
			found = true;
			break;
		}
	}
	mtx_unlock_spin(&parent->pic_child_lock);

	if (found)
		return (child->pc_filter(child->pc_filter_arg, irq));

	return (FILTER_STRAY);
}

/*
 *  Interrupt controller dispatch function for interrupts. It should
 *  be called straight from the interrupt controller, when the associated
 *  interrupt source is learned.
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_increment_count(isrc);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		int error;
		error = isrc->isrc_filter(isrc->isrc_arg, tf);
		PIC_POST_FILTER(isrc->isrc_dev, isrc);
		if (error == FILTER_HANDLED)
			return (0);
	} else
#endif
	if (isrc->isrc_event != NULL) {
		if (intr_event_handle(isrc->isrc_event, tf) == 0)
			return (0);
	}

	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_increment_straycount(isrc);
	return (EINVAL);
}
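
/*
 * Usage sketch (not part of this file): a leaf PIC driver's interrupt
 * filter typically looks up the pending hardware IRQ and hands its
 * intr_irqsrc to intr_isrc_dispatch(); a non-zero return marks a stray.
 * The "mypic_*" names and the softc layout below are hypothetical.
 *
 *	static int
 *	mypic_intr(void *arg)
 *	{
 *		struct mypic_softc *sc = arg;
 *		u_int irq = mypic_read_pending(sc);	// pending/ack register
 *
 *		if (intr_isrc_dispatch(&sc->sc_isrcs[irq].mi_isrc,
 *		    curthread->td_intr_frame) != 0) {
 *			mypic_mask(sc, irq);		// mask the stray source
 *			device_printf(sc->sc_dev, "stray irq %u\n", irq);
 *		}
 *		return (FILTER_HANDLED);
 *	}
 */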

/*
 *  Alloc unique interrupt number (resource handle) for interrupt source.
 *
 *  There could be various strategies for allocating a free interrupt
 *  number (resource handle) for a new interrupt source.
 *
 *  1. Handles are always allocated forward, so handles are not recycled
 *     immediately. However, if only one free handle is left, it is reused
 *     constantly...
 */
static inline int
isrc_alloc_irq(struct intr_irqsrc *isrc)
{
	u_int irq;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (irq_next_free >= intr_nirq)
		return (ENOSPC);

	for (irq = irq_next_free; irq < intr_nirq; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}
	for (irq = 0; irq < irq_next_free; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}

	irq_next_free = intr_nirq;
	return (ENOSPC);

found:
	isrc->isrc_irq = irq;
	irq_sources[irq] = isrc;

	irq_next_free = irq + 1;
	if (irq_next_free >= intr_nirq)
		irq_next_free = 0;
	return (0);
}
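
/*
 * A worked example of the policy above (hypothetical numbers, assuming
 * intr_nirq == 4): after handles 0 and 1 are allocated, irq_next_free is 2.
 * If handle 0 is then freed, the next allocations still return 2 and 3,
 * because the search starts at irq_next_free; only once the forward range
 * is exhausted does the second loop wrap around and recycle handle 0.
 * This is what delays handle reuse.
 */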

/*
 *  Free unique interrupt number (resource handle) from interrupt source.
 */
static inline int
isrc_free_irq(struct intr_irqsrc *isrc)
{

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (isrc->isrc_irq >= intr_nirq)
		return (EINVAL);
	if (irq_sources[isrc->isrc_irq] != isrc)
		return (EINVAL);

	irq_sources[isrc->isrc_irq] = NULL;
	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */

	/*
	 * If we are recovering from the state when the irq_sources table
	 * was full, then the following allocation should check the entire
	 * table. This will ensure maximum separation of allocation order
	 * from release order.
	 */
	if (irq_next_free >= intr_nirq)
		irq_next_free = 0;

	return (0);
}

/*
 *  Initialize interrupt source and register it into global interrupt table.
 */
int
intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
    const char *fmt, ...)
{
	int error;
	va_list ap;

	bzero(isrc, sizeof(struct intr_irqsrc));
	isrc->isrc_dev = dev;
	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
	isrc->isrc_flags = flags;

	va_start(ap, fmt);
	vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
	va_end(ap);

	mtx_lock(&isrc_table_lock);
	error = isrc_alloc_irq(isrc);
	if (error != 0) {
		mtx_unlock(&isrc_table_lock);
		return (error);
	}
	/*
	 * Setup interrupt counters, but not for IPI sources. Those are setup
	 * later and only for used ones (up to INTR_IPI_COUNT) to not exhaust
	 * our counter pool.
	 */
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_setup_counters(isrc);
	mtx_unlock(&isrc_table_lock);
	return (0);
}
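
/*
 * Registration sketch (hypothetical driver, not from this file): a PIC
 * driver typically embeds struct intr_irqsrc in its per-interrupt data
 * and registers each source at attach time:
 *
 *	for (irq = 0; irq < sc->sc_nirqs; irq++) {
 *		sc->sc_isrcs[irq].mi_irq = irq;
 *		error = intr_isrc_register(&sc->sc_isrcs[irq].mi_isrc,
 *		    sc->sc_dev, 0, "%s,%u",
 *		    device_get_nameunit(sc->sc_dev), irq);
 *		if (error != 0)
 *			return (error);	// deregister the earlier ones too
 *	}
 */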

/*
 *  Deregister interrupt source from global interrupt table.
 */
int
intr_isrc_deregister(struct intr_irqsrc *isrc)
{
	int error;

	mtx_lock(&isrc_table_lock);
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_release_counters(isrc);
	error = isrc_free_irq(isrc);
	mtx_unlock(&isrc_table_lock);
	return (error);
}

#ifdef SMP
/*
 *  A support function for a PIC to decide if provided ISRC should be inited
 *  on given cpu. The logic of INTR_ISRCF_BOUND flag and isrc_cpu member of
 *  struct intr_irqsrc is the following:
 *
 *     If INTR_ISRCF_BOUND is set, the ISRC should be inited only on cpus
 *     set in isrc_cpu. If not, the ISRC should be inited on every cpu and
 *     isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct.
 */
bool
intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
{

	if (isrc->isrc_handlers == 0)
		return (false);
	if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0)
		return (false);
	if (isrc->isrc_flags & INTR_ISRCF_BOUND)
		return (CPU_ISSET(cpu, &isrc->isrc_cpu));

	CPU_SET(cpu, &isrc->isrc_cpu);
	return (true);
}
#endif
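
/*
 * Sketch of the intended use (hypothetical PIC method): when a secondary
 * CPU comes up, the PIC walks its per-CPU capable sources and unmasks only
 * those that intr_isrc_init_on_cpu() approves for this CPU:
 *
 *	static void
 *	mypic_init_secondary(device_t dev)
 *	{
 *		struct mypic_softc *sc = device_get_softc(dev);
 *		u_int cpu = PCPU_GET(cpuid);
 *		u_int irq;
 *
 *		for (irq = 0; irq < sc->sc_nirqs; irq++) {
 *			if (intr_isrc_init_on_cpu(
 *			    &sc->sc_isrcs[irq].mi_isrc, cpu))
 *				mypic_unmask(sc, irq);	// enable on this CPU
 *		}
 *	}
 */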

#ifdef INTR_SOLO
/*
 *  Setup filter into interrupt source.
 */
static int
isrc_setup_filter(struct intr_irqsrc *isrc, const char *name,
    intr_irq_filter_t *filter, void *arg, void **cookiep)
{

	if (filter == NULL)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * in which we handle interrupt sources.
	 */
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
		mtx_unlock(&isrc_table_lock);
		return (EBUSY);
	}
	isrc->isrc_filter = filter;
	isrc->isrc_arg = arg;
	isrc_update_name(isrc, name);
	mtx_unlock(&isrc_table_lock);

	*cookiep = isrc;
	return (0);
}
#endif

/*
 *  Interrupt source pre_ithread method for MI interrupt framework.
 */
static void
intr_isrc_pre_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 *  Interrupt source post_ithread method for MI interrupt framework.
 */
static void
intr_isrc_post_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 *  Interrupt source post_filter method for MI interrupt framework.
 */
static void
intr_isrc_post_filter(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_FILTER(isrc->isrc_dev, isrc);
}

/*
 *  Interrupt source assign_cpu method for MI interrupt framework.
 */
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
	struct intr_irqsrc *isrc = arg;
	int error;

	mtx_lock(&isrc_table_lock);
	if (cpu == NOCPU) {
		CPU_ZERO(&isrc->isrc_cpu);
		isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
	} else {
		CPU_SETOF(cpu, &isrc->isrc_cpu);
		isrc->isrc_flags |= INTR_ISRCF_BOUND;
	}

	/*
	 * In NOCPU case, it's up to PIC to either leave ISRC on same CPU or
	 * re-balance it to another CPU or enable it on more CPUs. However,
	 * PIC is expected to change isrc_cpu appropriately to keep us well
	 * informed if the call is successful.
	 */
	if (irq_assign_cpu) {
		error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
		if (error) {
			CPU_ZERO(&isrc->isrc_cpu);
			mtx_unlock(&isrc_table_lock);
			return (error);
		}
	}
	mtx_unlock(&isrc_table_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

/*
 *  Create interrupt event for interrupt source.
 */
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;
	int error;

	error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
	    intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
	    intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
	if (error)
		return (error);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways in which we handle
	 * interrupt sources. Let the contested event win.
	 */
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
		mtx_unlock(&isrc_table_lock);
		intr_event_destroy(ie);
		return (isrc->isrc_event != NULL ? EBUSY : 0);
	}
	isrc->isrc_event = ie;
	mtx_unlock(&isrc_table_lock);

	return (0);
}

/*
 *  Destroy interrupt event for interrupt source.
 */
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;

	mtx_lock(&isrc_table_lock);
	ie = isrc->isrc_event;
	isrc->isrc_event = NULL;
	mtx_unlock(&isrc_table_lock);

	if (ie != NULL)
		intr_event_destroy(ie);
}

/*
 *  Add handler to interrupt source.
 */
static int
isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, void **cookiep)
{
	int error;

	if (isrc->isrc_event == NULL) {
		error = isrc_event_create(isrc);
		if (error)
			return (error);
	}

	error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}

	return (error);
}

/*
 *  Lookup interrupt controller locked.
 */
static inline struct intr_pic *
pic_lookup_locked(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_assert(&pic_list_lock, MA_OWNED);

	if (dev == NULL && xref == 0)
		return (NULL);

	/* Note that pic->pic_dev is never NULL on registered PIC. */
	SLIST_FOREACH(pic, &pic_list, pic_next) {
		if ((pic->pic_flags & FLAG_TYPE_MASK) !=
		    (flags & FLAG_TYPE_MASK))
			continue;

		if (dev == NULL) {
			if (xref == pic->pic_xref)
				return (pic);
		} else if (xref == 0 || pic->pic_xref == 0) {
			if (dev == pic->pic_dev)
				return (pic);
		} else if (xref == pic->pic_xref && dev == pic->pic_dev)
			return (pic);
	}
	return (NULL);
}

/*
 *  Lookup interrupt controller.
 */
static struct intr_pic *
pic_lookup(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref, flags);
	mtx_unlock(&pic_list_lock);

	return (pic);
}

/*
 *  Create interrupt controller.
 */
static struct intr_pic *
pic_create(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref, flags);
	if (pic != NULL) {
		mtx_unlock(&pic_list_lock);
		return (pic);
	}
	pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return (NULL);
	}
	pic->pic_xref = xref;
	pic->pic_dev = dev;
	pic->pic_flags = flags;
	mtx_init(&pic->pic_child_lock, "pic child lock", NULL, MTX_SPIN);
	SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
	mtx_unlock(&pic_list_lock);

	return (pic);
}

/*
 *  Destroy interrupt controller.
 */
static void
pic_destroy(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref, flags);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return;
	}
	SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
	mtx_unlock(&pic_list_lock);

	free(pic, M_INTRNG);
}

/*
 *  Register interrupt controller.
 */
struct intr_pic *
intr_pic_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (NULL);
	pic = pic_create(dev, xref, FLAG_PIC);
	if (pic == NULL)
		return (NULL);

	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
	    device_get_nameunit(dev), dev, (uintmax_t)xref);
	return (pic);
}
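
/*
 * Attach-time sketch for an FDT PIC driver (hypothetical driver code;
 * OF_xref_from_node() and ofw_bus_get_node() are the standard dev/ofw
 * helpers used to derive the xref):
 *
 *	xref = OF_xref_from_node(ofw_bus_get_node(dev));
 *	if (intr_pic_register(dev, (intptr_t)xref) == NULL)
 *		return (ENXIO);
 */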

/*
 *  Unregister interrupt controller.
 */
int
intr_pic_deregister(device_t dev, intptr_t xref)
{

	panic("%s: not implemented", __func__);
}

/*
 *  Mark interrupt controller (itself) as a root one.
 *
 *  Note that only an interrupt controller can really know its position
 *  in interrupt controller's tree. So root PIC must claim itself as a root.
 *
 *  In FDT case, according to ePAPR approved version 1.1 from 08 April 2011,
 *  page 30:
 *    "The root of the interrupt tree is determined when traversal
 *     of the interrupt tree reaches an interrupt controller node without
 *     an interrupts property and thus no explicit interrupt parent."
 */
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
    void *arg)
{
	struct intr_pic *pic;

	pic = pic_lookup(dev, xref, FLAG_PIC);
	if (pic == NULL) {
		device_printf(dev, "not registered\n");
		return (EINVAL);
	}

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC,
	    ("%s: Found a non-PIC controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	if (filter == NULL) {
		device_printf(dev, "filter missing\n");
		return (EINVAL);
	}

	/*
	 * Only one interrupt controller can be on the root for now.
	 * Note that we further suppose that there is no threaded interrupt
	 * routine (handler) on the root. See intr_irq_handler().
	 */
	if (intr_irq_root_dev != NULL) {
		device_printf(dev, "another root already set\n");
		return (EBUSY);
	}

	intr_irq_root_dev = dev;
	irq_root_filter = filter;
	irq_root_arg = arg;

	debugf("irq root set to %s\n", device_get_nameunit(dev));
	return (0);
}
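
/*
 * A root controller claims itself once it is registered; "mypic_filter"
 * below is a hypothetical intr_irq_filter_t that reads the pending
 * interrupt and calls intr_isrc_dispatch() (sketch, not from this file):
 *
 *	intr_pic_register(dev, xref);
 *	error = intr_pic_claim_root(dev, xref, mypic_filter, sc);
 */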

/*
 *  Add a handler to manage a sub range of a parent's interrupts.
 */
struct intr_pic *
intr_pic_add_handler(device_t parent, struct intr_pic *pic,
    intr_child_irq_filter_t *filter, void *arg, uintptr_t start,
    uintptr_t length)
{
	struct intr_pic *parent_pic;
	struct intr_pic_child *newchild;
#ifdef INVARIANTS
	struct intr_pic_child *child;
#endif

	/* Find the parent PIC */
	parent_pic = pic_lookup(parent, 0, FLAG_PIC);
	if (parent_pic == NULL)
		return (NULL);

	newchild = malloc(sizeof(*newchild), M_INTRNG, M_WAITOK | M_ZERO);
	newchild->pc_pic = pic;
	newchild->pc_filter = filter;
	newchild->pc_filter_arg = arg;
	newchild->pc_start = start;
	newchild->pc_length = length;

	mtx_lock_spin(&parent_pic->pic_child_lock);
#ifdef INVARIANTS
	SLIST_FOREACH(child, &parent_pic->pic_children, pc_next) {
		KASSERT(child->pc_pic != pic, ("%s: Adding a child PIC twice",
		    __func__));
	}
#endif
	SLIST_INSERT_HEAD(&parent_pic->pic_children, newchild, pc_next);
	mtx_unlock_spin(&parent_pic->pic_child_lock);

	return (pic);
}

static int
intr_resolve_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
    struct intr_irqsrc **isrc)
{
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;

	if (data == NULL)
		return (EINVAL);

	pic = pic_lookup(dev, xref,
	    (data->type == INTR_MAP_DATA_MSI) ? FLAG_MSI : FLAG_PIC);
	if (pic == NULL)
		return (ESRCH);

	switch (data->type) {
	case INTR_MAP_DATA_MSI:
		KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
		    ("%s: Found a non-MSI controller: %s", __func__,
		    device_get_name(pic->pic_dev)));
		msi = (struct intr_map_data_msi *)data;
		*isrc = msi->isrc;
		return (*isrc == NULL ? EINVAL : 0);

	default:
		KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC,
		    ("%s: Found a non-PIC controller: %s", __func__,
		    device_get_name(pic->pic_dev)));
		return (PIC_MAP_INTR(pic->pic_dev, data, isrc));
	}
}

bool
intr_is_per_cpu(struct resource *res)
{
	u_int res_id;
	struct intr_irqsrc *isrc;

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);

	if (isrc == NULL)
		panic("Attempt to get isrc for non-active resource id: %u\n",
		    res_id);
	return ((isrc->isrc_flags & INTR_ISRCF_PPI) != 0);
}

int
intr_activate_irq(device_t dev, struct resource *res)
{
	device_t map_dev;
	intptr_t map_xref;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;
	int error;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	if (intr_map_get_isrc(res_id) != NULL)
		panic("Attempt to double activation of resource id: %u\n",
		    res_id);
	intr_map_copy_map_data(res_id, &map_dev, &map_xref, &data);
	error = intr_resolve_irq(map_dev, map_xref, data, &isrc);
	if (error != 0) {
		free(data, M_INTRNG);
		/* XXX TODO DISCONNECTED PICs */
		/* if (error == EINVAL) return(0); */
		return (error);
	}
	intr_map_set_isrc(res_id, isrc);
	rman_set_virtual(res, data);
	return (PIC_ACTIVATE_INTR(isrc->isrc_dev, isrc, res, data));
}

int
intr_deactivate_irq(device_t dev, struct resource *res)
{
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;
	int error;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL)
		panic("Attempt to deactivate non-active resource id: %u\n",
		    res_id);

	data = rman_get_virtual(res);
	error = PIC_DEACTIVATE_INTR(isrc->isrc_dev, isrc, res, data);
	intr_map_set_isrc(res_id, NULL);
	rman_set_virtual(res, NULL);
	free(data, M_INTRNG);
	return (error);
}

int
intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
    driver_intr_t hand, void *arg, int flags, void **cookiep)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	const char *name;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL) {
		/* XXX TODO DISCONNECTED PICs */
		return (EINVAL);
	}

	data = rman_get_virtual(res);
	name = device_get_nameunit(dev);

#ifdef INTR_SOLO
	/*
	 * Standard handling is done through MI interrupt framework. However,
	 * some interrupts could request solely own special handling. This
	 * non standard handling can be used for interrupt controllers without
	 * handler (filter only), so in case that interrupt controllers are
	 * chained, MI interrupt framework is called only in leaf controller.
	 *
	 * Note that root interrupt controller routine is served as well,
	 * however in intr_irq_handler(), i.e. main system dispatch routine.
	 */
	if (flags & INTR_SOLO && hand != NULL) {
		debugf("irq %u cannot solo on %s\n", isrc->isrc_irq, name);
		return (EINVAL);
	}

	if (flags & INTR_SOLO) {
		error = isrc_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
		    arg, cookiep);
		debugf("irq %u setup filter error %d on %s\n", isrc->isrc_irq,
		    error, name);
	} else
#endif
	{
		error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
		    cookiep);
		debugf("irq %u add handler error %d on %s\n", isrc->isrc_irq,
		    error, name);
	}
	if (error != 0)
		return (error);

	mtx_lock(&isrc_table_lock);
	error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
	if (error == 0) {
		isrc->isrc_handlers++;
		if (isrc->isrc_handlers == 1)
			PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
	}
	mtx_unlock(&isrc_table_lock);
	if (error != 0)
		intr_event_remove_handler(*cookiep);
	return (error);
}
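
/*
 * Consumers do not call intr_setup_irq() directly; it is reached through
 * the bus method chain. A typical leaf driver does something like the
 * following (sketch; the "sc" fields are hypothetical):
 *
 *	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *	    RF_ACTIVE);
 *	error = bus_setup_intr(dev, sc->sc_irq_res,
 *	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, mydrv_intr, sc,
 *	    &sc->sc_intrhand);
 */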

int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

	data = rman_get_virtual(res);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		if (isrc->isrc_handlers == 0)
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}

int
intr_describe_irq(device_t dev, struct resource *res, void *cookie,
    const char *descr)
{
	int error;
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc_update_name(isrc, descr);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}

#ifdef SMP
int
intr_bind_irq(device_t dev, struct resource *res, int cpu)
{
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL)
		return (intr_isrc_assign_cpu(isrc, cpu));
#endif
	return (intr_event_bind(isrc->isrc_event, cpu));
}

/*
 *  Return the CPU that the next interrupt source should use.
 *  For now just returns the next CPU according to round-robin.
 */
u_int
intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
{
	u_int cpu;

	KASSERT(!CPU_EMPTY(cpumask), ("%s: Empty CPU mask", __func__));
	if (!irq_assign_cpu || mp_ncpus == 1) {
		cpu = PCPU_GET(cpuid);

		if (CPU_ISSET(cpu, cpumask))
			return (cpu);

		return (CPU_FFS(cpumask) - 1);
	}

	do {
		last_cpu++;
		if (last_cpu > mp_maxid)
			last_cpu = 0;
	} while (!CPU_ISSET(last_cpu, cpumask));

	return (last_cpu);
}
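
/*
 * Example (sketch): a PIC's pic_bind_intr method can use this round-robin
 * helper to spread unbound sources over the available CPUs; "last_cpu"
 * here would be a static rotor kept by the hypothetical driver:
 *
 *	static u_int last_cpu;
 *	...
 *	if (CPU_EMPTY(&isrc->isrc_cpu)) {	// not bound by the user
 *		last_cpu = intr_irq_next_cpu(last_cpu, &all_cpus);
 *		CPU_SETOF(last_cpu, &isrc->isrc_cpu);
 *	}
 */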

#ifndef EARLY_AP_STARTUP
/*
 *  Distribute all the interrupt sources among the available
 *  CPUs once the APs have been launched.
 */
static void
intr_irq_shuffle(void *arg __unused)
{
	struct intr_irqsrc *isrc;
	u_int i;

	if (mp_ncpus == 1)
		return;

	mtx_lock(&isrc_table_lock);
	irq_assign_cpu = true;
	for (i = 0; i < intr_nirq; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL || isrc->isrc_handlers == 0 ||
		    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
			continue;

		if (isrc->isrc_event != NULL &&
		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
			panic("%s: CPU inconsistency", __func__);

		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
			CPU_ZERO(&isrc->isrc_cpu); /* start again */

		/*
		 * We are in a wicked position here if the following call
		 * fails for a bound ISRC. The best thing we can do is to
		 * clear isrc_cpu so the inconsistency with ie_cpu will be
		 * detectable.
		 */
		if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
			CPU_ZERO(&isrc->isrc_cpu);
	}
	mtx_unlock(&isrc_table_lock);
}
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
#endif /* !EARLY_AP_STARTUP */

#else
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

	return (PCPU_GET(cpuid));
}
#endif /* SMP */

/*
 *  Allocate memory for new intr_map_data structure.
 *  Initialize common fields.
 */
struct intr_map_data *
intr_alloc_map_data(enum intr_map_data_type type, size_t len, int flags)
{
	struct intr_map_data *data;

	data = malloc(len, M_INTRNG, flags);
	if (data != NULL) {
		data->type = type;
		data->len = len;
	}
	return (data);
}

void
intr_free_intr_map_data(struct intr_map_data *data)
{

	free(data, M_INTRNG);
}

/*
 *  Register an MSI/MSI-X interrupt controller.
 */
int
intr_msi_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (EINVAL);
	pic = pic_create(dev, xref, FLAG_MSI);
	if (pic == NULL)
		return (ENOMEM);

	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
	    device_get_nameunit(dev), dev, (uintmax_t)xref);
	return (0);
}

int
intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count,
    int maxcount, int *irqs)
{
	struct iommu_domain *domain;
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	device_t pdev;
	struct intr_map_data_msi *msi;
	int err, i;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	/*
	 * If this is the first time we have used this context ask the
	 * interrupt controller to map memory the msi source will need.
	 */
	err = MSI_IOMMU_INIT(pic->pic_dev, child, &domain);
	if (err != 0)
		return (err);

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
	err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc);
	if (err != 0) {
		free(isrc, M_INTRNG);
		return (err);
	}

	for (i = 0; i < count; i++) {
		isrc[i]->isrc_iommu = domain;
		msi = (struct intr_map_data_msi *)intr_alloc_map_data(
		    INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
		msi->isrc = isrc[i];

		irqs[i] = intr_map_irq(pic->pic_dev, xref,
		    (struct intr_map_data *)msi);
	}
	free(isrc, M_INTRNG);

	return (err);
}

int
intr_release_msi(device_t pci, device_t child, intptr_t xref, int count,
    int *irqs)
{
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;
	int i, err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);

	for (i = 0; i < count; i++) {
		msi = (struct intr_map_data_msi *)
		    intr_map_get_map_data(irqs[i]);
		KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
		    ("%s: irq %d map data is not MSI", __func__,
		    irqs[i]));
		isrc[i] = msi->isrc;
	}

	MSI_IOMMU_DEINIT(pic->pic_dev, child);

	err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc);

	for (i = 0; i < count; i++) {
		if (isrc[i] != NULL)
			intr_unmap_irq(irqs[i]);
	}

	free(isrc, M_INTRNG);
	return (err);
}

int
intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq)
{
	struct iommu_domain *domain;
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	device_t pdev;
	struct intr_map_data_msi *msi;
	int err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	/*
	 * If this is the first time we have used this context ask the
	 * interrupt controller to map memory the msi source will need.
	 */
	err = MSI_IOMMU_INIT(pic->pic_dev, child, &domain);
	if (err != 0)
		return (err);

	err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc);
	if (err != 0)
		return (err);

	isrc->isrc_iommu = domain;
	msi = (struct intr_map_data_msi *)intr_alloc_map_data(
	    INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
	msi->isrc = isrc;

	*irq = intr_map_irq(pic->pic_dev, xref, (struct intr_map_data *)msi);
	return (0);
}

int
intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;
	int err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	msi = (struct intr_map_data_msi *)
	    intr_map_get_map_data(irq);
	KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
	    ("%s: irq %d map data is not MSI", __func__,
	    irq));

	isrc = msi->isrc;
	if (isrc == NULL) {
		intr_unmap_irq(irq);
		return (EINVAL);
	}

	MSI_IOMMU_DEINIT(pic->pic_dev, child);

	err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc);
	intr_unmap_irq(irq);

	return (err);
}

int
intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq,
    uint64_t *addr, uint32_t *data)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	int err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = intr_map_get_isrc(irq);
	if (isrc == NULL)
		return (EINVAL);

	err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data);

#ifdef IOMMU
	if (isrc->isrc_iommu != NULL)
		iommu_translate_msi(isrc->isrc_iommu, addr);
#endif

	return (err);
}

/* Placeholder for soft interrupt processing; unused by this framework. */
void dosoftints(void);
void
dosoftints(void)
{
}

#ifdef SMP
/*
 *  Init interrupt controller on another CPU.
 */
void
intr_pic_init_secondary(void)
{

	/*
	 * QQQ: Only root PIC is aware of other CPUs ???
	 */
	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	//mtx_lock(&isrc_table_lock);
	PIC_INIT_SECONDARY(intr_irq_root_dev);
	//mtx_unlock(&isrc_table_lock);
}
#endif

#ifdef DDB
DB_SHOW_COMMAND_FLAGS(irqs, db_show_irqs, DB_CMD_MEMSAFE)
{
	u_int i, irqsum;
	u_long num;
	struct intr_irqsrc *isrc;

	for (irqsum = 0, i = 0; i < intr_nirq; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL)
			continue;

		num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
		irqsum += num;
	}
	db_printf("irq total %u\n", irqsum);
}
#endif

/*
 * Interrupt mapping table functions.
 *
 * Please keep this part separate; it can be transformed into an
 * extension of standard resources.
 */
struct intr_map_entry
{
	device_t 		dev;
	intptr_t 		xref;
	struct intr_map_data 	*map_data;
	struct intr_irqsrc 	*isrc;
	/* XXX TODO DISCONNECTED PICs */
	/*int			flags;*/
};

/* XXX Convert irq_map[] to a dynamically expandable one. */
static struct intr_map_entry **irq_map;
static u_int irq_map_count;
static u_int irq_map_first_free_idx;
static struct mtx irq_map_lock;

static struct intr_irqsrc *
intr_map_get_isrc(u_int res_id)
{
	struct intr_irqsrc *isrc;

	isrc = NULL;
	mtx_lock(&irq_map_lock);
	if (res_id < irq_map_count && irq_map[res_id] != NULL)
		isrc = irq_map[res_id]->isrc;
	mtx_unlock(&irq_map_lock);

	return (isrc);
}

static void
intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc)
{

	mtx_lock(&irq_map_lock);
	if (res_id < irq_map_count && irq_map[res_id] != NULL)
		irq_map[res_id]->isrc = isrc;
	mtx_unlock(&irq_map_lock);
}

/*
 * Get the data pointer stored in an intr_map_entry.
 */
static struct intr_map_data *
intr_map_get_map_data(u_int res_id)
{
	struct intr_map_data *data;

	mtx_lock(&irq_map_lock);
	if (res_id >= irq_map_count || irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	data = irq_map[res_id]->map_data;
	mtx_unlock(&irq_map_lock);

	return (data);
}

/*
 * Get a copy of intr_map_entry data.
 */
static void
intr_map_copy_map_data(u_int res_id, device_t *map_dev, intptr_t *map_xref,
    struct intr_map_data **data)
{
	size_t len;

	len = 0;
	mtx_lock(&irq_map_lock);
	if (res_id >= irq_map_count || irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	if (irq_map[res_id]->map_data != NULL)
		len = irq_map[res_id]->map_data->len;
	mtx_unlock(&irq_map_lock);

	if (len == 0)
		*data = NULL;
	else
		*data = malloc(len, M_INTRNG, M_WAITOK | M_ZERO);
	mtx_lock(&irq_map_lock);
	if (irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	if (len != 0) {
		if (len != irq_map[res_id]->map_data->len)
			panic("Resource id: %u has changed.\n", res_id);
		memcpy(*data, irq_map[res_id]->map_data, len);
	}
	*map_dev = irq_map[res_id]->dev;
	*map_xref = irq_map[res_id]->xref;
	mtx_unlock(&irq_map_lock);
}

/*
 * Allocate and fill new entry in irq_map table.
 */
u_int
intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data)
{
	u_int i;
	struct intr_map_entry *entry;

	/* Prepare new entry first. */
	entry = malloc(sizeof(*entry), M_INTRNG, M_WAITOK | M_ZERO);

	entry->dev = dev;
	entry->xref = xref;
	entry->map_data = data;
	entry->isrc = NULL;

	mtx_lock(&irq_map_lock);
	for (i = irq_map_first_free_idx; i < irq_map_count; i++) {
		if (irq_map[i] == NULL) {
			irq_map[i] = entry;
			irq_map_first_free_idx = i + 1;
			mtx_unlock(&irq_map_lock);
			return (i);
		}
	}
	for (i = 0; i < irq_map_first_free_idx; i++) {
		if (irq_map[i] == NULL) {
			irq_map[i] = entry;
			irq_map_first_free_idx = i + 1;
			mtx_unlock(&irq_map_lock);
			return (i);
		}
	}
	mtx_unlock(&irq_map_lock);

	/* XXX Expand irq_map table */
	panic("IRQ mapping table is full.");
}
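
/*
 * Mapping sketch (hypothetical FDT bus code, modeled on the FDT map data
 * used by the ofw_bus glue): the returned index is the value a driver
 * later sees as rman_get_start() of its SYS_RES_IRQ resource, which is how
 * intr_activate_irq() above finds the entry again.
 *
 *	fdt = (struct intr_map_data_fdt *)intr_alloc_map_data(
 *	    INTR_MAP_DATA_FDT, sizeof(*fdt) + ncells * sizeof(pcell_t),
 *	    M_WAITOK | M_ZERO);
 *	fdt->iparent = iparent;
 *	fdt->ncells = ncells;
 *	memcpy(fdt->cells, cells, ncells * sizeof(pcell_t));
 *	rid = intr_map_irq(NULL, iparent_xref, (struct intr_map_data *)fdt);
 */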

/*
 * Remove and free mapping entry.
 */
void
intr_unmap_irq(u_int res_id)
{
	struct intr_map_entry *entry;

	mtx_lock(&irq_map_lock);
	if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL))
		panic("Attempt to unmap invalid resource id: %u\n", res_id);
	entry = irq_map[res_id];
	irq_map[res_id] = NULL;
	irq_map_first_free_idx = res_id;
	mtx_unlock(&irq_map_lock);
	intr_free_intr_map_data(entry->map_data);
	free(entry, M_INTRNG);
}

/*
 * Clone mapping entry.
 */
u_int
intr_map_clone_irq(u_int old_res_id)
{
	device_t map_dev;
	intptr_t map_xref;
	struct intr_map_data *data;

	intr_map_copy_map_data(old_res_id, &map_dev, &map_xref, &data);
	return (intr_map_irq(map_dev, map_xref, data));
}

static void
intr_map_init(void *dummy __unused)
{

	mtx_init(&irq_map_lock, "intr map table", NULL, MTX_DEF);

	irq_map_count = 2 * intr_nirq;
	irq_map = mallocarray(irq_map_count, sizeof(struct intr_map_entry*),
	    M_INTRNG, M_WAITOK | M_ZERO);
}
SYSINIT(intr_map_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_map_init, NULL);

#ifdef SMP
/* Virtualization for interrupt source IPI counter increment. */
static inline void
intr_ipi_increment_count(u_long *counter, u_int cpu)
{

	KASSERT(cpu < mp_maxid + 1, ("%s: too big cpu %u", __func__, cpu));
	counter[cpu]++;
}

/*
 *  Virtualization for interrupt source IPI counters setup.
 */
static u_long *
intr_ipi_setup_counters(const char *name)
{
	int i, index;
	char str[INTRNAME_LEN];

	mtx_lock(&isrc_table_lock);

	/*
	 * We should never have a problem finding mp_maxid + 1 contiguous
	 * counters, in practice. Interrupts will be allocated sequentially
	 * during boot, so the array should fill from low to high index. Once
	 * reserved, the IPI counters will never be released. Similarly, we
	 * will not need to allocate more IPIs once the system is running.
	 */
	bit_ffc_area(intrcnt_bitmap, nintrcnt, mp_maxid + 1, &index);
	if (index == -1)
		panic("Failed to allocate %d counters. Array exhausted?",
		    mp_maxid + 1);
	bit_nset(intrcnt_bitmap, index, index + mp_maxid);
	for (i = 0; i < mp_maxid + 1; i++) {
		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
		intrcnt_setname(str, index + i);
	}
	mtx_unlock(&isrc_table_lock);
	return (&intrcnt[index]);
}

/*
 *  Lookup IPI source.
 */
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

int
intr_ipi_pic_register(device_t dev, u_int priority)
{
	if (intr_ipi_dev_frozen) {
		device_printf(dev, "IPI device already frozen\n");
		return (EBUSY);
	}

	if (intr_ipi_dev == NULL || priority > intr_ipi_dev_priority) {
		intr_ipi_dev_priority = priority;
		intr_ipi_dev = dev;
	}

	return (0);
}

/*
 *  Setup IPI handler on interrupt controller.
 *
 *  Not SMP coherent.
 */
void
intr_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	struct intr_irqsrc *isrc;
	struct intr_ipi *ii;
	int error;

	if (!intr_ipi_dev_frozen) {
		if (intr_ipi_dev == NULL)
			panic("%s: no IPI PIC attached", __func__);

		intr_ipi_dev_frozen = true;
		device_printf(intr_ipi_dev, "using for IPIs\n");
	}

	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

	error = PIC_IPI_SETUP(intr_ipi_dev, ipi, &isrc);
	if (error != 0)
		return;

	isrc->isrc_handlers++;

	ii = intr_ipi_lookup(ipi);
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = arg;
	ii->ii_isrc = isrc;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	ii->ii_count = intr_ipi_setup_counters(name);

	PIC_ENABLE_INTR(intr_ipi_dev, isrc);
}
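
/*
 * MD startup code then wires the standard IPIs to their handlers, e.g.
 * (sketch modeled on the arm/arm64 mp_machdep.c usage; the handler names
 * are the usual MD ones):
 *
 *	intr_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
 *	intr_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
 *	intr_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
 */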

void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	KASSERT(intr_ipi_dev_frozen,
	    ("%s: IPI device not yet frozen", __func__));

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	/*
	 * XXX: Surely needed on other architectures too? Either way should be
	 * some kind of MI hook defined in an MD header, or the responsibility
	 * of the MD caller if not widespread.
	 */
#ifdef __aarch64__
	/*
	 * Ensure that this CPU's stores will be visible to IPI
	 * recipients before starting to send the interrupts.
	 */
	dsb(ishst);
#endif

	PIC_IPI_SEND(intr_ipi_dev, ii->ii_isrc, cpus, ipi);
}
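
/*
 * Send-side sketch: to interrupt every other CPU, a caller builds a cpuset
 * excluding itself and passes it here (this mirrors how MD code implements
 * ipi_all_but_self()):
 *
 *	cpuset_t cpus;
 *
 *	cpus = all_cpus;
 *	CPU_CLR(PCPU_GET(cpuid), &cpus);
 *	intr_ipi_send(cpus, IPI_RENDEZVOUS);
 */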

/*
 *  Interrupt controller dispatch function for IPIs. It should
 *  be called straight from the interrupt controller, when the associated
 *  interrupt source is learned, or from anybody who has the interrupt
 *  source mapped.
 */
void
intr_ipi_dispatch(u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	ii->ii_handler(ii->ii_handler_arg);
}
#endif /* SMP */