2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2011 The FreeBSD Foundation
7 * Developed by Damjan Marion <damjan.marion@gmail.com>
9 * Based on OMAP4 GIC code by Ben Gray
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the company nor the name of the author may be used to
20 * endorse or promote products derived from this software without specific
21 * prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
40 #include "opt_platform.h"
42 #include <sys/param.h>
43 #include <sys/systm.h>
45 #include <sys/kernel.h>
47 #include <sys/module.h>
48 #include <sys/malloc.h>
52 #include <sys/cpuset.h>
54 #include <sys/mutex.h>
57 #include <sys/sched.h>
63 #include <machine/bus.h>
64 #include <machine/intr.h>
65 #include <machine/smp.h>
68 #include <dev/fdt/fdt_intr.h>
69 #include <dev/ofw/ofw_bus_subr.h>
73 #include <contrib/dev/acpica/include/acpi.h>
74 #include <dev/acpica/acpivar.h>
77 #include <arm/arm/gic.h>
78 #include <arm/arm/gic_common.h>
85 /* We are using GICv2 register naming */
87 /* Distributor Registers */
/*
 * CPU interface (GICC_*) register offsets, with their GICv1 names noted.
 * NOTE(review): the "CPU Registers" section comment appears to be missing
 * between the two headings above — confirm against the full file.
 */
90 #define GICC_CTLR 0x0000 /* v1 ICCICR */
91 #define GICC_PMR 0x0004 /* v1 ICCPMR */
92 #define GICC_BPR 0x0008 /* v1 ICCBPR */
93 #define GICC_IAR 0x000C /* v1 ICCIAR */
94 #define GICC_EOIR 0x0010 /* v1 ICCEOIR */
95 #define GICC_RPR 0x0014 /* v1 ICCRPR */
96 #define GICC_HPPIR 0x0018 /* v1 ICCHPIR */
97 #define GICC_ABPR 0x001C /* v1 ICCABPR */
98 #define GICC_IIDR 0x00FC /* v1 ICCIIDR*/
100 /* TYPER Registers */
/* GICD_TYPER bit 10: the distributor implements the Security Extensions. */
101 #define GICD_TYPER_SECURITYEXT 0x400
102 #define GIC_SUPPORT_SECEXT(_sc) \
103 ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
/* Reset value for GICD_ICFGR: all SPIs level-sensitive (overridable). */
106 #ifndef GIC_DEFAULT_ICFGR_INIT
107 #define GIC_DEFAULT_ICFGR_INIT 0x00000000
/*
 * Per-interrupt software state.
 * NOTE(review): the enclosing "struct gic_irqsrc {" opener is not visible in
 * this view — these fields belong to it.
 */
112 struct intr_irqsrc gi_isrc;
114 enum intr_polarity gi_pol;
115 enum intr_trigger gi_trig;
/* gi_flags bits: */
116 #define GI_FLAG_EARLY_EOI (1 << 0)
117 #define GI_FLAG_MSI (1 << 1) /* This interrupt source should only */
118 /* be used for MSI/MSI-X interrupts */
119 #define GI_FLAG_MSI_USED (1 << 2) /* This irq is already allocated */
120 /* for a MSI/MSI-X interrupt */
/* Round-robin cursor used when binding unpinned SPIs to CPUs. */
124 static u_int gic_irq_cpu;
125 static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);
/* SGI number -> IPI number mapping; SGIs are handed out in order. */
128 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
129 static u_int sgi_first_unused = GIC_FIRST_SGI;
/* Convenience accessor for the intr_irqsrc embedded in a gic_irqsrc. */
132 #define GIC_INTR_ISRC(sc, irq) (&sc->gic_irqs[irq].gi_isrc)
/* FDT "compatible" strings this driver attaches to. */
134 static struct ofw_compat_data compat_data[] = {
135 {"arm,gic", true}, /* Non-standard, used in FreeBSD dts. */
136 {"arm,gic-400", true},
137 {"arm,cortex-a15-gic", true},
138 {"arm,cortex-a9-gic", true},
139 {"arm,cortex-a7-gic", true},
140 {"arm,arm11mp-gic", true},
141 {"brcm,brahma-b15-gic", true},
142 {"qcom,msm-qgic2", true},
/*
 * Memory resources: distributor first, CPU interface second; optional
 * parent IRQ for the cascaded case.
 * NOTE(review): the array terminators/closers are not visible in this view.
 */
147 static struct resource_spec arm_gic_spec[] = {
148 { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Distributor registers */
149 { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* CPU Interrupt Intf. registers */
151 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
/* Spurious-interrupt logging defaults on for INVARIANTS kernels on arm. */
157 #if defined(__arm__) && defined(INVARIANTS)
158 static int gic_debug_spurious = 1;
160 static int gic_debug_spurious = 0;
162 TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);
/* Per-CPU GIC target mask (ITARGETSR byte), indexed by cpuid; used for IPIs. */
164 static u_int arm_gic_map[MAXCPU];
/* Singleton softc for the legacy (non-INTRNG) entry points below. */
166 static struct arm_gic_softc *gic_sc = NULL;
/* Bus-space accessors: _c_ = CPU interface, _d_ = distributor. */
168 #define gic_c_read_4(_sc, _reg) \
169 bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg))
170 #define gic_c_write_4(_sc, _reg, _val) \
171 bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val))
172 #define gic_d_read_4(_sc, _reg) \
173 bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg))
174 #define gic_d_write_1(_sc, _reg, _val) \
175 bus_space_write_1((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
176 #define gic_d_write_4(_sc, _reg, _val) \
177 bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
/* Legacy (pre-INTRNG) hooks installed into arm_post_filter/arm_config_irq. */
180 static int gic_config_irq(int irq, enum intr_trigger trig,
181 enum intr_polarity pol);
182 static void gic_post_filter(void *);
/* Enable delivery of one interrupt via the distributor set-enable register. */
187 gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
190 gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq))
/* Disable delivery of one interrupt via the distributor clear-enable register. */
194 gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
197 gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq))
/*
 * Discover which CPU-interface target bit corresponds to the calling CPU
 * by reading the (banked) ITARGETSR registers for SGIs/PPIs.
 * NOTE(review): the body after the loop is partially missing in this view.
 */
202 gic_cpu_mask(struct arm_gic_softc *sc)
207 /* Read the current cpuid mask by reading ITARGETSR{0..7} */
208 for (i = 0; i < 8; i++) {
209 mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
213 /* No mask found, assume we are on CPU interface 0 */
217 /* Collect the mask in the lower byte */
/*
 * Per-CPU bring-up of the GIC on an application processor (INTRNG variant):
 * record this CPU's target mask, reset priorities, enable the CPU interface
 * and distributor, then unmask any SGIs/PPIs already set up on the boot CPU.
 */
227 arm_gic_init_secondary(device_t dev)
229 struct arm_gic_softc *sc = device_get_softc(dev);
232 /* Set the mask so we can find this CPU to send it IPIs */
233 cpu = PCPU_GET(cpuid);
234 arm_gic_map[cpu] = gic_cpu_mask(sc);
/* Highest priority (0) for all interrupts; IPRIORITYR covers 4 irqs/reg. */
236 for (irq = 0; irq < sc->nirqs; irq += 4)
237 gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);
239 /* Set all the interrupts to be in Group 0 (secure) */
240 for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
241 gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
244 /* Enable CPU interface */
245 gic_c_write_4(sc, GICC_CTLR, 1);
247 /* Set priority mask register. */
248 gic_c_write_4(sc, GICC_PMR, 0xff);
250 /* Enable interrupt distribution */
251 gic_d_write_4(sc, GICD_CTLR, 0x01);
253 /* Unmask attached SGI interrupts. */
254 for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
255 if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
256 gic_irq_unmask(sc, irq);
258 /* Unmask attached PPI interrupts. */
259 for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
260 if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
261 gic_irq_unmask(sc, irq);
/*
 * Per-CPU bring-up, legacy (non-INTRNG) variant: same distributor/CPU
 * interface setup as above, but unconditionally enables the banked
 * per-CPU timer interrupts (PPIs 27/29/30) instead of consulting isrcs.
 * NOTE(review): the two variants are presumably selected by an #ifdef not
 * visible in this view — confirm against the full file.
 */
265 arm_gic_init_secondary(device_t dev)
267 struct arm_gic_softc *sc = device_get_softc(dev);
270 /* Set the mask so we can find this CPU to send it IPIs */
271 arm_gic_map[PCPU_GET(cpuid)] = gic_cpu_mask(sc);
273 for (i = 0; i < sc->nirqs; i += 4)
274 gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
276 /* Set all the interrupts to be in Group 0 (secure) */
277 for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
278 gic_d_write_4(sc, GICD_IGROUPR(i), 0);
281 /* Enable CPU interface */
282 gic_c_write_4(sc, GICC_CTLR, 1);
284 /* Set priority mask register. */
285 gic_c_write_4(sc, GICC_PMR, 0xff);
287 /* Enable interrupt distribution */
288 gic_d_write_4(sc, GICD_CTLR, 0x01);
291 * Activate the timer interrupts: virtual, secure, and non-secure.
293 gic_d_write_4(sc, GICD_ISENABLER(27), GICD_I_MASK(27));
294 gic_d_write_4(sc, GICD_ISENABLER(29), GICD_I_MASK(29));
295 gic_d_write_4(sc, GICD_ISENABLER(30), GICD_I_MASK(30));
/*
 * Decode an FDT interrupt specifier targeting this GIC into a flat IRQ
 * number plus trigger/polarity. Handles both 1-cell and 3-cell encodings;
 * caches #interrupt-cells and the controller phandle in function statics.
 */
302 gic_decode_fdt(phandle_t iparent, pcell_t *intr, int *interrupt,
/* Only decode specifiers whose parent is a known-compatible GIC. */
305 static u_int num_intr_cells;
306 static phandle_t self;
307 struct ofw_compat_data *ocd;
310 for (ocd = compat_data; ocd->ocd_str != NULL; ocd++) {
311 if (ofw_bus_node_is_compatible(iparent, ocd->ocd_str)) {
/* Lazily read #interrupt-cells from the controller node. */
320 if (num_intr_cells == 0) {
321 if (OF_searchencprop(OF_node_from_xref(iparent),
322 "#interrupt-cells", &num_intr_cells,
323 sizeof(num_intr_cells)) == -1) {
/* 1-cell form: raw IRQ number, trigger/polarity left to defaults. */
328 if (num_intr_cells == 1) {
329 *interrupt = fdt32_to_cpu(intr[0]);
330 *trig = INTR_TRIGGER_CONFORM;
331 *pol = INTR_POLARITY_CONFORM;
/* 3-cell form: cell 0 selects SPI (0) vs PPI; cell 1 is the offset. */
333 if (fdt32_to_cpu(intr[0]) == 0)
334 *interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_SPI;
336 *interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_PPI;
338 * In intr[2], bits[3:0] are trigger type and level flags.
339 * 1 = low-to-high edge triggered
340 * 2 = high-to-low edge triggered
341 * 4 = active high level-sensitive
342 * 8 = active low level-sensitive
343 * The hardware only supports active-high-level or rising-edge
/* Reject low-polarity configurations (0x2|0x8) on SPIs: unsupported. */
346 if (*interrupt >= GIC_FIRST_SPI &&
347 fdt32_to_cpu(intr[2]) & 0x0a) {
348 printf("unsupported trigger/polarity configuration "
349 "0x%02x\n", fdt32_to_cpu(intr[2]) & 0x0f);
351 *pol = INTR_POLARITY_CONFORM;
352 if (fdt32_to_cpu(intr[2]) & 0x03)
353 *trig = INTR_TRIGGER_EDGE;
355 *trig = INTR_TRIGGER_LEVEL;
/*
 * Allocate and register one interrupt source per hardware IRQ with the
 * INTRNG framework, named by class: i<N> = SGI (IPI), p<N> = PPI, s<N> = SPI.
 * On registration failure the whole array is freed.
 */
363 arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
367 struct gic_irqsrc *irqs;
368 struct intr_irqsrc *isrc;
371 irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
374 name = device_get_nameunit(sc->gic_dev);
375 for (irq = 0; irq < num; irq++) {
376 irqs[irq].gi_irq = irq;
/* Trigger/polarity start as CONFORM until configured by a consumer. */
377 irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
378 irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
380 isrc = &irqs[irq].gi_isrc;
381 if (irq <= GIC_LAST_SGI) {
382 error = intr_isrc_register(isrc, sc->gic_dev,
383 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
384 } else if (irq <= GIC_LAST_PPI) {
385 error = intr_isrc_register(isrc, sc->gic_dev,
386 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
388 error = intr_isrc_register(isrc, sc->gic_dev, 0,
389 "%s,s%u", name, irq - GIC_FIRST_SPI);
392 /* XXX call intr_isrc_deregister() */
393 free(irqs, M_DEVBUF);
/*
 * Mark a contiguous range of SPIs as reserved for MSI/MSI-X use (called by
 * the GICv2m child). MSIs are always edge-triggered, active-high; the
 * KASSERTs verify the range is unused and unconfigured.
 */
403 arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
405 struct arm_gic_softc *sc;
408 sc = device_get_softc(dev);
410 KASSERT((start + count) < sc->nirqs,
411 ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
412 start, count, sc->nirqs));
413 for (i = 0; i < count; i++) {
414 KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
415 ("%s: MSI interrupt %d already has a handler", __func__,
417 KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
418 ("%s: MSI interrupt %d already has a polarity", __func__,
420 KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
421 ("%s: MSI interrupt %d already has a trigger", __func__,
/* Pre-configure: MSIs are edge/high by definition; flag as MSI-only. */
423 sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
424 sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
425 sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
/*
 * Attach: map distributor and CPU-interface registers, size the IRQ space
 * from GICD_TYPER, register interrupt sources, then initialize the
 * distributor and this (boot) CPU's interface.
 */
431 arm_gic_attach(device_t dev)
433 struct arm_gic_softc *sc;
435 uint32_t icciidr, mask, nirqs;
440 sc = device_get_softc(dev);
442 if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
443 device_printf(dev, "could not allocate resources\n");
450 /* Initialize mutex */
451 mtx_init(&sc->mutex, "GIC lock", NULL, MTX_SPIN);
453 /* Distributor Interface */
454 sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]);
455 sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]);
/* CPU interface is the second memory resource. */
458 sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]);
459 sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]);
461 /* Disable interrupt forwarding to the CPU interface */
462 gic_d_write_4(sc, GICD_CTLR, 0x00);
464 /* Get the number of interrupts */
465 sc->typer = gic_d_read_4(sc, GICD_TYPER);
466 nirqs = GICD_TYPER_I_NUM(sc->typer);
469 if (arm_gic_register_isrcs(sc, nirqs)) {
470 device_printf(dev, "could not register irqs\n");
476 /* Set up function pointers */
477 arm_post_filter = gic_post_filter;
478 arm_config_irq = gic_config_irq;
/*
 * NOTE(review): this reads the CPU-interface IIDR but decodes it with the
 * distributor GICD_IIDR_* field macros — the two registers have different
 * layouts per the GIC architecture spec; confirm the intended fields.
 */
481 icciidr = gic_c_read_4(sc, GICC_IIDR);
483 "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
484 GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
485 GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs,
487 sc->gic_iidr = icciidr;
490 /* Set all global interrupts to be level triggered, active low. */
491 for (i = 32; i < sc->nirqs; i += 16) {
492 gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
495 /* Disable all interrupts. */
496 for (i = 32; i < sc->nirqs; i += 32) {
497 gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
500 /* Find the current cpu mask */
501 mask = gic_cpu_mask(sc);
502 /* Set the mask so we can find this CPU to send it IPIs */
503 arm_gic_map[PCPU_GET(cpuid)] = mask;
504 /* Set all four targets to this cpu */
508 for (i = 0; i < sc->nirqs; i += 4) {
509 gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
511 gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
515 /* Set all the interrupts to be in Group 0 (secure) */
516 for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
517 gic_d_write_4(sc, GICD_IGROUPR(i), 0);
520 /* Enable CPU interface */
521 gic_c_write_4(sc, GICC_CTLR, 1);
523 /* Set priority mask register. */
524 gic_c_write_4(sc, GICC_PMR, 0xff);
526 /* Enable interrupt distribution */
527 gic_d_write_4(sc, GICD_CTLR, 0x01);
/* Detach: release the isrc array and the bus resources. */
538 arm_gic_detach(device_t dev)
541 struct arm_gic_softc *sc;
543 sc = device_get_softc(dev);
545 if (sc->gic_irqs != NULL)
546 free(sc->gic_irqs, M_DEVBUF);
548 bus_release_resources(dev, arm_gic_spec, sc->gic_res);
/*
 * Print a child device's resource usage (mem + irq) in the standard
 * "device at gicN" announcement line.
 */
556 arm_gic_print_child(device_t bus, device_t child)
558 struct resource_list *rl;
561 rv = bus_print_child_header(bus, child);
563 rl = BUS_GET_RESOURCE_LIST(bus, child);
565 rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
567 rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
570 rv += bus_print_child_footer(bus, child);
/*
 * Allocate a memory resource for a child (e.g. the GICv2m frame). Default
 * (wildcard) requests come from the child's resource list; explicit ranges
 * are translated through the "ranges" property before delegating to
 * bus_generic_alloc_resource().
 */
575 static struct resource *
576 arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
577 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
579 struct arm_gic_softc *sc;
580 struct resource_list_entry *rle;
581 struct resource_list *rl;
584 KASSERT(type == SYS_RES_MEMORY, ("Invalid resoure type %x", type));
586 sc = device_get_softc(bus);
589 * Request for the default allocation with a given rid: use resource
590 * list stored in the local device info.
592 if (RMAN_IS_DEFAULT_RANGE(start, end)) {
593 rl = BUS_GET_RESOURCE_LIST(bus, child);
/* NOTE(review): dead branch — the KASSERT above already excludes IOPORT. */
595 if (type == SYS_RES_IOPORT)
596 type = SYS_RES_MEMORY;
598 rle = resource_list_find(rl, type, *rid);
601 device_printf(bus, "no default resources for "
602 "rid = %d, type = %d\n", *rid, type);
610 /* Remap through ranges property */
611 for (j = 0; j < sc->nranges; j++) {
612 if (start >= sc->ranges[j].bus && end <
613 sc->ranges[j].bus + sc->ranges[j].size) {
614 start -= sc->ranges[j].bus;
615 start += sc->ranges[j].host;
616 end -= sc->ranges[j].bus;
617 end += sc->ranges[j].host;
/* Fell off the loop without a match: the address is unmappable. */
621 if (j == sc->nranges && sc->nranges != 0) {
623 device_printf(bus, "Could not map resource "
624 "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);
629 return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
/*
 * Expose GIC instance variables to children: hardware revision (decoded
 * from the cached IIDR) and the bus type (FDT vs ACPI).
 */
634 arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
636 struct arm_gic_softc *sc;
638 sc = device_get_softc(dev);
641 case GIC_IVAR_HW_REV:
642 KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3 &&
643 GICD_IIDR_VAR(sc->gic_iidr) != 0,
644 ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
645 GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
646 *result = GICD_IIDR_VAR(sc->gic_iidr);
/* GIC_IVAR_BUS case (label not visible in this view). */
649 KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
650 ("arm_gic_read_ivar: Unknown bus type"));
651 KASSERT(sc->gic_bus <= GIC_BUS_MAX,
652 ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
653 *result = sc->gic_bus;
/*
 * Top-level interrupt dispatch filter. Reads GICC_IAR, then dispatches
 * IPIs (with early EOI) or regular interrupts through INTRNG. The EOI
 * ordering rules below are load-bearing — do not reorder.
 */
661 arm_gic_intr(void *arg)
663 struct arm_gic_softc *sc = arg;
664 struct gic_irqsrc *gi;
665 uint32_t irq_active_reg, irq;
666 struct trapframe *tf;
668 irq_active_reg = gic_c_read_4(sc, GICC_IAR);
669 irq = irq_active_reg & 0x3FF;
672 * 1. We do EOI here because recent read value from active interrupt
673 * register must be used for it. Another approach is to save this
674 * value into associated interrupt source.
675 * 2. EOI must be done on same CPU where interrupt has fired. Thus
676 * we must ensure that interrupted thread does not migrate to
678 * 3. EOI cannot be delayed by any preemption which could happen on
679 * critical_exit() used in MI intr code, when interrupt thread is
680 * scheduled. See next point.
681 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
682 * an action and any use of critical_exit() could break this
683 * assumption. See comments within smp_rendezvous_action().
684 * 5. We always return FILTER_HANDLED as this is an interrupt
685 * controller dispatch function. Otherwise, in cascaded interrupt
686 * case, the whole interrupt subtree would be masked.
/* IAR values >= nirqs (e.g. 0x3FF) indicate a spurious interrupt. */
689 if (irq >= sc->nirqs) {
690 if (gic_debug_spurious)
691 device_printf(sc->gic_dev,
692 "Spurious interrupt detected: last irq: %d on CPU%d\n",
693 sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid)),
694 return (FILTER_HANDLED);
697 tf = curthread->td_intr_frame;
699 gi = sc->gic_irqs + irq;
701 * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement
702 * as compiler complains that comparing u_int >= 0 is always true.
704 if (irq <= GIC_LAST_SGI) {
706 /* Call EOI for all IPI before dispatch. */
707 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
708 intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
/* SGI received on a non-SMP kernel: acknowledge and complain. */
711 device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
712 irq - GIC_FIRST_SGI);
713 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
/* Regular (PPI/SPI) interrupt path. */
718 if (gic_debug_spurious)
719 sc->last_irq[PCPU_GET(cpuid)] = irq;
/* Edge-triggered sources are EOI'd before dispatch (GI_FLAG_EARLY_EOI). */
720 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
721 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
/* No handler consumed the interrupt: mask it so it cannot storm. */
723 if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
724 gic_irq_mask(sc, irq);
725 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
726 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
727 device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
/* Loop back for the next pending interrupt, if any. */
731 arm_irq_memory_barrier(irq);
732 irq_active_reg = gic_c_read_4(sc, GICC_IAR);
733 irq = irq_active_reg & 0x3FF;
737 return (FILTER_HANDLED);
/*
 * Program trigger/polarity for one SPI in GICD_ICFGR (2 config bits per
 * interrupt, 16 interrupts per register). SGIs/PPIs are fixed in hardware
 * and skipped. Serialized by the softc spin mutex.
 */
741 gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
742 enum intr_polarity pol)
/* SGI/PPI configuration is read-only; nothing to do. */
747 if (irq < GIC_FIRST_SPI)
750 mtx_lock_spin(&sc->mutex);
/* Extract this interrupt's 2-bit field. */
752 reg = gic_d_read_4(sc, GICD_ICFGR(irq));
753 mask = (reg >> 2*(irq % 16)) & 0x3;
755 if (pol == INTR_POLARITY_LOW) {
756 mask &= ~GICD_ICFGR_POL_MASK;
757 mask |= GICD_ICFGR_POL_LOW;
758 } else if (pol == INTR_POLARITY_HIGH) {
759 mask &= ~GICD_ICFGR_POL_MASK;
760 mask |= GICD_ICFGR_POL_HIGH;
763 if (trig == INTR_TRIGGER_LEVEL) {
764 mask &= ~GICD_ICFGR_TRIG_MASK;
765 mask |= GICD_ICFGR_TRIG_LVL;
766 } else if (trig == INTR_TRIGGER_EDGE) {
767 mask &= ~GICD_ICFGR_TRIG_MASK;
768 mask |= GICD_ICFGR_TRIG_EDGE;
/* Merge the updated 2-bit field back and write the register. */
772 reg = reg & ~(0x3 << 2*(irq % 16));
773 reg = reg | (mask << 2*(irq % 16));
774 gic_d_write_4(sc, GICD_ICFGR(irq), reg);
776 mtx_unlock_spin(&sc->mutex);
/*
 * Route an SPI to the set of CPUs in 'cpus' by writing the per-interrupt
 * byte of GICD_ITARGETSR. GICv2 supports at most 8 CPU interfaces, so
 * any requested CPU >= min(mp_ncpus, 8) is an error.
 */
780 gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
782 uint32_t cpu, end, mask;
784 end = min(mp_ncpus, 8);
/* Reject a request naming a CPU the GIC cannot target. */
785 for (cpu = end; cpu < MAXCPU; cpu++)
786 if (CPU_ISSET(cpu, cpus))
/* OR together the target-register bits of every requested CPU. */
789 for (mask = 0, cpu = 0; cpu < end; cpu++)
790 if (CPU_ISSET(cpu, cpus))
791 mask |= arm_gic_map[cpu];
793 gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
/*
 * Translate an FDT 3-cell interrupt specifier into (irq, polarity,
 * trigger). Cell layout is documented inline below.
 */
799 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
800 enum intr_polarity *polp, enum intr_trigger *trigp)
805 *polp = INTR_POLARITY_CONFORM;
806 *trigp = INTR_TRIGGER_CONFORM;
813 * The 1st cell is the interrupt type:
816 * The 2nd cell contains the interrupt number:
819 * The 3rd cell is the flags, encoded as follows:
820 * bits[3:0] trigger type and level flags
821 * 1 = low-to-high edge triggered
822 * 2 = high-to-low edge triggered
823 * 4 = active high level-sensitive
824 * 8 = active low level-sensitive
825 * bits[15:8] PPI interrupt cpu mask
826 * Each bit corresponds to each of the 8 possible cpus
827 * attached to the GIC. A bit set to '1' indicated
828 * the interrupt is wired to that CPU.
/* SPI case: range check is deferred to gic_map_intr(). */
832 irq = GIC_FIRST_SPI + cells[1];
833 /* SPI irq is checked later. */
/* PPI case: validate the number immediately. */
836 irq = GIC_FIRST_PPI + cells[1];
837 if (irq > GIC_LAST_PPI) {
838 device_printf(dev, "unsupported PPI interrupt "
839 "number %u\n", cells[1]);
844 device_printf(dev, "unsupported interrupt type "
845 "configuration %u\n", cells[0]);
/* Reject reserved bits and active-low configurations. */
849 tripol = cells[2] & 0xff;
850 if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
852 device_printf(dev, "unsupported trigger/polarity "
853 "configuration 0x%02x\n", tripol);
856 *polp = INTR_POLARITY_CONFORM;
857 *trigp = tripol & FDT_INTR_EDGE_MASK ?
858 INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
/*
 * Resolve MSI map data to its pre-allocated gic_irqsrc; MSIs are always
 * edge-triggered, active-high.
 */
866 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
867 enum intr_polarity *polp, enum intr_trigger *trigp)
869 struct gic_irqsrc *gi;
871 /* Map a non-GICv2m MSI */
872 gi = (struct gic_irqsrc *)msi_data->isrc;
878 /* MSI/MSI-X interrupts are always edge triggered with high polarity */
879 *polp = INTR_POLARITY_HIGH;
880 *trigp = INTR_TRIGGER_EDGE;
/*
 * Dispatch interrupt map data by type (FDT / ACPI / MSI) to the matching
 * decoder, then validate the resulting irq number, polarity and trigger.
 */
886 gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
887 enum intr_polarity *polp, enum intr_trigger *trigp)
890 enum intr_polarity pol;
891 enum intr_trigger trig;
892 struct arm_gic_softc *sc;
893 struct intr_map_data_msi *dam;
895 struct intr_map_data_fdt *daf;
898 struct intr_map_data_acpi *daa;
901 sc = device_get_softc(dev);
902 switch (data->type) {
904 case INTR_MAP_DATA_FDT:
905 daf = (struct intr_map_data_fdt *)data;
906 if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
/* MSI-reserved interrupts must never be handed out via FDT. */
909 KASSERT(irq >= sc->nirqs ||
910 (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
911 ("%s: Attempting to map a MSI interrupt from FDT",
916 case INTR_MAP_DATA_ACPI:
917 daa = (struct intr_map_data_acpi *)data;
923 case INTR_MAP_DATA_MSI:
925 dam = (struct intr_map_data_msi *)data;
926 if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
/* Common validation of whatever the decoder produced. */
933 if (irq >= sc->nirqs)
935 if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
936 pol != INTR_POLARITY_HIGH)
938 if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
939 trig != INTR_TRIGGER_LEVEL)
/* PIC_MAP_INTR method: resolve map data to the corresponding isrc. */
951 arm_gic_map_intr(device_t dev, struct intr_map_data *data,
952 struct intr_irqsrc **isrcp)
956 struct arm_gic_softc *sc;
958 error = gic_map_intr(dev, data, &irq, NULL, NULL);
960 sc = device_get_softc(dev);
961 *isrcp = GIC_INTR_ISRC(sc, irq);
/*
 * PIC_SETUP_INTR method: determine trigger/polarity for the source (from
 * map data, or defaults), reject conflicting re-configuration, program
 * the hardware and bind the interrupt to a CPU.
 */
967 arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
968 struct resource *res, struct intr_map_data *data)
970 struct arm_gic_softc *sc = device_get_softc(dev);
971 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
972 enum intr_trigger trig;
973 enum intr_polarity pol;
/* MSI sources were pre-configured in arm_gic_reserve_msi_range(). */
975 if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
979 KASSERT(pol == INTR_POLARITY_HIGH,
980 ("%s: MSI interrupts must be active-high", __func__));
981 KASSERT(trig == INTR_TRIGGER_EDGE,
982 ("%s: MSI interrupts must be edge triggered", __func__));
983 } else if (data != NULL) {
986 /* Get config for resource. */
987 if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
/* No map data supplied: leave configuration to CONFORM defaults. */
991 pol = INTR_POLARITY_CONFORM;
992 trig = INTR_TRIGGER_CONFORM;
995 /* Compare config if this is not first setup. */
996 if (isrc->isrc_handlers != 0) {
997 if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
998 (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
1004 /* For MSI/MSI-X we should have already configured these */
1005 if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
1006 if (pol == INTR_POLARITY_CONFORM)
1007 pol = INTR_POLARITY_LOW; /* just pick some */
1008 if (trig == INTR_TRIGGER_CONFORM)
1009 trig = INTR_TRIGGER_EDGE; /* just pick some */
1014 /* Edge triggered interrupts need an early EOI sent */
1015 if (gi->gi_trig == INTR_TRIGGER_EDGE)
1016 gi->gi_flags |= GI_FLAG_EARLY_EOI;
1020 * XXX - In case that per CPU interrupt is going to be enabled in time
1021 * when SMP is already started, we need some IPI call which
1022 * enables it on others CPUs. Further, it's more complicated as
1023 * pic_enable_source() and pic_disable_source() should act on
1024 * per CPU basis only. Thus, it should be solved here somehow.
1026 if (isrc->isrc_flags & INTR_ISRCF_PPI)
1027 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
1029 gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
1030 arm_gic_bind_intr(dev, isrc);
/*
 * PIC_TEARDOWN_INTR method: once the last handler is gone, return a
 * non-MSI source's configuration to CONFORM so it can be re-mapped.
 */
1035 arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
1036 struct resource *res, struct intr_map_data *data)
1038 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1040 if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
1041 gi->gi_pol = INTR_POLARITY_CONFORM;
1042 gi->gi_trig = INTR_TRIGGER_CONFORM;
/* PIC_ENABLE_INTR: barrier (DMA visibility) then unmask in the distributor. */
1048 arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
1050 struct arm_gic_softc *sc = device_get_softc(dev);
1051 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1053 arm_irq_memory_barrier(gi->gi_irq);
1054 gic_irq_unmask(sc, gi->gi_irq);
/* PIC_DISABLE_INTR: mask the source in the distributor. */
1058 arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
1060 struct arm_gic_softc *sc = device_get_softc(dev);
1061 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1063 gic_irq_mask(sc, gi->gi_irq);
/* PIC_PRE_ITHREAD: mask + EOI before handing off to the interrupt thread. */
1067 arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
1069 struct arm_gic_softc *sc = device_get_softc(dev);
1070 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1072 arm_gic_disable_intr(dev, isrc);
1073 gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
/* PIC_POST_ITHREAD: re-enable the source after the ithread has run. */
1077 arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
1080 arm_irq_memory_barrier(0);
1081 arm_gic_enable_intr(dev, isrc);
/* PIC_POST_FILTER: EOI for level-triggered sources (edge EOI'd earlier). */
1085 arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
1087 struct arm_gic_softc *sc = device_get_softc(dev);
1088 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1090 /* EOI for edge-triggered done earlier. */
1091 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
1094 arm_irq_memory_barrier(0);
1095 gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
/*
 * PIC_BIND_INTR: route an SPI to its cpuset; an empty cpuset gets the
 * next CPU in round-robin order. SGIs/PPIs are per-CPU and not routable.
 */
1099 arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
1101 struct arm_gic_softc *sc = device_get_softc(dev);
1102 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1104 if (gi->gi_irq < GIC_FIRST_SPI)
1107 if (CPU_EMPTY(&isrc->isrc_cpu)) {
1108 gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
1109 CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
1111 return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
/*
 * PIC_IPI_SEND (INTRNG): build the SGI target list from arm_gic_map and
 * trigger the software-generated interrupt via GICD_SGIR.
 */
1116 arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
1119 struct arm_gic_softc *sc = device_get_softc(dev);
1120 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1121 uint32_t val = 0, i;
1123 for (i = 0; i < MAXCPU; i++)
1124 if (CPU_ISSET(i, &cpus))
1125 val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
1127 gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
/*
 * PIC_IPI_SETUP: hand out the next free SGI for the given IPI number and
 * record the mapping used by the dispatch path.
 */
1131 arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
1133 struct intr_irqsrc *isrc;
1134 struct arm_gic_softc *sc = device_get_softc(dev);
1136 if (sgi_first_unused > GIC_LAST_SGI)
1139 isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1140 sgi_to_ipi[sgi_first_unused++] = ipi;
1142 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
/*
 * Legacy (non-INTRNG) IAR poll: return the next active IRQ number, or a
 * sentinel for spurious (0x3FF). SGIs are EOI'd immediately — see below.
 */
1150 arm_gic_next_irq(struct arm_gic_softc *sc, int last_irq)
1152 uint32_t active_irq;
1154 active_irq = gic_c_read_4(sc, GICC_IAR);
1157 * Immediately EOIR the SGIs, because doing so requires the other
1158 * bits (ie CPU number), not just the IRQ number, and we do not
1159 * have this information later.
1161 if ((active_irq & 0x3ff) <= GIC_LAST_SGI)
1162 gic_c_write_4(sc, GICC_EOIR, active_irq);
1163 active_irq &= 0x3FF;
/* 0x3FF in IAR means "no pending interrupt" (spurious). */
1165 if (active_irq == 0x3FF) {
1167 device_printf(sc->gic_dev,
1168 "Spurious interrupt detected\n");
/*
 * Public trigger/polarity configuration entry point (legacy API). Unlike
 * gic_config(), all arguments are validated because external code may call
 * this directly. Programs the 2-bit GICD_ICFGR field under the spin mutex.
 */
1176 arm_gic_config(device_t dev, int irq, enum intr_trigger trig,
1177 enum intr_polarity pol)
1179 struct arm_gic_softc *sc = device_get_softc(dev);
1183 /* Function is public-accessible, so validate input arguments */
1184 if ((irq < 0) || (irq >= sc->nirqs))
1186 if ((trig != INTR_TRIGGER_EDGE) && (trig != INTR_TRIGGER_LEVEL) &&
1187 (trig != INTR_TRIGGER_CONFORM))
1189 if ((pol != INTR_POLARITY_HIGH) && (pol != INTR_POLARITY_LOW) &&
1190 (pol != INTR_POLARITY_CONFORM))
1193 mtx_lock_spin(&sc->mutex);
/* Extract this interrupt's 2-bit config field (16 interrupts per register). */
1195 reg = gic_d_read_4(sc, GICD_ICFGR(irq));
1196 mask = (reg >> 2*(irq % 16)) & 0x3;
1198 if (pol == INTR_POLARITY_LOW) {
1199 mask &= ~GICD_ICFGR_POL_MASK;
1200 mask |= GICD_ICFGR_POL_LOW;
1201 } else if (pol == INTR_POLARITY_HIGH) {
1202 mask &= ~GICD_ICFGR_POL_MASK;
1203 mask |= GICD_ICFGR_POL_HIGH;
1206 if (trig == INTR_TRIGGER_LEVEL) {
1207 mask &= ~GICD_ICFGR_TRIG_MASK;
1208 mask |= GICD_ICFGR_TRIG_LVL;
1209 } else if (trig == INTR_TRIGGER_EDGE) {
1210 mask &= ~GICD_ICFGR_TRIG_MASK;
1211 mask |= GICD_ICFGR_TRIG_EDGE;
/* Merge the updated field back and write the register. */
1215 reg = reg & ~(0x3 << 2*(irq % 16));
1216 reg = reg | (mask << 2*(irq % 16));
1217 gic_d_write_4(sc, GICD_ICFGR(irq), reg);
1219 mtx_unlock_spin(&sc->mutex);
/* Fixed typo in the diagnostic: "gic_config_irg" -> "gic_config_irq". */
1224 device_printf(dev, "gic_config_irq, invalid parameters\n");
/* Legacy mask: clear-enable the irq, then EOI it (flagged as dubious). */
1230 arm_gic_mask(device_t dev, int irq)
1232 struct arm_gic_softc *sc = device_get_softc(dev);
1234 gic_d_write_4(sc, GICD_ICENABLER(irq), (1UL << (irq & 0x1F)));
1235 gic_c_write_4(sc, GICC_EOIR, irq); /* XXX - not allowed */
/* Legacy unmask: barrier for non-SGI sources, then set-enable the irq. */
1239 arm_gic_unmask(device_t dev, int irq)
1241 struct arm_gic_softc *sc = device_get_softc(dev);
1243 if (irq > GIC_LAST_SGI)
1244 arm_irq_memory_barrier(irq);
1246 gic_d_write_4(sc, GICD_ISENABLER(irq), (1UL << (irq & 0x1F)));
/* Legacy IPI send: OR target masks of all requested CPUs into GICD_SGIR. */
1251 arm_gic_ipi_send(device_t dev, cpuset_t cpus, u_int ipi)
1253 struct arm_gic_softc *sc = device_get_softc(dev);
1254 uint32_t val = 0, i;
1256 for (i = 0; i < MAXCPU; i++)
1257 if (CPU_ISSET(i, &cpus))
1258 val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
1260 gic_d_write_4(sc, GICD_SGIR, val | ipi);
/*
 * Legacy IPI read hook.
 * NOTE(review): the bound check uses "> 16" while SGIs number 0..15 —
 * looks like an off-by-one ("> 16" vs ">= 16"); confirm against the full
 * function body, which is not visible in this view.
 */
1264 arm_gic_ipi_read(device_t dev, int i)
1269 * The intr code will automagically give the frame pointer
1270 * if the interrupt argument is 0.
1272 if ((unsigned int)i > 16)
/* Legacy IPI clear hook (body not visible in this view). */
1281 arm_gic_ipi_clear(device_t dev, int ipi)
/* Legacy post-filter hook: barrier for non-SGI sources, then EOI. */
1288 gic_post_filter(void *arg)
1290 struct arm_gic_softc *sc = gic_sc;
1291 uintptr_t irq = (uintptr_t) arg;
1293 if (irq > GIC_LAST_SGI)
1294 arm_irq_memory_barrier(irq);
1295 gic_c_write_4(sc, GICC_EOIR, irq);
/* Thin wrappers forwarding the legacy MD interrupt API to the singleton. */
1299 gic_config_irq(int irq, enum intr_trigger trig, enum intr_polarity pol)
1302 return (arm_gic_config(gic_sc->gic_dev, irq, trig, pol));
1306 arm_mask_irq(uintptr_t nb)
1309 arm_gic_mask(gic_sc->gic_dev, nb);
1313 arm_unmask_irq(uintptr_t nb)
1316 arm_gic_unmask(gic_sc->gic_dev, nb);
1320 arm_get_next_irq(int last_irq)
1323 return (arm_gic_next_irq(gic_sc, last_irq));
1328 intr_pic_init_secondary(void)
1331 arm_gic_init_secondary(gic_sc->gic_dev);
1335 pic_ipi_send(cpuset_t cpus, u_int ipi)
1338 arm_gic_ipi_send(gic_sc->gic_dev, cpus, ipi);
1345 return (arm_gic_ipi_read(gic_sc->gic_dev, i));
1349 pic_ipi_clear(int ipi)
1352 arm_gic_ipi_clear(gic_sc->gic_dev, ipi);
/* Newbus/INTRNG method table for the GIC device class. */
1357 static device_method_t arm_gic_methods[] = {
1360 DEVMETHOD(bus_print_child, arm_gic_print_child),
1361 DEVMETHOD(bus_add_child, bus_generic_add_child),
1362 DEVMETHOD(bus_alloc_resource, arm_gic_alloc_resource),
1363 DEVMETHOD(bus_release_resource, bus_generic_release_resource),
1364 DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
1365 DEVMETHOD(bus_read_ivar, arm_gic_read_ivar),
1367 /* Interrupt controller interface */
1368 DEVMETHOD(pic_disable_intr, arm_gic_disable_intr),
1369 DEVMETHOD(pic_enable_intr, arm_gic_enable_intr),
1370 DEVMETHOD(pic_map_intr, arm_gic_map_intr),
1371 DEVMETHOD(pic_setup_intr, arm_gic_setup_intr),
1372 DEVMETHOD(pic_teardown_intr, arm_gic_teardown_intr),
1373 DEVMETHOD(pic_post_filter, arm_gic_post_filter),
1374 DEVMETHOD(pic_post_ithread, arm_gic_post_ithread),
1375 DEVMETHOD(pic_pre_ithread, arm_gic_pre_ithread),
1377 DEVMETHOD(pic_bind_intr, arm_gic_bind_intr),
1378 DEVMETHOD(pic_init_secondary, arm_gic_init_secondary),
1379 DEVMETHOD(pic_ipi_send, arm_gic_ipi_send),
1380 DEVMETHOD(pic_ipi_setup, arm_gic_ipi_setup),
1386 DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
1387 sizeof(struct arm_gic_softc));
1391 * GICv2m support -- the GICv2 MSI/MSI-X controller.
/* MSI_TYPER encodes the first reserved SPI (bits 25:16) and count (9:0). */
1394 #define GICV2M_MSI_TYPER 0x008
1395 #define MSI_TYPER_SPI_BASE(x) (((x) >> 16) & 0x3ff)
1396 #define MSI_TYPER_SPI_COUNT(x) (((x) >> 0) & 0x3ff)
1397 #define GICv2M_MSI_SETSPI_NS 0x040
1398 #define GICV2M_MSI_IIDR 0xFCC
/*
 * Attach the GICv2m frame: read the SPI range from MSI_TYPER, reserve it
 * in the parent GIC, and register as an MSI controller.
 */
1401 arm_gicv2m_attach(device_t dev)
1403 struct arm_gicv2m_softc *sc;
1407 sc = device_get_softc(dev);
1410 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1412 if (sc->sc_mem == NULL) {
1413 device_printf(dev, "Unable to allocate resources\n");
1417 typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
1418 sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
1419 sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);
1420 sc->sc_spi_end = sc->sc_spi_start + sc->sc_spi_count;
1422 /* Reserve these interrupts for MSI/MSI-X use */
1423 arm_gic_reserve_msi_range(device_get_parent(dev), sc->sc_spi_start,
1426 mtx_init(&sc->sc_mutex, "GICv2m lock", NULL, MTX_DEF);
1428 intr_msi_register(dev, sc->sc_xref);
1431 device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
1432 sc->sc_spi_start + sc->sc_spi_count - 1);
1438 arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1439 device_t *pic, struct intr_irqsrc **srcs)
1441 struct arm_gic_softc *psc;
1442 struct arm_gicv2m_softc *sc;
1443 int i, irq, end_irq;
1446 KASSERT(powerof2(count), ("%s: bad count", __func__));
1447 KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));
1449 psc = device_get_softc(device_get_parent(dev));
1450 sc = device_get_softc(dev);
1452 mtx_lock(&sc->sc_mutex);
1455 for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
1456 /* Start on an aligned interrupt */
1457 if ((irq & (maxcount - 1)) != 0)
1460 /* Assume we found a valid range until shown otherwise */
1463 /* Check this range is valid */
1464 for (end_irq = irq; end_irq != irq + count; end_irq++) {
1465 /* No free interrupts */
1466 if (end_irq == sc->sc_spi_end) {
1471 KASSERT((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
1472 ("%s: Non-MSI interrupt found", __func__));
1474 /* This is already used */
1475 if ((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
1485 /* Not enough interrupts were found */
1486 if (!found || irq == sc->sc_spi_end) {
1487 mtx_unlock(&sc->sc_mutex);
1491 for (i = 0; i < count; i++) {
1492 /* Mark the interrupt as used */
1493 psc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
1496 mtx_unlock(&sc->sc_mutex);
1498 for (i = 0; i < count; i++)
1499 srcs[i] = (struct intr_irqsrc *)&psc->gic_irqs[irq + i];
1500 *pic = device_get_parent(dev);
1506 arm_gicv2m_release_msi(device_t dev, device_t child, int count,
1507 struct intr_irqsrc **isrc)
1509 struct arm_gicv2m_softc *sc;
1510 struct gic_irqsrc *gi;
1513 sc = device_get_softc(dev);
1515 mtx_lock(&sc->sc_mutex);
1516 for (i = 0; i < count; i++) {
1517 gi = (struct gic_irqsrc *)isrc[i];
1519 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1520 ("%s: Trying to release an unused MSI-X interrupt",
1523 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1525 mtx_unlock(&sc->sc_mutex);
1531 arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
1532 struct intr_irqsrc **isrcp)
1534 struct arm_gicv2m_softc *sc;
1535 struct arm_gic_softc *psc;
1538 psc = device_get_softc(device_get_parent(dev));
1539 sc = device_get_softc(dev);
1541 mtx_lock(&sc->sc_mutex);
1542 /* Find an unused interrupt */
1543 for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
1544 KASSERT((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1545 ("%s: Non-MSI interrupt found", __func__));
1546 if ((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1549 /* No free interrupt was found */
1550 if (irq == sc->sc_spi_end) {
1551 mtx_unlock(&sc->sc_mutex);
1555 /* Mark the interrupt as used */
1556 psc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1557 mtx_unlock(&sc->sc_mutex);
1559 *isrcp = (struct intr_irqsrc *)&psc->gic_irqs[irq];
1560 *pic = device_get_parent(dev);
1566 arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1568 struct arm_gicv2m_softc *sc;
1569 struct gic_irqsrc *gi;
1571 sc = device_get_softc(dev);
1572 gi = (struct gic_irqsrc *)isrc;
1574 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1575 ("%s: Trying to release an unused MSI-X interrupt", __func__));
1577 mtx_lock(&sc->sc_mutex);
1578 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1579 mtx_unlock(&sc->sc_mutex);
1585 arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1586 uint64_t *addr, uint32_t *data)
1588 struct arm_gicv2m_softc *sc = device_get_softc(dev);
1589 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1591 *addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
1597 static device_method_t arm_gicv2m_methods[] = {
1598 /* Device interface */
1599 DEVMETHOD(device_attach, arm_gicv2m_attach),
1602 DEVMETHOD(msi_alloc_msi, arm_gicv2m_alloc_msi),
1603 DEVMETHOD(msi_release_msi, arm_gicv2m_release_msi),
1604 DEVMETHOD(msi_alloc_msix, arm_gicv2m_alloc_msix),
1605 DEVMETHOD(msi_release_msix, arm_gicv2m_release_msix),
1606 DEVMETHOD(msi_map_msi, arm_gicv2m_map_msi),
1612 DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
1613 sizeof(struct arm_gicv2m_softc));