2 * Copyright (c) 2011 The FreeBSD Foundation
5 * Developed by Damjan Marion <damjan.marion@gmail.com>
7 * Based on OMAP4 GIC code by Ben Gray
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. The name of the company nor the name of the author may be used to
18 * endorse or promote products derived from this software without specific
19 * prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include "opt_platform.h"
39 #include <sys/param.h>
40 #include <sys/systm.h>
42 #include <sys/kernel.h>
44 #include <sys/module.h>
45 #include <sys/malloc.h>
49 #include <sys/cpuset.h>
51 #include <sys/mutex.h>
54 #include <sys/sched.h>
60 #include <machine/bus.h>
61 #include <machine/intr.h>
62 #include <machine/smp.h>
64 #include <dev/fdt/fdt_intr.h>
65 #include <dev/ofw/ofw_bus_subr.h>
67 #include <arm/arm/gic.h>
74 /* We are using GICv2 register naming */
76 /* Distributor Registers */
/*
 * Offsets into the GIC Distributor register block.  The trailing comments
 * give the corresponding GICv1 register names.  Each of the bitmap
 * registers (IGROUPR/ISENABLER/...) covers 32 interrupts per 32-bit word;
 * IPRIORITYR/ITARGETSR are byte-per-interrupt arrays.
 */
77 #define GICD_CTLR 0x000 /* v1 ICDDCR */
78 #define GICD_TYPER 0x004 /* v1 ICDICTR */
79 #define GICD_IIDR 0x008 /* v1 ICDIIDR */
80 #define GICD_IGROUPR(n) (0x0080 + ((n) * 4)) /* v1 ICDISR (Interrupt Security) */
81 #define GICD_ISENABLER(n) (0x0100 + ((n) * 4)) /* v1 ICDISER */
82 #define GICD_ICENABLER(n) (0x0180 + ((n) * 4)) /* v1 ICDICER */
83 #define GICD_ISPENDR(n) (0x0200 + ((n) * 4)) /* v1 ICDISPR */
84 #define GICD_ICPENDR(n) (0x0280 + ((n) * 4)) /* v1 ICDICPR */
85 #define GICD_ICACTIVER(n) (0x0380 + ((n) * 4)) /* v1 ICDABR */
86 #define GICD_IPRIORITYR(n) (0x0400 + ((n) * 4)) /* v1 ICDIPR */
87 #define GICD_ITARGETSR(n) (0x0800 + ((n) * 4)) /* v1 ICDIPTR */
88 #define GICD_ICFGR(n) (0x0C00 + ((n) * 4)) /* v1 ICDICFR */
89 #define GICD_SGIR(n) (0x0F00 + ((n) * 4)) /* v1 ICDSGIR */
/* CPU target list goes in GICD_SGIR bits [23:16]. */
90 #define GICD_SGI_TARGET_SHIFT 16
/* CPU Interface Registers (banked per CPU). */
93 #define GICC_CTLR 0x0000 /* v1 ICCICR */
94 #define GICC_PMR 0x0004 /* v1 ICCPMR */
95 #define GICC_BPR 0x0008 /* v1 ICCBPR */
96 #define GICC_IAR 0x000C /* v1 ICCIAR */
97 #define GICC_EOIR 0x0010 /* v1 ICCEOIR */
98 #define GICC_RPR 0x0014 /* v1 ICCRPR */
99 #define GICC_HPPIR 0x0018 /* v1 ICCHPIR */
100 #define GICC_ABPR 0x001C /* v1 ICCABPR */
101 #define GICC_IIDR 0x00FC /* v1 ICCIIDR*/
103 /* TYPER Registers */
/* GICD_TYPER.SecurityExtn: set when the GIC implements Security Extensions. */
104 #define GICD_TYPER_SECURITYEXT 0x400
105 #define GIC_SUPPORT_SECEXT(_sc) \
106 ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
108 /* First bit is a polarity bit (0 - low, 1 - high) */
109 #define GICD_ICFGR_POL_LOW (0 << 0)
110 #define GICD_ICFGR_POL_HIGH (1 << 0)
111 #define GICD_ICFGR_POL_MASK 0x1
112 /* Second bit is a trigger bit (0 - level, 1 - edge) */
113 #define GICD_ICFGR_TRIG_LVL (0 << 1)
114 #define GICD_ICFGR_TRIG_EDGE (1 << 1)
115 #define GICD_ICFGR_TRIG_MASK 0x2
/* Platforms may override the reset value programmed into GICD_ICFGR. */
117 #ifndef GIC_DEFAULT_ICFGR_INIT
118 #define GIC_DEFAULT_ICFGR_INIT 0x00000000
/*
 * Fields of struct gic_irqsrc (the struct header was lost in extraction):
 * per-interrupt state wrapping the generic INTRNG interrupt source.
 */
123 struct intr_irqsrc gi_isrc;
125 enum intr_polarity gi_pol;
126 enum intr_trigger gi_trig;
/* gi_flags values: */
127 #define GI_FLAG_EARLY_EOI (1 << 0)
128 #define GI_FLAG_MSI (1 << 1) /* This interrupt source should only */
129 /* be used for MSI/MSI-X interrupts */
130 #define GI_FLAG_MSI_USED (1 << 2) /* This irq is already allocated */
131 /* for a MSI/MSI-X interrupt */
/* Round-robin cursor used by arm_gic_bind_intr() for unbound SPIs. */
135 static u_int gic_irq_cpu;
136 static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);
/* Mapping from SGI number to the IPI registered on it (SMP only). */
139 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
140 static u_int sgi_first_unused = GIC_FIRST_SGI;
/* Convenience accessor for the intr_irqsrc embedded in gic_irqs[irq]. */
143 #define GIC_INTR_ISRC(sc, irq) (&sc->gic_irqs[irq].gi_isrc)
/*
 * FDT "compatible" strings this driver attaches to.  The terminating
 * NULL sentinel entry was lost in extraction.
 */
145 static struct ofw_compat_data compat_data[] = {
146 {"arm,gic", true}, /* Non-standard, used in FreeBSD dts. */
147 {"arm,gic-400", true},
148 {"arm,cortex-a15-gic", true},
149 {"arm,cortex-a9-gic", true},
150 {"arm,cortex-a7-gic", true},
151 {"arm,arm11mp-gic", true},
152 {"brcm,brahma-b15-gic", true},
153 {"qcom,msm-qgic2", true},
/*
 * Resources the GIC needs: the Distributor and CPU-interface register
 * windows plus an optional parent interrupt (when cascaded).
 */
158 static struct resource_spec arm_gic_spec[] = {
159 { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Distributor registers */
160 { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* CPU Interrupt Intf. registers */
162 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
/* Per-CPU GIC target mask (ITARGETSR byte) indexed by cpuid. */
167 static u_int arm_gic_map[MAXCPU];
/* Singleton softc used by the legacy (pre-INTRNG) entry points below. */
169 static struct arm_gic_softc *gic_sc = NULL;
/* bus_space accessors for the CPU interface (gic_c_*) and Distributor
 * (gic_d_*) register windows. */
171 #define gic_c_read_4(_sc, _reg) \
172 bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg))
173 #define gic_c_write_4(_sc, _reg, _val) \
174 bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val))
175 #define gic_d_read_4(_sc, _reg) \
176 bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg))
177 #define gic_d_write_1(_sc, _reg, _val) \
178 bus_space_write_1((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
179 #define gic_d_write_4(_sc, _reg, _val) \
180 bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
/* Forward declarations for the legacy arm interrupt hooks. */
183 static int gic_config_irq(int irq, enum intr_trigger trig,
184 enum intr_polarity pol);
185 static void gic_post_filter(void *);
/*
 * Enable forwarding of one interrupt: set its bit in GICD_ISENABLER.
 * Each register word covers 32 irqs (irq >> 5 selects the word,
 * irq & 0x1F the bit); writing 1 bits sets, 0 bits are ignored.
 */
190 gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
193 gic_d_write_4(sc, GICD_ISENABLER(irq >> 5), (1UL << (irq & 0x1F)));
/* Disable one interrupt via the write-1-to-clear GICD_ICENABLER. */
197 gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
200 gic_d_write_4(sc, GICD_ICENABLER(irq >> 5), (1UL << (irq & 0x1F)));
/*
 * Discover this CPU's GIC target mask.  ITARGETSR0..7 are banked: for
 * SGIs/PPIs they read back the current CPU interface's own bit, so the
 * first non-zero byte found identifies us.
 */
205 gic_cpu_mask(struct arm_gic_softc *sc)
210 /* Read the current cpuid mask by reading ITARGETSR{0..7} */
211 for (i = 0; i < 8; i++) {
212 mask = gic_d_read_4(sc, GICD_ITARGETSR(i));
216 /* No mask found, assume we are on CPU interface 0 */
220 /* Collect the mask in the lower byte */
/*
 * Per-AP initialization of the banked GIC state (INTRNG variant).
 * NOTE(review): two definitions of arm_gic_init_secondary() appear below;
 * presumably they sit in mutually exclusive #ifdef INTRNG/#else branches
 * whose preprocessor lines were lost in extraction — confirm against the
 * original file.
 */
230 arm_gic_init_secondary(device_t dev)
232 struct arm_gic_softc *sc = device_get_softc(dev);
235 /* Set the mask so we can find this CPU to send it IPIs */
236 cpu = PCPU_GET(cpuid);
237 arm_gic_map[cpu] = gic_cpu_mask(sc);
/* Program the lowest (0) priority for every interrupt, 4 bytes at a time. */
239 for (irq = 0; irq < sc->nirqs; irq += 4)
240 gic_d_write_4(sc, GICD_IPRIORITYR(irq >> 2), 0);
242 /* Set all the interrupts to be in Group 0 (secure) */
243 for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
244 gic_d_write_4(sc, GICD_IGROUPR(irq >> 5), 0);
247 /* Enable CPU interface */
248 gic_c_write_4(sc, GICC_CTLR, 1);
250 /* Set priority mask register. */
251 gic_c_write_4(sc, GICC_PMR, 0xff);
253 /* Enable interrupt distribution */
254 gic_d_write_4(sc, GICD_CTLR, 0x01);
256 /* Unmask attached SGI interrupts. */
257 for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
258 if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
259 gic_irq_unmask(sc, irq)
261 /* Unmask attached PPI interrupts. */
262 for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
263 if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
264 gic_irq_unmask(sc, irq)
/* Legacy (non-INTRNG) variant of the same per-AP initialization. */
268 arm_gic_init_secondary(device_t dev)
270 struct arm_gic_softc *sc = device_get_softc(dev);
273 /* Set the mask so we can find this CPU to send it IPIs */
274 arm_gic_map[PCPU_GET(cpuid)] = gic_cpu_mask(sc);
276 for (i = 0; i < sc->nirqs; i += 4)
277 gic_d_write_4(sc, GICD_IPRIORITYR(i >> 2), 0);
279 /* Set all the interrupts to be in Group 0 (secure) */
280 for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
281 gic_d_write_4(sc, GICD_IGROUPR(i >> 5), 0);
284 /* Enable CPU interface */
285 gic_c_write_4(sc, GICC_CTLR, 1);
287 /* Set priority mask register. */
288 gic_c_write_4(sc, GICC_PMR, 0xff);
290 /* Enable interrupt distribution */
291 gic_d_write_4(sc, GICD_CTLR, 0x01);
294 * Activate the timer interrupts: virtual, secure, and non-secure.
/* PPIs 27/29/30 are the architected generic-timer interrupts. */
296 gic_d_write_4(sc, GICD_ISENABLER(27 >> 5), (1UL << (27 & 0x1F)));
297 gic_d_write_4(sc, GICD_ISENABLER(29 >> 5), (1UL << (29 & 0x1F)));
298 gic_d_write_4(sc, GICD_ISENABLER(30 >> 5), (1UL << (30 & 0x1F)));
/*
 * Decode an FDT interrupt specifier for this GIC: translate the
 * (type, number, flags) cells into a flat interrupt number plus
 * trigger/polarity.  Caches the interrupt-parent and its
 * #interrupt-cells across calls in function-local statics.
 */
305 gic_decode_fdt(phandle_t iparent, pcell_t *intr, int *interrupt,
/* Cached across calls; assumes a single GIC interrupt parent. */
308 static u_int num_intr_cells;
309 static phandle_t self;
310 struct ofw_compat_data *ocd;
/* Verify the parent node is actually one of our compatible GICs. */
313 for (ocd = compat_data; ocd->ocd_str != NULL; ocd++) {
314 if (ofw_bus_node_is_compatible(iparent, ocd->ocd_str)) {
/* Lazily read #interrupt-cells from the parent node. */
323 if (num_intr_cells == 0) {
324 if (OF_searchencprop(OF_node_from_xref(iparent),
325 "#interrupt-cells", &num_intr_cells,
326 sizeof(num_intr_cells)) == -1) {
/* One-cell specifier: flat irq number, no trigger/polarity info. */
331 if (num_intr_cells == 1) {
332 *interrupt = fdt32_to_cpu(intr[0]);
333 *trig = INTR_TRIGGER_CONFORM;
334 *pol = INTR_POLARITY_CONFORM;
/* Three-cell specifier: cell 0 selects SPI (0) vs PPI (1) numbering. */
336 if (fdt32_to_cpu(intr[0]) == 0)
337 *interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_SPI;
339 *interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_PPI;
341 * In intr[2], bits[3:0] are trigger type and level flags.
342 * 1 = low-to-high edge triggered
343 * 2 = high-to-low edge triggered
344 * 4 = active high level-sensitive
345 * 8 = active low level-sensitive
346 * The hardware only supports active-high-level or rising-edge
/* Reject low-polarity configurations (flag bits 0x2 and 0x8) for SPIs. */
349 if (*interrupt >= GIC_FIRST_SPI &&
350 fdt32_to_cpu(intr[2]) & 0x0a) {
351 printf("unsupported trigger/polarity configuration "
352 "0x%02x\n", fdt32_to_cpu(intr[2]) & 0x0f);
354 *pol = INTR_POLARITY_CONFORM;
355 if (fdt32_to_cpu(intr[2]) & 0x03)
356 *trig = INTR_TRIGGER_EDGE;
358 *trig = INTR_TRIGGER_LEVEL;
/*
 * Allocate and register one intr_irqsrc per hardware interrupt.
 * SGIs are registered as IPI sources, PPIs as per-CPU sources and
 * everything else as ordinary (SPI) sources.  On registration failure
 * the array is freed and the error returned.
 */
366 arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
370 struct gic_irqsrc *irqs;
371 struct intr_irqsrc *isrc;
374 irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
377 name = device_get_nameunit(sc->gic_dev);
378 for (irq = 0; irq < num; irq++) {
379 irqs[irq].gi_irq = irq;
/* Trigger/polarity stay CONFORM until first setup configures them. */
380 irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
381 irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
383 isrc = &irqs[irq].gi_isrc;
384 if (irq <= GIC_LAST_SGI) {
385 error = intr_isrc_register(isrc, sc->gic_dev,
386 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
387 } else if (irq <= GIC_LAST_PPI) {
388 error = intr_isrc_register(isrc, sc->gic_dev,
389 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
391 error = intr_isrc_register(isrc, sc->gic_dev, 0,
392 "%s,s%u", name, irq - GIC_FIRST_SPI);
395 /* XXX call intr_isrc_deregister() */
396 free(irqs, M_DEVBUF);
/*
 * Pre-configure [start, start + count) as an MSI/MSI-X range for a
 * child GICv2m frame: mark each source edge/high and GI_FLAG_MSI so it
 * can never be mapped from FDT.
 */
406 arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
408 struct arm_gic_softc *sc;
411 sc = device_get_softc(dev);
/* NOTE(review): condition is '<' but the message says '>'; arguably the
 * check should be '<=' (or the message '>='). Confirm intended bound. */
413 KASSERT((start + count) < sc->nirqs,
414 ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
415 start, count, sc->nirqs));
416 for (i = 0; i < count; i++) {
417 KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
418 ("%s: MSI interrupt %d already has a handler", __func__,
420 KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
421 ("%s: MSI interrupt %d already has a polarity", __func__,
423 KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
424 ("%s: MSI interrupt %d already has a trigger", __func__,
426 sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
427 sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
428 sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
/*
 * Attach: map the Distributor and CPU-interface windows, size the
 * controller from GICD_TYPER, register interrupt sources, then bring the
 * GIC up with all SPIs masked, priority 0 and all targets set to the
 * boot CPU.
 */
434 arm_gic_attach(device_t dev)
436 struct arm_gic_softc *sc;
438 uint32_t icciidr, mask, nirqs;
443 sc = device_get_softc(dev);
445 if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
446 device_printf(dev, "could not allocate resources\n");
453 /* Initialize mutex */
454 mtx_init(&sc->mutex, "GIC lock", "", MTX_SPIN);
456 /* Distributor Interface */
457 sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]);
458 sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]);
/* CPU Interface */
461 sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]);
462 sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]);
464 /* Disable interrupt forwarding to the CPU interface */
465 gic_d_write_4(sc, GICD_CTLR, 0x00);
467 /* Get the number of interrupts */
/* GICD_TYPER.ITLinesNumber: (N + 1) * 32 interrupt lines. */
468 sc->typer = gic_d_read_4(sc, GICD_TYPER);
469 nirqs = 32 * ((sc->typer & 0x1f) + 1);
472 if (arm_gic_register_isrcs(sc, nirqs)) {
473 device_printf(dev, "could not register irqs\n");
479 /* Set up function pointers */
480 arm_post_filter = gic_post_filter;
481 arm_config_irq = gic_config_irq;
/* Decode and report the CPU-interface implementer ID. */
484 icciidr = gic_c_read_4(sc, GICC_IIDR);
485 device_printf(dev,"pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
486 icciidr>>20, (icciidr>>16) & 0xF, (icciidr>>12) & 0xf,
487 (icciidr & 0xfff), sc->nirqs);
489 /* Set all global interrupts to be level triggered, active low. */
/* ICFGR holds 2 config bits per irq => 16 irqs per register word. */
490 for (i = 32; i < sc->nirqs; i += 16) {
491 gic_d_write_4(sc, GICD_ICFGR(i >> 4), GIC_DEFAULT_ICFGR_INIT);
494 /* Disable all interrupts. */
495 for (i = 32; i < sc->nirqs; i += 32) {
496 gic_d_write_4(sc, GICD_ICENABLER(i >> 5), 0xFFFFFFFF);
499 /* Find the current cpu mask */
500 mask = gic_cpu_mask(sc);
501 /* Set the mask so we can find this CPU to send it IPIs */
502 arm_gic_map[PCPU_GET(cpuid)] = mask;
503 /* Set all four targets to this cpu */
507 for (i = 0; i < sc->nirqs; i += 4) {
508 gic_d_write_4(sc, GICD_IPRIORITYR(i >> 2), 0);
510 gic_d_write_4(sc, GICD_ITARGETSR(i >> 2), mask);
514 /* Set all the interrupts to be in Group 0 (secure) */
515 for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
516 gic_d_write_4(sc, GICD_IGROUPR(i >> 5), 0);
519 /* Enable CPU interface */
520 gic_c_write_4(sc, GICC_CTLR, 1);
522 /* Set priority mask register. */
523 gic_c_write_4(sc, GICC_PMR, 0xff);
525 /* Enable interrupt distribution */
526 gic_d_write_4(sc, GICD_CTLR, 0x01);
/*
 * Detach: release the interrupt source array and bus resources.
 * NOTE(review): sc->mutex appears not to be destroyed here — confirm
 * against the original whether mtx_destroy() was elided by extraction.
 */
537 arm_gic_detach(device_t dev)
540 struct arm_gic_softc *sc;
542 sc = device_get_softc(dev);
/* free(NULL) is a no-op, so the guard is belt-and-braces. */
544 if (sc->gic_irqs != NULL)
545 free(sc->gic_irqs, M_DEVBUF);
547 bus_release_resources(dev, arm_gic_spec, sc->gic_res);
/*
 * bus_print_child: report a child's memory and irq resources from the
 * bus-maintained resource list.
 */
555 arm_gic_print_child(device_t bus, device_t child)
557 struct resource_list *rl;
560 rv = bus_print_child_header(bus, child);
562 rl = BUS_GET_RESOURCE_LIST(bus, child);
564 rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
566 rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
569 rv += bus_print_child_footer(bus, child);
/*
 * bus_alloc_resource for children (e.g. a GICv2m frame): resolve default
 * (wildcard) requests through the child's resource list, translate the
 * address through the GIC's 'ranges' property, then hand off to
 * bus_generic_alloc_resource().
 */
574 static struct resource *
575 arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
576 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
578 struct arm_gic_softc *sc;
579 struct resource_list_entry *rle;
580 struct resource_list *rl;
583 KASSERT(type == SYS_RES_MEMORY, ("Invalid resoure type %x", type));
585 sc = device_get_softc(bus);
588 * Request for the default allocation with a given rid: use resource
589 * list stored in the local device info.
591 if (RMAN_IS_DEFAULT_RANGE(start, end)) {
592 rl = BUS_GET_RESOURCE_LIST(bus, child);
/* NOTE(review): dead branch — the KASSERT above already requires
 * SYS_RES_MEMORY, so this IOPORT translation can never fire. */
594 if (type == SYS_RES_IOPORT)
595 type = SYS_RES_MEMORY;
597 rle = resource_list_find(rl, type, *rid);
600 device_printf(bus, "no default resources for "
601 "rid = %d, type = %d\n", *rid, type);
609 /* Remap through ranges property */
610 for (j = 0; j < sc->nranges; j++) {
611 if (start >= sc->ranges[j].bus && end <
612 sc->ranges[j].bus + sc->ranges[j].size) {
613 start -= sc->ranges[j].bus;
614 start += sc->ranges[j].host;
615 end -= sc->ranges[j].bus;
616 end += sc->ranges[j].host;
/* Falling off the loop without a match is an error (unless no ranges). */
620 if (j == sc->nranges && sc->nranges != 0) {
622 device_printf(bus, "Could not map resource "
623 "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);
628 return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
/*
 * Main interrupt dispatch loop (INTRNG).  Reads GICC_IAR for the active
 * interrupt and dispatches it, looping until the spurious id (0x3FF) is
 * read.  EOI timing is subtle — see the numbered commentary below; the
 * raw IAR value is written back to EOIR because for SGIs it encodes the
 * requesting CPU as well as the irq number.
 */
633 arm_gic_intr(void *arg)
635 struct arm_gic_softc *sc = arg;
636 struct gic_irqsrc *gi;
637 uint32_t irq_active_reg, irq;
638 struct trapframe *tf;
640 irq_active_reg = gic_c_read_4(sc, GICC_IAR);
/* Low 10 bits of IAR are the interrupt id. */
641 irq = irq_active_reg & 0x3FF;
644 * 1. We do EOI here because recent read value from active interrupt
645 * register must be used for it. Another approach is to save this
646 * value into associated interrupt source.
647 * 2. EOI must be done on same CPU where interrupt has fired. Thus
648 * we must ensure that interrupted thread does not migrate to
650 * 3. EOI cannot be delayed by any preemption which could happen on
651 * critical_exit() used in MI intr code, when interrupt thread is
652 * scheduled. See next point.
653 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
654 * an action and any use of critical_exit() could break this
655 * assumption. See comments within smp_rendezvous_action().
656 * 5. We always return FILTER_HANDLED as this is an interrupt
657 * controller dispatch function. Otherwise, in cascaded interrupt
658 * case, the whole interrupt subtree would be masked.
/* ids >= nirqs (notably 0x3FF) are spurious — nothing to EOI. */
661 if (irq >= sc->nirqs) {
662 #ifdef GIC_DEBUG_SPURIOUS
663 device_printf(sc->gic_dev,
664 "Spurious interrupt detected: last irq: %d on CPU%d\n",
665 sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
667 return (FILTER_HANDLED);
670 tf = curthread->td_intr_frame;
672 gi = sc->gic_irqs + irq;
674 * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement
675 * as compiler complains that comparing u_int >= 0 is always true.
677 if (irq <= GIC_LAST_SGI) {
679 /* Call EOI for all IPI before dispatch. */
680 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
681 intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
/* UP kernel: SGIs should never arrive; EOI and drop. */
684 device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
685 irq - GIC_FIRST_SGI);
686 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
691 #ifdef GIC_DEBUG_SPURIOUS
692 sc->last_irq[PCPU_GET(cpuid)] = irq;
/* Edge-triggered sources must be EOId before dispatch (early EOI). */
694 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
695 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
/* No handler accepted the interrupt: mask it so it cannot storm. */
697 if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
698 gic_irq_mask(sc, irq);
699 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
700 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
701 device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
/* Loop for the next pending interrupt. */
705 arm_irq_memory_barrier(irq);
706 irq_active_reg = gic_c_read_4(sc, GICC_IAR);
707 irq = irq_active_reg & 0x3FF;
711 return (FILTER_HANDLED);
/*
 * Program trigger/polarity for one SPI into GICD_ICFGR (2 bits per irq,
 * 16 irqs per word) under the softc spin lock.  SGIs/PPIs (< GIC_FIRST_SPI)
 * are fixed by the hardware and skipped.  CONFORM values leave the
 * corresponding field untouched.
 */
715 gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
716 enum intr_polarity pol)
721 if (irq < GIC_FIRST_SPI)
724 mtx_lock_spin(&sc->mutex);
/* Read-modify-write the 2-bit field for this irq. */
726 reg = gic_d_read_4(sc, GICD_ICFGR(irq >> 4));
727 mask = (reg >> 2*(irq % 16)) & 0x3;
729 if (pol == INTR_POLARITY_LOW) {
730 mask &= ~GICD_ICFGR_POL_MASK;
731 mask |= GICD_ICFGR_POL_LOW;
732 } else if (pol == INTR_POLARITY_HIGH) {
733 mask &= ~GICD_ICFGR_POL_MASK;
734 mask |= GICD_ICFGR_POL_HIGH;
737 if (trig == INTR_TRIGGER_LEVEL) {
738 mask &= ~GICD_ICFGR_TRIG_MASK;
739 mask |= GICD_ICFGR_TRIG_LVL;
740 } else if (trig == INTR_TRIGGER_EDGE) {
741 mask &= ~GICD_ICFGR_TRIG_MASK;
742 mask |= GICD_ICFGR_TRIG_EDGE;
746 reg = reg & ~(0x3 << 2*(irq % 16));
747 reg = reg | (mask << 2*(irq % 16));
748 gic_d_write_4(sc, GICD_ICFGR(irq >> 4), reg);
750 mtx_unlock_spin(&sc->mutex);
/*
 * Route an SPI to the set of CPUs in 'cpus' by OR-ing their GIC target
 * masks into the interrupt's ITARGETSR byte.  Only the first
 * min(mp_ncpus, 8) CPUs are routable on GICv2; a request naming any CPU
 * beyond that is rejected (presumably returning EINVAL — the return
 * statements were lost in extraction).
 */
754 gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
756 uint32_t cpu, end, mask;
758 end = min(mp_ncpus, 8);
759 for (cpu = end; cpu < MAXCPU; cpu++)
760 if (CPU_ISSET(cpu, cpus))
763 for (mask = 0, cpu = 0; cpu < end; cpu++)
764 if (CPU_ISSET(cpu, cpus))
765 mask |= arm_gic_map[cpu];
/* ITARGETSR is byte-addressable: one byte per interrupt. */
767 gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
/*
 * Translate an FDT interrupt specifier (1 or 3 cells) into a flat GIC
 * interrupt number plus polarity/trigger.  Returns 0 on success or an
 * errno on an unsupported specifier.
 */
773 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
774 enum intr_polarity *polp, enum intr_trigger *trigp)
/* One-cell form: bare irq number, nothing known about configuration. */
779 *polp = INTR_POLARITY_CONFORM;
780 *trigp = INTR_TRIGGER_CONFORM;
787 * The 1st cell is the interrupt type:
790 * The 2nd cell contains the interrupt number:
793 * The 3rd cell is the flags, encoded as follows:
794 * bits[3:0] trigger type and level flags
795 * 1 = low-to-high edge triggered
796 * 2 = high-to-low edge triggered
797 * 4 = active high level-sensitive
798 * 8 = active low level-sensitive
799 * bits[15:8] PPI interrupt cpu mask
800 * Each bit corresponds to each of the 8 possible cpus
801 * attached to the GIC. A bit set to '1' indicated
802 * the interrupt is wired to that CPU.
806 irq = GIC_FIRST_SPI + cells[1];
807 /* SPI irq is checked later. */
810 irq = GIC_FIRST_PPI + cells[1];
811 if (irq > GIC_LAST_PPI) {
812 device_printf(dev, "unsupported PPI interrupt "
813 "number %u\n", cells[1]);
818 device_printf(dev, "unsupported interrupt type "
819 "configuration %u\n", cells[0]);
/* Reject reserved flag bits and active-low configurations. */
823 tripol = cells[2] & 0xff;
824 if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
826 device_printf(dev, "unsupported trigger/polarity "
827 "configuration 0x%02x\n", tripol);
830 *polp = INTR_POLARITY_CONFORM;
831 *trigp = tripol & FDT_INTR_EDGE_MASK ?
832 INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
/*
 * Map MSI interrupt data: the irq was chosen at MSI allocation time and
 * is carried inside the intr_map_data_msi's isrc.
 */
840 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
841 enum intr_polarity *polp, enum intr_trigger *trigp)
843 struct gic_irqsrc *gi;
845 /* Map a non-GICv2m MSI */
846 gi = (struct gic_irqsrc *)msi_data->isrc;
852 /* MSI/MSI-X interrupts are always edge triggered with high polarity */
853 *polp = INTR_POLARITY_HIGH;
854 *trigp = INTR_TRIGGER_EDGE;
/*
 * Common interrupt-mapping entry: dispatch on the map-data type
 * (FDT vs MSI), then validate the resulting irq number and
 * polarity/trigger values before reporting them to the caller.
 */
860 gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
861 enum intr_polarity *polp, enum intr_trigger *trigp)
864 enum intr_polarity pol;
865 enum intr_trigger trig;
866 struct arm_gic_softc *sc;
867 struct intr_map_data_msi *dam;
869 struct intr_map_data_fdt *daf;
872 sc = device_get_softc(dev);
873 switch (data->type) {
875 case INTR_MAP_DATA_FDT:
876 daf = (struct intr_map_data_fdt *)data;
877 if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
/* FDT must never name an irq reserved for MSI use. */
880 KASSERT(irq >= sc->nirqs ||
881 (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
882 ("%s: Attempting to map a MSI interrupt from FDT",
886 case INTR_MAP_DATA_MSI:
888 dam = (struct intr_map_data_msi *)data;
889 if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
/* Sanity-check the decoded values. */
896 if (irq >= sc->nirqs)
898 if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
899 pol != INTR_POLARITY_HIGH)
901 if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
902 trig != INTR_TRIGGER_LEVEL)
/*
 * PIC_MAP_INTR method: map bus data to an interrupt source pointer,
 * discarding the polarity/trigger outputs.
 */
914 arm_gic_map_intr(device_t dev, struct intr_map_data *data,
915 struct intr_irqsrc **isrcp)
919 struct arm_gic_softc *sc;
921 error = gic_map_intr(dev, data, &irq, NULL, NULL);
923 sc = device_get_softc(dev);
924 *isrcp = GIC_INTR_ISRC(sc, irq);
930 arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
931 struct resource *res, struct intr_map_data *data)
933 struct arm_gic_softc *sc = device_get_softc(dev);
934 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
935 enum intr_trigger trig;
936 enum intr_polarity pol;
938 if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
942 KASSERT(pol == INTR_POLARITY_HIGH,
943 ("%s: MSI interrupts must be active-high", __func__));
944 KASSERT(trig == INTR_TRIGGER_EDGE,
945 ("%s: MSI interrupts must be edge triggered", __func__));
946 } else if (data != NULL) {
949 /* Get config for resource. */
950 if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
954 pol = INTR_POLARITY_CONFORM;
955 trig = INTR_TRIGGER_CONFORM;
958 /* Compare config if this is not first setup. */
959 if (isrc->isrc_handlers != 0) {
960 if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
961 (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
967 /* For MSI/MSI-X we should have already configured these */
968 if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
969 if (pol == INTR_POLARITY_CONFORM)
970 pol = INTR_POLARITY_LOW; /* just pick some */
971 if (trig == INTR_TRIGGER_CONFORM)
972 trig = INTR_TRIGGER_EDGE; /* just pick some */
977 /* Edge triggered interrupts need an early EOI sent */
978 if (gi->gi_pol == INTR_TRIGGER_EDGE)
979 gi->gi_flags |= GI_FLAG_EARLY_EOI;
983 * XXX - In case that per CPU interrupt is going to be enabled in time
984 * when SMP is already started, we need some IPI call which
985 * enables it on others CPUs. Further, it's more complicated as
986 * pic_enable_source() and pic_disable_source() should act on
987 * per CPU basis only. Thus, it should be solved here somehow.
989 if (isrc->isrc_flags & INTR_ISRCF_PPI)
990 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
992 gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
993 arm_gic_bind_intr(dev, isrc);
998 arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
999 struct resource *res, struct intr_map_data *data)
1001 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1003 if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
1004 gi->gi_pol = INTR_POLARITY_CONFORM;
1005 gi->gi_trig = INTR_TRIGGER_CONFORM;
/*
 * PIC_ENABLE_INTR: barrier then unmask, so device writes that raise the
 * interrupt are visible before it can fire.
 */
1011 arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
1013 struct arm_gic_softc *sc = device_get_softc(dev);
1014 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1016 arm_irq_memory_barrier(gi->gi_irq);
1017 gic_irq_unmask(sc, gi->gi_irq);
/* PIC_DISABLE_INTR: simply mask the source. */
1021 arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
1023 struct arm_gic_softc *sc = device_get_softc(dev);
1024 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1026 gic_irq_mask(sc, gi->gi_irq);
/*
 * PIC_PRE_ITHREAD: before handing off to an ithread, mask the source and
 * EOI it so other interrupts can be taken while the thread runs.
 */
1030 arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
1032 struct arm_gic_softc *sc = device_get_softc(dev);
1033 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1035 arm_gic_disable_intr(dev, isrc);
1036 gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
/* PIC_POST_ITHREAD: re-enable the source after the ithread completes. */
1040 arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
1043 arm_irq_memory_barrier(0);
1044 arm_gic_enable_intr(dev, isrc);
/*
 * PIC_POST_FILTER: EOI after a filter handler, unless the early-EOI path
 * in arm_gic_intr() already did it (edge-triggered sources).
 */
1048 arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
1050 struct arm_gic_softc *sc = device_get_softc(dev);
1051 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1053 /* EOI for edge-triggered done earlier. */
1054 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
1057 arm_irq_memory_barrier(0);
1058 gic_c_write_4(sc, GICC_EOIR, gi->gi_irq)
/*
 * PIC_BIND_INTR: route an SPI to its bound CPU set; an empty set gets the
 * next CPU round-robin.  SGIs/PPIs are per-CPU and cannot be re-routed.
 */
1062 arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
1064 struct arm_gic_softc *sc = device_get_softc(dev);
1065 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1067 if (gi->gi_irq < GIC_FIRST_SPI)
1070 if (CPU_EMPTY(&isrc->isrc_cpu)) {
1071 gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
1072 CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
1074 return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
/*
 * PIC_IPI_SEND (INTRNG): build the CPU target-list from arm_gic_map and
 * trigger the SGI via GICD_SGIR.
 */
1079 arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
1082 struct arm_gic_softc *sc = device_get_softc(dev);
1083 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1084 uint32_t val = 0, i;
1086 for (i = 0; i < MAXCPU; i++)
1087 if (CPU_ISSET(i, &cpus))
1088 val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
1090 gic_d_write_4(sc, GICD_SGIR(0), val | gi->gi_irq);
/*
 * PIC_IPI_SETUP: hand out SGIs to IPIs in ascending order; fails once
 * all 16 SGIs are taken.
 */
1094 arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
1096 struct intr_irqsrc *isrc;
1097 struct arm_gic_softc *sc = device_get_softc(dev);
1099 if (sgi_first_unused > GIC_LAST_SGI)
1102 isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1103 sgi_to_ipi[sgi_first_unused++] = ipi;
/* Record the current CPU so the bookkeeping shows where it is enabled. */
1105 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
/*
 * Legacy (pre-INTRNG) interrupt iterator: acknowledge the next pending
 * interrupt from GICC_IAR, EOI-ing SGIs immediately (the raw IAR value
 * encodes the requesting CPU, which is unavailable later).
 */
1113 arm_gic_next_irq(struct arm_gic_softc *sc, int last_irq)
1115 uint32_t active_irq;
1117 active_irq = gic_c_read_4(sc, GICC_IAR);
1120 * Immediately EOIR the SGIs, because doing so requires the other
1121 * bits (ie CPU number), not just the IRQ number, and we do not
1122 * have this information later.
1124 if ((active_irq & 0x3ff) <= GIC_LAST_SGI)
1125 gic_c_write_4(sc, GICC_EOIR, active_irq);
1126 active_irq &= 0x3FF;
/* 0x3FF is the architected spurious-interrupt id. */
1128 if (active_irq == 0x3FF) {
1130 device_printf(sc->gic_dev,
1131 "Spurious interrupt detected\n");
/*
 * Legacy public configuration entry: validate arguments, then program the
 * 2-bit ICFGR field for the irq under the spin lock (same register logic
 * as gic_config() above).
 */
1139 arm_gic_config(device_t dev, int irq, enum intr_trigger trig,
1140 enum intr_polarity pol)
1142 struct arm_gic_softc *sc = device_get_softc(dev);
1146 /* Function is public-accessible, so validate input arguments */
1147 if ((irq < 0) || (irq >= sc->nirqs))
1149 if ((trig != INTR_TRIGGER_EDGE) && (trig != INTR_TRIGGER_LEVEL) &&
1150 (trig != INTR_TRIGGER_CONFORM))
1152 if ((pol != INTR_POLARITY_HIGH) && (pol != INTR_POLARITY_LOW) &&
1153 (pol != INTR_POLARITY_CONFORM))
1156 mtx_lock_spin(&sc->mutex);
1158 reg = gic_d_read_4(sc, GICD_ICFGR(irq >> 4));
1159 mask = (reg >> 2*(irq % 16)) & 0x3;
1161 if (pol == INTR_POLARITY_LOW) {
1162 mask &= ~GICD_ICFGR_POL_MASK;
1163 mask |= GICD_ICFGR_POL_LOW;
1164 } else if (pol == INTR_POLARITY_HIGH) {
1165 mask &= ~GICD_ICFGR_POL_MASK;
1166 mask |= GICD_ICFGR_POL_HIGH;
1169 if (trig == INTR_TRIGGER_LEVEL) {
1170 mask &= ~GICD_ICFGR_TRIG_MASK;
1171 mask |= GICD_ICFGR_TRIG_LVL;
1172 } else if (trig == INTR_TRIGGER_EDGE) {
1173 mask &= ~GICD_ICFGR_TRIG_MASK;
1174 mask |= GICD_ICFGR_TRIG_EDGE;
1178 reg = reg & ~(0x3 << 2*(irq % 16));
1179 reg = reg | (mask << 2*(irq % 16));
1180 gic_d_write_4(sc, GICD_ICFGR(irq >> 4), reg);
1182 mtx_unlock_spin(&sc->mutex);
/* Invalid-argument exit path.
 * NOTE(review): "gic_config_irg" in the message is a typo for
 * "gic_config_irq" — a runtime string, left unchanged here. */
1187 device_printf(dev, "gic_config_irg, invalid parameters\n");
/* Legacy mask: disable the irq, then EOI it (flagged as not allowed). */
1193 arm_gic_mask(device_t dev, int irq)
1195 struct arm_gic_softc *sc = device_get_softc(dev);
1197 gic_d_write_4(sc, GICD_ICENABLER(irq >> 5), (1UL << (irq & 0x1F)));
1198 gic_c_write_4(sc, GICC_EOIR, irq); /* XXX - not allowed */
/* Legacy unmask: barrier first for non-SGI sources, then enable. */
1202 arm_gic_unmask(device_t dev, int irq)
1204 struct arm_gic_softc *sc = device_get_softc(dev);
1206 if (irq > GIC_LAST_SGI)
1207 arm_irq_memory_barrier(irq);
1209 gic_d_write_4(sc, GICD_ISENABLER(irq >> 5), (1UL << (irq & 0x1F)));
/* Legacy SMP IPI send via GICD_SGIR (same encoding as the INTRNG path). */
1214 arm_gic_ipi_send(device_t dev, cpuset_t cpus, u_int ipi)
1216 struct arm_gic_softc *sc = device_get_softc(dev);
1217 uint32_t val = 0, i;
1219 for (i = 0; i < MAXCPU; i++)
1220 if (CPU_ISSET(i, &cpus))
1221 val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
1223 gic_d_write_4(sc, GICD_SGIR(0), val | ipi);
/*
 * Legacy IPI read helper.
 * NOTE(review): SGIs number 0..15, so the '> 16' bound looks like an
 * off-by-one ('>= 16'); confirm against the original before changing.
 */
1227 arm_gic_ipi_read(device_t dev, int i)
1232 * The intr code will automagically give the frame pointer
1233 * if the interrupt argument is 0.
1235 if ((unsigned int)i > 16)
/* Legacy IPI clear: nothing to do on the GIC (EOI already handled). */
1244 arm_gic_ipi_clear(device_t dev, int ipi)
/*
 * Legacy arm_post_filter hook: barrier for non-SGI sources, then EOI.
 * Uses the gic_sc singleton since no softc is passed.
 */
1251 gic_post_filter(void *arg)
1253 struct arm_gic_softc *sc = gic_sc;
1254 uintptr_t irq = (uintptr_t) arg;
1256 if (irq > GIC_LAST_SGI)
1257 arm_irq_memory_barrier(irq);
1258 gic_c_write_4(sc, GICC_EOIR, irq);
/* Thin singleton wrappers mapping the old MD interrupt API onto the
 * driver entry points above. */
1262 gic_config_irq(int irq, enum intr_trigger trig, enum intr_polarity pol)
1265 return (arm_gic_config(gic_sc->gic_dev, irq, trig, pol));
1269 arm_mask_irq(uintptr_t nb)
1272 arm_gic_mask(gic_sc->gic_dev, nb);
1276 arm_unmask_irq(uintptr_t nb)
1279 arm_gic_unmask(gic_sc->gic_dev, nb);
1283 arm_get_next_irq(int last_irq)
1286 return (arm_gic_next_irq(gic_sc, last_irq));
1291 intr_pic_init_secondary(void)
1294 arm_gic_init_secondary(gic_sc->gic_dev);
1298 pic_ipi_send(cpuset_t cpus, u_int ipi)
1301 arm_gic_ipi_send(gic_sc->gic_dev, cpus, ipi);
1308 return (arm_gic_ipi_read(gic_sc->gic_dev, i));
1312 pic_ipi_clear(int ipi)
1315 arm_gic_ipi_clear(gic_sc->gic_dev, ipi);
/* newbus method table: bus interface plus the INTRNG PIC interface. */
1320 static device_method_t arm_gic_methods[] = {
1323 DEVMETHOD(bus_print_child, arm_gic_print_child),
1324 DEVMETHOD(bus_add_child, bus_generic_add_child),
1325 DEVMETHOD(bus_alloc_resource, arm_gic_alloc_resource),
1326 DEVMETHOD(bus_release_resource, bus_generic_release_resource),
1327 DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
1329 /* Interrupt controller interface */
1330 DEVMETHOD(pic_disable_intr, arm_gic_disable_intr),
1331 DEVMETHOD(pic_enable_intr, arm_gic_enable_intr),
1332 DEVMETHOD(pic_map_intr, arm_gic_map_intr),
1333 DEVMETHOD(pic_setup_intr, arm_gic_setup_intr),
1334 DEVMETHOD(pic_teardown_intr, arm_gic_teardown_intr),
1335 DEVMETHOD(pic_post_filter, arm_gic_post_filter),
1336 DEVMETHOD(pic_post_ithread, arm_gic_post_ithread),
1337 DEVMETHOD(pic_pre_ithread, arm_gic_pre_ithread),
1339 DEVMETHOD(pic_bind_intr, arm_gic_bind_intr),
1340 DEVMETHOD(pic_init_secondary, arm_gic_init_secondary),
1341 DEVMETHOD(pic_ipi_send, arm_gic_ipi_send),
1342 DEVMETHOD(pic_ipi_setup, arm_gic_ipi_setup),
/* Driver/class declaration for the "gic" device. */
1348 DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
1349 sizeof(struct arm_gic_softc));
1353 * GICv2m support -- the GICv2 MSI/MSI-X controller.
/* MSI_TYPER: reports the SPI range this frame services. */
1356 #define GICV2M_MSI_TYPER 0x008
1357 #define MSI_TYPER_SPI_BASE(x) (((x) >> 16) & 0x3ff)
1358 #define MSI_TYPER_SPI_COUNT(x) (((x) >> 0) & 0x3ff)
/*
 * MSI_SETSPI_NS: the non-secure doorbell register a device writes an SPI
 * number into to raise an MSI.
 * NOTE(review): mixed-case `GICv2M_` here vs `GICV2M_` for its neighbours;
 * renaming would require touching arm_gicv2m_map_msi as well, so left as is.
 */
1359 #define GICv2M_MSI_SETSPI_NS 0x040
/* MSI_IIDR: implementer identification register. */
1360 #define GICV2M_MSI_IIDR 0xFCC
/*
 * Attach the GICv2m MSI frame. Maps the register window, reads MSI_TYPER
 * to learn which SPI range this frame can raise, reserves that range in the
 * parent GIC for MSI use, and registers the device as an MSI controller
 * with INTRNG. Lines carrying the rid initialisation, RF_ACTIVE flag and
 * error return are not visible in this excerpt.
 */
1363 arm_gicv2m_attach(device_t dev)
1365 struct arm_gicv2m_softc *sc;
1366 struct arm_gic_softc *psc;
1370 psc = device_get_softc(device_get_parent(dev));
1371 sc = device_get_softc(dev);
1374 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1376 if (sc->sc_mem == NULL) {
1377 device_printf(dev, "Unable to allocate resources\n");
/* Decode the serviced SPI window from the hardware-reported TYPER value. */
1381 typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
1382 sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
1383 sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);
1384 sc->sc_spi_end = sc->sc_spi_start + sc->sc_spi_count;
1386 /* Reserve these interrupts for MSI/MSI-X use */
1387 arm_gic_reserve_msi_range(device_get_parent(dev), sc->sc_spi_start,
/* Protects the GI_FLAG_MSI_USED bookkeeping in the alloc/release paths. */
1390 mtx_init(&sc->sc_mutex, "GICv2m lock", "", MTX_DEF);
1392 intr_msi_register(dev, sc->sc_xref);
1395 device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
1396 sc->sc_spi_start + sc->sc_spi_count - 1);
/*
 * MSI alloc: find `count` contiguous unused SPIs, aligned to `maxcount`
 * (both powers of two per the KASSERTs), mark them used under sc_mutex,
 * and hand back their irqsrcs. The PIC returned to the caller is the
 * parent GIC, which actually delivers the interrupts.
 *
 * NOTE(review): several lines are absent from this excerpt; the visible
 * fragment raises three suspicions to confirm against the full file:
 *  1. The inner validation loop at 1428 iterates `end_irq` but 1435/1439
 *     index gic_irqs[irq], re-testing the same slot instead of each slot
 *     in the candidate range — looks like it should be gic_irqs[end_irq].
 *  2. The bound `end_irq != irq + count - 1` checks only count-1 slots.
 *  3. The outer for at 1419 increments irq once more after `found` becomes
 *     true; no compensating `irq--` is visible before the marking loop.
 */
1402 arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1403 device_t *pic, struct intr_irqsrc **srcs)
1405 struct arm_gic_softc *psc;
1406 struct arm_gicv2m_softc *sc;
1407 int i, irq, end_irq;
1410 KASSERT(powerof2(count), ("%s: bad count", __func__));
1411 KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));
1413 psc = device_get_softc(device_get_parent(dev));
1414 sc = device_get_softc(dev);
1416 mtx_lock(&sc->sc_mutex);
1419 for (irq = sc->sc_spi_start; irq < sc->sc_spi_end && !found; irq++) {
1420 /* Start on an aligned interrupt */
1421 if ((irq & (maxcount - 1)) != 0)
1424 /* Assume we found a valid range until shown otherwise */
1427 /* Check this range is valid */
1428 for (end_irq = irq; end_irq != irq + count - 1; end_irq++) {
1429 /* No free interrupts */
1430 if (end_irq == sc->sc_spi_end) {
1435 KASSERT((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI)!= 0,
1436 ("%s: Non-MSI interrupt found", __func__));
1438 /* This is already used */
1439 if ((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) ==
1447 /* Not enough interrupts were found */
1448 if (!found || irq == sc->sc_spi_end) {
1449 mtx_unlock(&sc->sc_mutex);
1453 for (i = 0; i < count; i++) {
1454 /* Mark the interrupt as used */
1455 psc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
1458 mtx_unlock(&sc->sc_mutex);
1460 for (i = 0; i < count; i++)
1461 srcs[i] = (struct intr_irqsrc *)&psc->gic_irqs[irq + i];
1462 *pic = device_get_parent(dev);
/*
 * MSI release: clear the GI_FLAG_MSI_USED bookkeeping bit on each irqsrc
 * in `isrc` under sc_mutex, returning the SPIs to the free pool.
 * NOTE(review): the KASSERT message says "MSI-X" although this is the MSI
 * path — copy/paste nit from release_msix; message text left untouched here.
 */
1468 arm_gicv2m_release_msi(device_t dev, device_t child, int count,
1469 struct intr_irqsrc **isrc)
1471 struct arm_gicv2m_softc *sc;
1472 struct gic_irqsrc *gi;
1475 sc = device_get_softc(dev);
1477 mtx_lock(&sc->sc_mutex);
1478 for (i = 0; i < count; i++) {
1479 gi = (struct gic_irqsrc *)isrc[i];
/* Releasing an interrupt we never handed out indicates caller confusion. */
1481 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1482 ("%s: Trying to release an unused MSI-X interrupt",
1485 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1487 mtx_unlock(&sc->sc_mutex);
/*
 * MSI-X alloc: linear scan of this frame's SPI window for the first
 * interrupt without GI_FLAG_MSI_USED, claim it under sc_mutex, and return
 * its irqsrc. As with MSI, the parent GIC is reported as the delivering PIC.
 * Returns ENXIO (on a line not visible here) when the window is exhausted.
 */
1493 arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
1494 struct intr_irqsrc **isrcp)
1496 struct arm_gicv2m_softc *sc;
1497 struct arm_gic_softc *psc;
1500 psc = device_get_softc(device_get_parent(dev));
1501 sc = device_get_softc(dev);
1503 mtx_lock(&sc->sc_mutex);
1504 /* Find an unused interrupt */
1505 for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
/* Every SPI in this window was reserved for MSI at attach time. */
1506 KASSERT((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1507 ("%s: Non-MSI interrupt found", __func__));
1508 if ((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1511 /* No free interrupt was found */
1512 if (irq == sc->sc_spi_end) {
1513 mtx_unlock(&sc->sc_mutex);
1517 /* Mark the interrupt as used */
1518 psc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1519 mtx_unlock(&sc->sc_mutex);
1521 *isrcp = (struct intr_irqsrc *)&psc->gic_irqs[irq];
1522 *pic = device_get_parent(dev);
/*
 * MSI-X release: return a single interrupt to the pool by clearing its
 * GI_FLAG_MSI_USED bit under sc_mutex.
 * NOTE(review): the KASSERT reads gi_flags before sc_mutex is taken;
 * harmless as a sanity check, but inconsistent with release_msi which
 * asserts inside the lock — confirm against the full file.
 */
1528 arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1530 struct arm_gicv2m_softc *sc;
1531 struct gic_irqsrc *gi;
1533 sc = device_get_softc(dev);
1534 gi = (struct gic_irqsrc *)isrc;
1536 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1537 ("%s: Trying to release an unused MSI-X interrupt", __func__));
1539 mtx_lock(&sc->sc_mutex);
1540 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1541 mtx_unlock(&sc->sc_mutex);
/*
 * Report the MSI doorbell to the bus: the physical address of this frame's
 * MSI_SETSPI_NS register. Uses vtophys() on the KVA of the mapped resource,
 * which assumes the register window is mapped linearly. The `*data`
 * assignment (presumably the SPI number) and the return fall in lines not
 * visible in this excerpt.
 */
1547 arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1548 uint64_t *addr, uint32_t *data)
1550 struct arm_gicv2m_softc *sc = device_get_softc(dev);
1551 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1553 *addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
/*
 * kobj dispatch table for the GICv2m child device: device_attach plus the
 * INTRNG MSI interface. The device_probe entry and DEVMETHOD_END terminator
 * fall in lines not visible in this excerpt.
 */
1559 static device_method_t arm_gicv2m_methods[] = {
1560 /* Device interface */
1561 DEVMETHOD(device_attach, arm_gicv2m_attach),
1564 DEVMETHOD(msi_alloc_msi, arm_gicv2m_alloc_msi),
1565 DEVMETHOD(msi_release_msi, arm_gicv2m_release_msi),
1566 DEVMETHOD(msi_alloc_msix, arm_gicv2m_alloc_msix),
1567 DEVMETHOD(msi_release_msix, arm_gicv2m_release_msix),
1568 DEVMETHOD(msi_map_msi, arm_gicv2m_map_msi),
/* Declare the `gicv2m` driver class backed by the methods above. */
1574 DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
1575 sizeof(struct arm_gicv2m_softc));