2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2011 The FreeBSD Foundation
7 * Developed by Damjan Marion <damjan.marion@gmail.com>
9 * Based on OMAP4 GIC code by Ben Gray
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the company nor the name of the author may be used to
20 * endorse or promote products derived from this software without specific
21 * prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
41 #include "opt_platform.h"
43 #include <sys/param.h>
44 #include <sys/systm.h>
46 #include <sys/kernel.h>
48 #include <sys/module.h>
49 #include <sys/malloc.h>
53 #include <sys/cpuset.h>
55 #include <sys/mutex.h>
57 #include <sys/sched.h>
62 #include <machine/bus.h>
63 #include <machine/intr.h>
64 #include <machine/smp.h>
67 #include <dev/fdt/fdt_intr.h>
68 #include <dev/ofw/ofw_bus_subr.h>
72 #include <contrib/dev/acpica/include/acpi.h>
73 #include <dev/acpica/acpivar.h>
78 #include <ddb/db_lex.h>
81 #include <arm/arm/gic.h>
82 #include <arm/arm/gic_common.h>
88 /* We are using GICv2 register naming */
90 /* Distributor Registers */
/*
 * CPU Interface register offsets (GICC_*).  The GICv1 mnemonics are
 * noted per line for cross-reference with older documentation.
 */
93 #define GICC_CTLR 0x0000 /* v1 ICCICR */
94 #define GICC_PMR 0x0004 /* v1 ICCPMR */
95 #define GICC_BPR 0x0008 /* v1 ICCBPR */
96 #define GICC_IAR 0x000C /* v1 ICCIAR */
97 #define GICC_EOIR 0x0010 /* v1 ICCEOIR */
98 #define GICC_RPR 0x0014 /* v1 ICCRPR */
99 #define GICC_HPPIR 0x0018 /* v1 ICCHPIR */
100 #define GICC_ABPR 0x001C /* v1 ICCABPR */
101 #define GICC_IIDR 0x00FC /* v1 ICCIIDR*/
103 /* TYPER Registers */
/* GICD_TYPER bit: distributor implements the Security Extensions. */
104 #define GICD_TYPER_SECURITYEXT 0x400
/* True when the cached GICD_TYPER value reports Security Extensions. */
105 #define GIC_SUPPORT_SECEXT(_sc) \
106 ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
/* Per-interrupt bookkeeping: INTRNG source plus configured pol/trig. */
113 struct intr_irqsrc gi_isrc;
115 enum intr_polarity gi_pol;
116 enum intr_trigger gi_trig;
/* gi_flags bits: */
117 #define GI_FLAG_EARLY_EOI (1 << 0)
118 #define GI_FLAG_MSI (1 << 1) /* This interrupt source should only */
119 /* be used for MSI/MSI-X interrupts */
120 #define GI_FLAG_MSI_USED (1 << 2) /* This irq is already allocated */
121 /* for a MSI/MSI-X interrupt */
/* Round-robin cursor used by arm_gic_bind_intr() for unbound SPIs. */
125 static u_int gic_irq_cpu;
126 static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);
/* SGI number -> IPI number mapping, filled by arm_gic_ipi_setup(). */
129 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
130 static u_int sgi_first_unused = GIC_FIRST_SGI;
/* Convenience accessor for the isrc embedded in gic_irqs[irq]. */
133 #define GIC_INTR_ISRC(sc, irq) (&sc->gic_irqs[irq].gi_isrc)
/* Resources claimed at attach: distributor, CPU interface, optional parent IRQ. */
135 static struct resource_spec arm_gic_spec[] = {
136 { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Distributor registers */
137 { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* CPU Interrupt Intf. registers */
138 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
/* Spurious-interrupt reporting defaults on only for arm + INVARIANTS. */
142 #if defined(__arm__) && defined(INVARIANTS)
143 static int gic_debug_spurious = 1;
145 static int gic_debug_spurious = 0;
147 TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);
/* Per-CPU GIC target mask (ITARGETSR byte), indexed by cpuid. */
149 static u_int arm_gic_map[MAXCPU];
151 static struct arm_gic_softc *gic_sc = NULL;
/* CPU Interface accessors (GIC_RES_CPU resource). */
154 #define gic_c_read_4(_sc, _reg) \
155 bus_read_4((_sc)->gic_res[GIC_RES_CPU], (_reg))
156 #define gic_c_write_4(_sc, _reg, _val) \
157 bus_write_4((_sc)->gic_res[GIC_RES_CPU], (_reg), (_val))
158 /* Distributor Interface */
159 #define gic_d_read_4(_sc, _reg) \
160 bus_read_4((_sc)->gic_res[GIC_RES_DIST], (_reg))
161 #define gic_d_write_1(_sc, _reg, _val) \
162 bus_write_1((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
163 #define gic_d_write_4(_sc, _reg, _val) \
164 bus_write_4((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
/* Unmask (enable) one interrupt via the set-enable register. */
167 gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
170 gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq))
/* Mask (disable) one interrupt via the clear-enable register. */
174 gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
177 gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
/*
 * Discover the GIC CPU-interface mask of the calling CPU by scanning the
 * banked ITARGETSR registers for the SGI/PPI range.
 */
181 gic_cpu_mask(struct arm_gic_softc *sc)
186 /* Read the current cpuid mask by reading ITARGETSR{0..7} */
187 for (i = 0; i < 8; i++) {
188 mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
192 /* No mask found, assume we are on CPU interface 0 */
196 /* Collect the mask in the lower byte */
/*
 * Bring up the GIC on a secondary (AP) CPU: record its target mask,
 * reset banked priorities/groups, enable the CPU interface and
 * distributor, and unmask SGIs/PPIs that already have handlers.
 */
205 arm_gic_init_secondary(device_t dev)
207 struct arm_gic_softc *sc = device_get_softc(dev);
210 /* Set the mask so we can find this CPU to send it IPIs */
211 cpu = PCPU_GET(cpuid);
212 arm_gic_map[cpu] = gic_cpu_mask(sc);
/* Banked per-CPU priorities: set all to highest (0). */
214 for (irq = 0; irq < sc->nirqs; irq += 4)
215 gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);
217 /* Set all the interrupts to be in Group 0 (secure) */
218 for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
219 gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
222 /* Enable CPU interface */
223 gic_c_write_4(sc, GICC_CTLR, 1);
225 /* Set priority mask register. */
226 gic_c_write_4(sc, GICC_PMR, 0xff);
228 /* Enable interrupt distribution */
229 gic_d_write_4(sc, GICD_CTLR, 0x01);
231 /* Unmask attached SGI interrupts. */
232 for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
233 if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
234 gic_irq_unmask(sc, irq);
236 /* Unmask attached PPI interrupts. */
237 for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
238 if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
239 gic_irq_unmask(sc, irq);
/*
 * Allocate and register one INTRNG interrupt source per hardware IRQ.
 * SGIs are registered as IPIs ("%s,i%u"), PPIs as per-CPU ("%s,p%u"),
 * and the rest as SPIs ("%s,s%u").  On registration failure the whole
 * array is freed and the error returned.
 */
244 arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
248 struct gic_irqsrc *irqs;
249 struct intr_irqsrc *isrc;
252 irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
255 name = device_get_nameunit(sc->gic_dev);
256 for (irq = 0; irq < num; irq++) {
257 irqs[irq].gi_irq = irq;
/* Pol/trig start as CONFORM until configured by setup_intr/MSI. */
258 irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
259 irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
261 isrc = &irqs[irq].gi_isrc;
262 if (irq <= GIC_LAST_SGI) {
263 error = intr_isrc_register(isrc, sc->gic_dev,
264 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
265 } else if (irq <= GIC_LAST_PPI) {
266 error = intr_isrc_register(isrc, sc->gic_dev,
267 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
269 error = intr_isrc_register(isrc, sc->gic_dev, 0,
270 "%s,s%u", name, irq - GIC_FIRST_SPI);
273 /* XXX call intr_isrc_deregister() */
274 free(irqs, M_DEVBUF);
/*
 * Reserve [start, start + count) SPIs for MSI/MSI-X use (called by the
 * GICv2m frontend).  Each reserved source must be untouched so far
 * (asserted); it is then forced to edge/high and flagged GI_FLAG_MSI.
 */
284 arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
286 struct arm_gic_softc *sc;
289 sc = device_get_softc(dev);
291 KASSERT((start + count) <= sc->nirqs,
292 ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
293 start, count, sc->nirqs));
294 for (i = 0; i < count; i++) {
295 KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
296 ("%s: MSI interrupt %d already has a handler", __func__,
298 KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
299 ("%s: MSI interrupt %d already has a polarity", __func__,
301 KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
302 ("%s: MSI interrupt %d already has a trigger", __func__,
/* MSIs are always edge-triggered, active-high. */
304 sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
305 sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
306 sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
/*
 * Attach the GIC: map registers, size the IRQ space from GICD_TYPER,
 * register interrupt sources, then program a known-good default state
 * (all SPIs level/low, disabled, priority 0, targeted at this CPU)
 * before enabling the CPU interface and distributor.
 */
311 arm_gic_attach(device_t dev)
313 struct arm_gic_softc *sc;
315 uint32_t icciidr, mask, nirqs;
320 sc = device_get_softc(dev);
322 if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
323 device_printf(dev, "could not allocate resources\n");
330 /* Initialize mutex */
331 mtx_init(&sc->mutex, "GIC lock", NULL, MTX_SPIN);
333 /* Disable interrupt forwarding to the CPU interface */
334 gic_d_write_4(sc, GICD_CTLR, 0x00);
336 /* Get the number of interrupts */
337 sc->typer = gic_d_read_4(sc, GICD_TYPER);
338 nirqs = GICD_TYPER_I_NUM(sc->typer);
340 if (arm_gic_register_isrcs(sc, nirqs)) {
341 device_printf(dev, "could not register irqs\n");
345 icciidr = gic_c_read_4(sc, GICC_IIDR);
347 "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
348 GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
349 GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs);
350 sc->gic_iidr = icciidr;
352 /* Set all global interrupts to be level triggered, active low. */
353 for (i = 32; i < sc->nirqs; i += 16) {
354 gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
357 /* Disable all interrupts. */
358 for (i = 32; i < sc->nirqs; i += 32) {
359 gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
362 /* Find the current cpu mask */
363 mask = gic_cpu_mask(sc);
364 /* Set the mask so we can find this CPU to send it IPIs */
365 arm_gic_map[PCPU_GET(cpuid)] = mask;
366 /* Set all four targets to this cpu */
370 for (i = 0; i < sc->nirqs; i += 4) {
371 gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
373 gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
377 /* Set all the interrupts to be in Group 0 (secure) */
378 for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
379 gic_d_write_4(sc, GICD_IGROUPR(i), 0);
382 /* Enable CPU interface */
383 gic_c_write_4(sc, GICC_CTLR, 1);
385 /* Set priority mask register. */
386 gic_c_write_4(sc, GICC_PMR, 0xff);
388 /* Enable interrupt distribution */
389 gic_d_write_4(sc, GICD_CTLR, 0x01);
/* Detach: free the per-IRQ source array and release bus resources. */
398 arm_gic_detach(device_t dev)
400 struct arm_gic_softc *sc;
402 sc = device_get_softc(dev);
404 if (sc->gic_irqs != NULL)
405 free(sc->gic_irqs, M_DEVBUF);
407 bus_release_resources(dev, arm_gic_spec, sc->gic_res);
/* Print a child device line including its memory and IRQ resources. */
413 arm_gic_print_child(device_t bus, device_t child)
415 struct resource_list *rl;
418 rv = bus_print_child_header(bus, child);
420 rl = BUS_GET_RESOURCE_LIST(bus, child);
422 rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
424 rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
427 rv += bus_print_child_footer(bus, child);
/*
 * bus_alloc_resource method for children (e.g. GICv2m): resolve a
 * default-range request from the child's resource list, remap the
 * address through the 'ranges' translation table, then hand off to
 * bus_generic_alloc_resource().
 */
432 static struct resource *
433 arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
434 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
436 struct arm_gic_softc *sc;
437 struct resource_list_entry *rle;
438 struct resource_list *rl;
441 KASSERT(type == SYS_RES_MEMORY, ("Invalid resoure type %x", type));
443 sc = device_get_softc(bus);
446 * Request for the default allocation with a given rid: use resource
447 * list stored in the local device info.
449 if (RMAN_IS_DEFAULT_RANGE(start, end)) {
450 rl = BUS_GET_RESOURCE_LIST(bus, child);
452 if (type == SYS_RES_IOPORT)
453 type = SYS_RES_MEMORY;
455 rle = resource_list_find(rl, type, *rid);
458 device_printf(bus, "no default resources for "
459 "rid = %d, type = %d\n", *rid, type);
467 /* Remap through ranges property */
468 for (j = 0; j < sc->nranges; j++) {
469 if (start >= sc->ranges[j].bus && end <
470 sc->ranges[j].bus + sc->ranges[j].size) {
471 start -= sc->ranges[j].bus;
472 start += sc->ranges[j].host;
473 end -= sc->ranges[j].bus;
474 end += sc->ranges[j].host;
/* Fell off the end of a non-empty ranges table: unmappable. */
478 if (j == sc->nranges && sc->nranges != 0) {
480 device_printf(bus, "Could not map resource "
481 "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);
486 return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
/* Instance-variable reader: exposes hardware revision and bus type. */
491 arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
493 struct arm_gic_softc *sc;
495 sc = device_get_softc(dev);
498 case GIC_IVAR_HW_REV:
499 KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3,
500 ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
501 GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
502 *result = GICD_IIDR_VAR(sc->gic_iidr);
505 KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
506 ("arm_gic_read_ivar: Unknown bus type"));
507 KASSERT(sc->gic_bus <= GIC_BUS_MAX,
508 ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
509 *result = sc->gic_bus;
/* Instance-variable writer: all known ivars are read-only here. */
517 arm_gic_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
520 case GIC_IVAR_HW_REV:
/*
 * Main interrupt filter: acknowledge pending interrupts via GICC_IAR
 * and dispatch them until the spurious ID range is reached.  EOI
 * ordering rules are spelled out in the numbered comment below.
 */
529 arm_gic_intr(void *arg)
531 struct arm_gic_softc *sc = arg;
532 struct gic_irqsrc *gi;
533 uint32_t irq_active_reg, irq;
534 struct trapframe *tf;
536 irq_active_reg = gic_c_read_4(sc, GICC_IAR);
/* Low 10 bits of IAR hold the interrupt ID. */
537 irq = irq_active_reg & 0x3FF;
540 * 1. We do EOI here because recent read value from active interrupt
541 * register must be used for it. Another approach is to save this
542 * value into associated interrupt source.
543 * 2. EOI must be done on same CPU where interrupt has fired. Thus
544 * we must ensure that interrupted thread does not migrate to
546 * 3. EOI cannot be delayed by any preemption which could happen on
547 * critical_exit() used in MI intr code, when interrupt thread is
548 * scheduled. See next point.
549 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
550 * an action and any use of critical_exit() could break this
551 * assumption. See comments within smp_rendezvous_action().
552 * 5. We always return FILTER_HANDLED as this is an interrupt
553 * controller dispatch function. Otherwise, in cascaded interrupt
554 * case, the whole interrupt subtree would be masked.
/* IDs >= nirqs (e.g. 1023) are spurious; optionally log the last IRQ. */
557 if (irq >= sc->nirqs) {
558 if (gic_debug_spurious)
559 device_printf(sc->gic_dev,
560 "Spurious interrupt detected: last irq: %d on CPU%d\n",
561 sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
562 return (FILTER_HANDLED);
565 tf = curthread->td_intr_frame;
567 gi = sc->gic_irqs + irq;
569 * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement
570 * as compiler complains that comparing u_int >= 0 is always true.
572 if (irq <= GIC_LAST_SGI) {
574 /* Call EOI for all IPI before dispatch. */
575 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
576 intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq]);
/* SGI on a UP kernel: nothing to dispatch, just EOI and report. */
579 device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
580 irq - GIC_FIRST_SGI);
581 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
586 if (gic_debug_spurious)
587 sc->last_irq[PCPU_GET(cpuid)] = irq;
/* Edge-triggered sources get their EOI before dispatch. */
588 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
589 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
/* No handler consumed it: mask the stray source and EOI if pending. */
591 if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
592 gic_irq_mask(sc, irq);
593 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
594 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
595 device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
/* Re-read IAR and loop while more interrupts are pending. */
599 arm_irq_memory_barrier(irq);
600 irq_active_reg = gic_c_read_4(sc, GICC_IAR);
601 irq = irq_active_reg & 0x3FF;
605 return (FILTER_HANDLED);
/*
 * Program trigger/polarity for one SPI in GICD_ICFGR.  SGIs and PPIs
 * (irq < GIC_FIRST_SPI) are fixed by hardware and skipped.  Each IRQ
 * occupies a 2-bit field: 16 interrupts per 32-bit ICFGR register.
 */
609 gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
610 enum intr_polarity pol)
615 if (irq < GIC_FIRST_SPI)
618 mtx_lock_spin(&sc->mutex);
620 reg = gic_d_read_4(sc, GICD_ICFGR(irq));
/* Extract the current 2-bit config field for this IRQ. */
621 mask = (reg >> 2*(irq % 16)) & 0x3;
623 if (pol == INTR_POLARITY_LOW) {
624 mask &= ~GICD_ICFGR_POL_MASK;
625 mask |= GICD_ICFGR_POL_LOW;
626 } else if (pol == INTR_POLARITY_HIGH) {
627 mask &= ~GICD_ICFGR_POL_MASK;
628 mask |= GICD_ICFGR_POL_HIGH;
631 if (trig == INTR_TRIGGER_LEVEL) {
632 mask &= ~GICD_ICFGR_TRIG_MASK;
633 mask |= GICD_ICFGR_TRIG_LVL;
634 } else if (trig == INTR_TRIGGER_EDGE) {
635 mask &= ~GICD_ICFGR_TRIG_MASK;
636 mask |= GICD_ICFGR_TRIG_EDGE;
/* Write the updated field back without disturbing neighbours. */
640 reg = reg & ~(0x3 << 2*(irq % 16));
641 reg = reg | (mask << 2*(irq % 16));
642 gic_d_write_4(sc, GICD_ICFGR(irq), reg);
644 mtx_unlock_spin(&sc->mutex);
/*
 * Route an SPI to the CPUs in 'cpus' by writing the per-IRQ byte of
 * GICD_ITARGETSR.  GICv2 supports at most 8 CPU interfaces; requests
 * naming a CPU beyond that range are rejected by the first loop.
 */
648 gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
650 uint32_t cpu, end, mask;
652 end = min(mp_ncpus, 8);
653 for (cpu = end; cpu < MAXCPU; cpu++)
654 if (CPU_ISSET(cpu, cpus))
/* OR together the hardware target masks of all requested CPUs. */
657 for (mask = 0, cpu = 0; cpu < end; cpu++)
658 if (CPU_ISSET(cpu, cpus))
659 mask |= arm_gic_map[cpu];
661 gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
/*
 * Decode a 3-cell FDT interrupt specifier (type, number, flags) into
 * a linear IRQ plus polarity/trigger.  The cell layout is documented
 * in the comment block below (standard arm,gic binding).
 */
667 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
668 enum intr_polarity *polp, enum intr_trigger *trigp)
673 *polp = INTR_POLARITY_CONFORM;
674 *trigp = INTR_TRIGGER_CONFORM;
681 * The 1st cell is the interrupt type:
684 * The 2nd cell contains the interrupt number:
687 * The 3rd cell is the flags, encoded as follows:
688 * bits[3:0] trigger type and level flags
689 * 1 = low-to-high edge triggered
690 * 2 = high-to-low edge triggered
691 * 4 = active high level-sensitive
692 * 8 = active low level-sensitive
693 * bits[15:8] PPI interrupt cpu mask
694 * Each bit corresponds to each of the 8 possible cpus
695 * attached to the GIC. A bit set to '1' indicated
696 * the interrupt is wired to that CPU.
700 irq = GIC_FIRST_SPI + cells[1];
701 /* SPI irq is checked later. */
704 irq = GIC_FIRST_PPI + cells[1];
705 if (irq > GIC_LAST_PPI) {
706 device_printf(dev, "unsupported PPI interrupt "
707 "number %u\n", cells[1]);
712 device_printf(dev, "unsupported interrupt type "
713 "configuration %u\n", cells[0]);
/* Reject flag combinations outside the defined low nibble. */
717 tripol = cells[2] & 0xff;
718 if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
720 device_printf(dev, "unsupported trigger/polarity "
721 "configuration 0x%02x\n", tripol);
724 *polp = INTR_POLARITY_CONFORM;
725 *trigp = tripol & FDT_INTR_EDGE_MASK ?
726 INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
/*
 * Map an MSI interrupt-map datum back to its IRQ number; MSIs are by
 * definition edge-triggered and active-high on this controller.
 */
734 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
735 enum intr_polarity *polp, enum intr_trigger *trigp)
737 struct gic_irqsrc *gi;
739 /* Map a non-GICv2m MSI */
740 gi = (struct gic_irqsrc *)msi_data->isrc;
746 /* MSI/MSI-X interrupts are always edge triggered with high polarity */
747 *polp = INTR_POLARITY_HIGH;
748 *trigp = INTR_TRIGGER_EDGE;
/*
 * Dispatch on the interrupt-map data type (FDT / ACPI / MSI), decode
 * it to an IRQ + pol/trig, and range-check the results against the
 * controller's IRQ count and the legal polarity/trigger enums.
 */
754 gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
755 enum intr_polarity *polp, enum intr_trigger *trigp)
758 enum intr_polarity pol;
759 enum intr_trigger trig;
760 struct arm_gic_softc *sc;
761 struct intr_map_data_msi *dam;
763 struct intr_map_data_fdt *daf;
766 struct intr_map_data_acpi *daa;
769 sc = device_get_softc(dev);
770 switch (data->type) {
772 case INTR_MAP_DATA_FDT:
773 daf = (struct intr_map_data_fdt *)data;
774 if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
/* Reserved-for-MSI sources must never arrive via FDT. */
777 KASSERT(irq >= sc->nirqs ||
778 (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
779 ("%s: Attempting to map a MSI interrupt from FDT",
784 case INTR_MAP_DATA_ACPI:
785 daa = (struct intr_map_data_acpi *)data;
791 case INTR_MAP_DATA_MSI:
793 dam = (struct intr_map_data_msi *)data;
794 if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
/* Validate decoded values before publishing to the caller. */
801 if (irq >= sc->nirqs)
803 if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
804 pol != INTR_POLARITY_HIGH)
806 if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
807 trig != INTR_TRIGGER_LEVEL)
/* PIC map_intr method: thin wrapper returning the isrc for the IRQ. */
819 arm_gic_map_intr(device_t dev, struct intr_map_data *data,
820 struct intr_irqsrc **isrcp)
824 struct arm_gic_softc *sc;
826 error = gic_map_intr(dev, data, &irq, NULL, NULL);
828 sc = device_get_softc(dev);
829 *isrcp = GIC_INTR_ISRC(sc, irq);
/*
 * PIC setup_intr method: resolve the requested pol/trig (MSI sources
 * are fixed edge/high), verify consistency with any previous setup,
 * then program the hardware config and bind the IRQ to a CPU.
 */
835 arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
836 struct resource *res, struct intr_map_data *data)
838 struct arm_gic_softc *sc = device_get_softc(dev);
839 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
840 enum intr_trigger trig;
841 enum intr_polarity pol;
843 if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
847 KASSERT(pol == INTR_POLARITY_HIGH,
848 ("%s: MSI interrupts must be active-high", __func__));
849 KASSERT(trig == INTR_TRIGGER_EDGE,
850 ("%s: MSI interrupts must be edge triggered", __func__));
851 } else if (data != NULL) {
854 /* Get config for resource. */
855 if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
859 pol = INTR_POLARITY_CONFORM;
860 trig = INTR_TRIGGER_CONFORM;
863 /* Compare config if this is not first setup. */
864 if (isrc->isrc_handlers != 0) {
865 if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
866 (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
872 /* For MSI/MSI-X we should have already configured these */
873 if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
874 if (pol == INTR_POLARITY_CONFORM)
875 pol = INTR_POLARITY_LOW; /* just pick some */
876 if (trig == INTR_TRIGGER_CONFORM)
877 trig = INTR_TRIGGER_EDGE; /* just pick some */
882 /* Edge triggered interrupts need an early EOI sent */
883 if (gi->gi_trig == INTR_TRIGGER_EDGE)
884 gi->gi_flags |= GI_FLAG_EARLY_EOI;
888 * XXX - In case that per CPU interrupt is going to be enabled in time
889 * when SMP is already started, we need some IPI call which
890 * enables it on others CPUs. Further, it's more complicated as
891 * pic_enable_source() and pic_disable_source() should act on
892 * per CPU basis only. Thus, it should be solved here somehow.
894 if (isrc->isrc_flags & INTR_ISRCF_PPI)
895 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
897 gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
898 arm_gic_bind_intr(dev, isrc);
/*
 * PIC teardown_intr method: once the last handler is gone, reset a
 * non-MSI source's pol/trig back to CONFORM so it can be reconfigured.
 */
903 arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
904 struct resource *res, struct intr_map_data *data)
906 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
908 if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
909 gi->gi_pol = INTR_POLARITY_CONFORM;
910 gi->gi_trig = INTR_TRIGGER_CONFORM;
/* PIC enable_intr: barrier then unmask so prior stores are visible. */
916 arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
918 struct arm_gic_softc *sc = device_get_softc(dev);
919 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
921 arm_irq_memory_barrier(gi->gi_irq);
922 gic_irq_unmask(sc, gi->gi_irq);
/* PIC disable_intr: mask the source in the distributor. */
926 arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
928 struct arm_gic_softc *sc = device_get_softc(dev);
929 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
931 gic_irq_mask(sc, gi->gi_irq);
/*
 * PIC pre_ithread: mask and EOI before handing off to the interrupt
 * thread, so the level-triggered source cannot re-fire meanwhile.
 */
935 arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
937 struct arm_gic_softc *sc = device_get_softc(dev);
938 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
940 arm_gic_disable_intr(dev, isrc);
941 gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
/* PIC post_ithread: re-enable the source after the ithread ran. */
945 arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
948 arm_irq_memory_barrier(0);
949 arm_gic_enable_intr(dev, isrc);
/* PIC post_filter: EOI now, unless the early EOI was already sent. */
953 arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
955 struct arm_gic_softc *sc = device_get_softc(dev);
956 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
958 /* EOI for edge-triggered done earlier. */
959 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
962 arm_irq_memory_barrier(0);
963 gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
/*
 * PIC bind_intr method: SGIs/PPIs are inherently per-CPU and skipped;
 * an SPI with no CPU affinity gets the next CPU round-robin, then the
 * target byte is programmed via gic_bind().
 */
967 arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
969 struct arm_gic_softc *sc = device_get_softc(dev);
970 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
972 if (gi->gi_irq < GIC_FIRST_SPI)
975 if (CPU_EMPTY(&isrc->isrc_cpu)) {
976 gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
977 CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
979 return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
/*
 * PIC ipi_send: build a target-list mask from the cpuset and trigger
 * the SGI through GICD_SGIR.
 */
984 arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
987 struct arm_gic_softc *sc = device_get_softc(dev);
988 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
991 for (i = 0; i < MAXCPU; i++)
992 if (CPU_ISSET(i, &cpus))
993 val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
995 gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
/*
 * PIC ipi_setup: allocate the next free SGI for the given IPI number
 * and record the mapping used by arm_gic_intr() dispatch.
 */
999 arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
1001 struct intr_irqsrc *isrc;
1002 struct arm_gic_softc *sc = device_get_softc(dev);
1004 if (sgi_first_unused > GIC_LAST_SGI)
1007 isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1008 sgi_to_ipi[sgi_first_unused++] = ipi;
1010 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
/*
 * Allocate 'count' contiguous MSI IRQs out of [mbi_start, mbi_start +
 * mbi_count), aligned to 'maxcount' (both powers of two).  Marks the
 * winners GI_FLAG_MSI_USED under the softc spin lock and returns their
 * isrc pointers in 'isrc'.
 */
1018 arm_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count, int count,
1019 int maxcount, struct intr_irqsrc **isrc)
1021 struct arm_gic_softc *sc;
1022 int i, irq, end_irq;
1025 KASSERT(powerof2(count), ("%s: bad count", __func__));
1026 KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));
1028 sc = device_get_softc(dev);
1030 mtx_lock_spin(&sc->mutex);
1033 for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1034 /* Start on an aligned interrupt */
1035 if ((irq & (maxcount - 1)) != 0)
1038 /* Assume we found a valid range until shown otherwise */
1041 /* Check this range is valid */
1042 for (end_irq = irq; end_irq != irq + count; end_irq++) {
1043 /* No free interrupts */
1044 if (end_irq == mbi_start + mbi_count) {
1049 KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
1050 ("%s: Non-MSI interrupt found", __func__));
1052 /* This is already used */
1053 if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
1063 /* Not enough interrupts were found */
1064 if (!found || irq == mbi_start + mbi_count) {
1065 mtx_unlock_spin(&sc->mutex);
1069 for (i = 0; i < count; i++) {
1070 /* Mark the interrupt as used */
1071 sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
1073 mtx_unlock_spin(&sc->mutex);
/* Publish the isrcs outside the lock; flags already claim them. */
1075 for (i = 0; i < count; i++)
1076 isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];
/* Release previously allocated MSI IRQs: clear their USED flags. */
1082 arm_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc)
1084 struct arm_gic_softc *sc;
1085 struct gic_irqsrc *gi;
1088 sc = device_get_softc(dev);
1090 mtx_lock_spin(&sc->mutex);
1091 for (i = 0; i < count; i++) {
1092 gi = (struct gic_irqsrc *)isrc[i];
1094 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1095 ("%s: Trying to release an unused MSI-X interrupt",
1098 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1100 mtx_unlock_spin(&sc->mutex);
/*
 * Allocate a single MSI-X IRQ from the reserved range: first unused
 * GI_FLAG_MSI source wins; marked USED under the softc spin lock.
 */
1106 arm_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count,
1107 struct intr_irqsrc **isrc)
1109 struct arm_gic_softc *sc;
1112 sc = device_get_softc(dev);
1114 mtx_lock_spin(&sc->mutex);
1115 /* Find an unused interrupt */
1116 for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1117 KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1118 ("%s: Non-MSI interrupt found", __func__));
1119 if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1122 /* No free interrupt was found */
1123 if (irq == mbi_start + mbi_count) {
1124 mtx_unlock_spin(&sc->mutex);
1128 /* Mark the interrupt as used */
1129 sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1130 mtx_unlock_spin(&sc->mutex);
1132 *isrc = (struct intr_irqsrc *)&sc->gic_irqs[irq];
/* Release one MSI-X IRQ: assert it was used, then clear the flag. */
1138 arm_gic_release_msix(device_t dev, struct intr_irqsrc *isrc)
1140 struct arm_gic_softc *sc;
1141 struct gic_irqsrc *gi;
1143 sc = device_get_softc(dev);
1144 gi = (struct gic_irqsrc *)isrc;
1146 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1147 ("%s: Trying to release an unused MSI-X interrupt", __func__));
1149 mtx_lock_spin(&sc->mutex);
1150 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1151 mtx_unlock_spin(&sc->mutex);
/*
 * DDB helper: dump CPU-interface and distributor registers, then a
 * per-IRQ line with group, enable/pending/active state, priority,
 * target byte, and decoded polarity/trigger.
 */
1158 arm_gic_db_show(device_t dev)
1160 struct arm_gic_softc *sc = device_get_softc(dev);
1164 db_printf("%s CPU registers:\n", device_get_nameunit(dev));
1165 db_printf(" CTLR: %08x PMR: %08x BPR: %08x RPR: %08x\n",
1166 gic_c_read_4(sc, GICC_CTLR), gic_c_read_4(sc, GICC_PMR),
1167 gic_c_read_4(sc, GICC_BPR), gic_c_read_4(sc, GICC_RPR));
1168 db_printf("HPPIR: %08x IIDR: %08x\n", gic_c_read_4(sc, GICC_HPPIR),
1169 gic_c_read_4(sc, GICC_IIDR));
1171 db_printf("%s Distributor registers:\n", device_get_nameunit(dev));
1172 db_printf(" CTLR: %08x TYPER: %08x IIDR: %08x\n",
1173 gic_d_read_4(sc, GICD_CTLR), gic_d_read_4(sc, GICD_TYPER),
1174 gic_d_read_4(sc, GICD_IIDR));
1175 for (i = 0; i < sc->nirqs; i++) {
1176 if (i <= GIC_LAST_SGI)
1177 db_printf("SGI %2u ", i);
1178 else if (i <= GIC_LAST_PPI)
1179 db_printf("PPI %2u ", i - GIC_FIRST_PPI);
1181 db_printf("SPI %2u ", i - GIC_FIRST_SPI);
1182 db_printf(" grp:%u",
1183 !!(gic_d_read_4(sc, GICD_IGROUPR(i)) & GICD_I_MASK(i)));
1184 db_printf(" enable:%u pend:%u active:%u",
1185 !!(gic_d_read_4(sc, GICD_ISENABLER(i)) & GICD_I_MASK(i)),
1186 !!(gic_d_read_4(sc, GICD_ISPENDR(i)) & GICD_I_MASK(i)),
1187 !!(gic_d_read_4(sc, GICD_ISACTIVER(i)) & GICD_I_MASK(i)));
/* Priority and target are 4 bytes per register: shift by (i & 3) * 8. */
1188 db_printf(" pri:%u",
1189 (gic_d_read_4(sc, GICD_IPRIORITYR(i)) >> 8 * (i & 0x3)) &
1191 db_printf(" trg:%u",
1192 (gic_d_read_4(sc, GICD_ITARGETSR(i)) >> 8 * (i & 0x3)) &
/* ICFGR packs 16 interrupts per register, 2 bits each. */
1194 val = gic_d_read_4(sc, GICD_ICFGR(i)) >> 2 * (i & 0xf);
1195 if ((val & GICD_ICFGR_POL_MASK) == GICD_ICFGR_POL_LOW)
1199 if ((val & GICD_ICFGR_TRIG_MASK) == GICD_ICFGR_TRIG_LVL)
/* newbus method table wiring the bus, PIC, and GIC interfaces. */
1208 static device_method_t arm_gic_methods[] = {
1210 DEVMETHOD(bus_print_child, arm_gic_print_child),
1211 DEVMETHOD(bus_add_child, bus_generic_add_child),
1212 DEVMETHOD(bus_alloc_resource, arm_gic_alloc_resource),
1213 DEVMETHOD(bus_release_resource, bus_generic_release_resource),
1214 DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
1215 DEVMETHOD(bus_read_ivar, arm_gic_read_ivar),
1216 DEVMETHOD(bus_write_ivar, arm_gic_write_ivar),
1218 /* Interrupt controller interface */
1219 DEVMETHOD(pic_disable_intr, arm_gic_disable_intr),
1220 DEVMETHOD(pic_enable_intr, arm_gic_enable_intr),
1221 DEVMETHOD(pic_map_intr, arm_gic_map_intr),
1222 DEVMETHOD(pic_setup_intr, arm_gic_setup_intr),
1223 DEVMETHOD(pic_teardown_intr, arm_gic_teardown_intr),
1224 DEVMETHOD(pic_post_filter, arm_gic_post_filter),
1225 DEVMETHOD(pic_post_ithread, arm_gic_post_ithread),
1226 DEVMETHOD(pic_pre_ithread, arm_gic_pre_ithread),
1228 DEVMETHOD(pic_bind_intr, arm_gic_bind_intr),
1229 DEVMETHOD(pic_init_secondary, arm_gic_init_secondary),
1230 DEVMETHOD(pic_ipi_send, arm_gic_ipi_send),
1231 DEVMETHOD(pic_ipi_setup, arm_gic_ipi_setup),
/* GIC interface consumed by the GICv2m child. */
1235 DEVMETHOD(gic_reserve_msi_range, arm_gic_reserve_msi_range),
1236 DEVMETHOD(gic_alloc_msi, arm_gic_alloc_msi),
1237 DEVMETHOD(gic_release_msi, arm_gic_release_msi),
1238 DEVMETHOD(gic_alloc_msix, arm_gic_alloc_msix),
1239 DEVMETHOD(gic_release_msix, arm_gic_release_msix),
1241 DEVMETHOD(gic_db_show, arm_gic_db_show),
1247 DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
1248 sizeof(struct arm_gic_softc));
/* "show gic <name>": look up the named device and dump its state. */
1251 DB_SHOW_COMMAND_FLAGS(gic, db_show_gic, CS_OWN)
1258 t = db_read_token();
1260 dev = device_lookup_by_name(db_tok_string);
1265 db_printf("usage: show gic <name>\n");
1270 db_printf("device not found\n");
/* "show all gics": iterate every unit in the 'gic' devclass. */
1277 DB_SHOW_ALL_COMMAND(gics, db_show_all_gics)
1283 dc = devclass_find("gic");
1287 for (i = 0; i < devclass_get_maxunit(dc); i++) {
1288 dev = devclass_get_device(dc, i);
1299 * GICv2m support -- the GICv2 MSI/MSI-X controller.
/* GICv2m frame registers: TYPER encodes the SPI base/count for MSIs. */
1302 #define GICV2M_MSI_TYPER 0x008
1303 #define MSI_TYPER_SPI_BASE(x) (((x) >> 16) & 0x3ff)
1304 #define MSI_TYPER_SPI_COUNT(x) (((x) >> 0) & 0x3ff)
1305 #define GICv2M_MSI_SETSPI_NS 0x040
1306 #define GICV2M_MSI_IIDR 0xFCC
/*
 * Attach the GICv2m frame: map its registers, read the SPI window from
 * MSI_TYPER, reserve that range in the parent GIC, and register as an
 * MSI controller.
 */
1309 arm_gicv2m_attach(device_t dev)
1311 struct arm_gicv2m_softc *sc;
1315 sc = device_get_softc(dev);
1318 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1320 if (sc->sc_mem == NULL) {
1321 device_printf(dev, "Unable to allocate resources\n");
1325 typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
1326 sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
1327 sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);
1329 /* Reserve these interrupts for MSI/MSI-X use */
1330 GIC_RESERVE_MSI_RANGE(device_get_parent(dev), sc->sc_spi_start,
1333 intr_msi_register(dev, sc->sc_xref);
1336 device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
1337 sc->sc_spi_start + sc->sc_spi_count - 1);
/*
 * MSI interface methods: each forwards to the parent GIC's allocator,
 * passing this frame's SPI window where applicable.
 */
1343 arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1344 device_t *pic, struct intr_irqsrc **srcs)
1346 struct arm_gicv2m_softc *sc;
1349 sc = device_get_softc(dev);
1350 error = GIC_ALLOC_MSI(device_get_parent(dev), sc->sc_spi_start,
1351 sc->sc_spi_count, count, maxcount, srcs);
/* Release MSIs back to the parent GIC. */
1360 arm_gicv2m_release_msi(device_t dev, device_t child, int count,
1361 struct intr_irqsrc **isrc)
1363 return (GIC_RELEASE_MSI(device_get_parent(dev), count, isrc));
/* Allocate a single MSI-X vector from the parent within our window. */
1367 arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
1368 struct intr_irqsrc **isrcp)
1370 struct arm_gicv2m_softc *sc;
1373 sc = device_get_softc(dev);
1374 error = GIC_ALLOC_MSIX(device_get_parent(dev), sc->sc_spi_start,
1375 sc->sc_spi_count, isrcp);
/* Release a single MSI-X vector back to the parent GIC. */
1384 arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1386 return (GIC_RELEASE_MSIX(device_get_parent(dev), isrc));
/*
 * Produce the doorbell address/data pair a device writes to raise the
 * MSI: physical address of this frame's SETSPI_NS register.
 */
1390 arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1391 uint64_t *addr, uint32_t *data)
1393 struct arm_gicv2m_softc *sc = device_get_softc(dev);
1394 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1396 *addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
/* Method table and class for the gicv2m child device. */
1402 static device_method_t arm_gicv2m_methods[] = {
1403 /* Device interface */
1404 DEVMETHOD(device_attach, arm_gicv2m_attach),
1407 DEVMETHOD(msi_alloc_msi, arm_gicv2m_alloc_msi),
1408 DEVMETHOD(msi_release_msi, arm_gicv2m_release_msi),
1409 DEVMETHOD(msi_alloc_msix, arm_gicv2m_alloc_msix),
1410 DEVMETHOD(msi_release_msix, arm_gicv2m_release_msix),
1411 DEVMETHOD(msi_map_msi, arm_gicv2m_map_msi),
1417 DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
1418 sizeof(struct arm_gicv2m_softc));