2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2011 The FreeBSD Foundation
7 * Developed by Damjan Marion <damjan.marion@gmail.com>
9 * Based on OMAP4 GIC code by Ben Gray
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the company nor the name of the author may be used to
20 * endorse or promote products derived from this software without specific
21 * prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
40 #include "opt_platform.h"
42 #include <sys/param.h>
43 #include <sys/systm.h>
45 #include <sys/kernel.h>
47 #include <sys/module.h>
48 #include <sys/malloc.h>
52 #include <sys/cpuset.h>
54 #include <sys/mutex.h>
56 #include <sys/sched.h>
61 #include <machine/bus.h>
62 #include <machine/intr.h>
63 #include <machine/smp.h>
66 #include <dev/fdt/fdt_intr.h>
67 #include <dev/ofw/ofw_bus_subr.h>
71 #include <contrib/dev/acpica/include/acpi.h>
72 #include <dev/acpica/acpivar.h>
75 #include <arm/arm/gic.h>
76 #include <arm/arm/gic_common.h>
81 /* We are using GICv2 register naming */
83 /* Distributor Registers */
/*
 * GIC CPU-interface register offsets (GICv2 names); the trailing comments
 * give the corresponding GICv1 register names.
 */
86 #define GICC_CTLR 0x0000 /* v1 ICCICR */
87 #define GICC_PMR 0x0004 /* v1 ICCPMR */
88 #define GICC_BPR 0x0008 /* v1 ICCBPR */
89 #define GICC_IAR 0x000C /* v1 ICCIAR */
90 #define GICC_EOIR 0x0010 /* v1 ICCEOIR */
91 #define GICC_RPR 0x0014 /* v1 ICCRPR */
92 #define GICC_HPPIR 0x0018 /* v1 ICCHPIR */
93 #define GICC_ABPR 0x001C /* v1 ICCABPR */
94 #define GICC_IIDR 0x00FC /* v1 ICCIIDR*/
/* GICD_TYPER bit: the distributor implements the Security Extensions. */
97 #define GICD_TYPER_SECURITYEXT 0x400
98 #define GIC_SUPPORT_SECEXT(_sc) \
99 ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
/* Default ICFGR value used at attach; overridable by platform code. */
101 #ifndef GIC_DEFAULT_ICFGR_INIT
102 #define GIC_DEFAULT_ICFGR_INIT 0x00000000
/*
 * Per-interrupt source state (struct gic_irqsrc members; the struct header
 * is elided from this excerpt).
 */
106 struct intr_irqsrc gi_isrc;
108 enum intr_polarity gi_pol;
109 enum intr_trigger gi_trig;
/* gi_flags bits: */
110 #define GI_FLAG_EARLY_EOI (1 << 0)
111 #define GI_FLAG_MSI (1 << 1) /* This interrupt source should only */
112 /* be used for MSI/MSI-X interrupts */
113 #define GI_FLAG_MSI_USED (1 << 2) /* This irq is already allocated */
114 /* for a MSI/MSI-X interrupt */
/* Round-robin cursor used when auto-binding SPIs to CPUs. */
118 static u_int gic_irq_cpu;
119 static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);
/* Maps each SGI number to the IPI registered on it; allocated in order. */
122 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
123 static u_int sgi_first_unused = GIC_FIRST_SGI;
126 #define GIC_INTR_ISRC(sc, irq) (&sc->gic_irqs[irq].gi_isrc)
/* Resources: distributor and CPU-interface memory, optional parent IRQ. */
128 static struct resource_spec arm_gic_spec[] = {
129 { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Distributor registers */
130 { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* CPU Interrupt Intf. registers */
131 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
/* Spurious-interrupt logging defaults on for arm INVARIANTS kernels. */
135 #if defined(__arm__) && defined(INVARIANTS)
136 static int gic_debug_spurious = 1;
138 static int gic_debug_spurious = 0;
140 TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);
/* Per-CPU GICD_ITARGETSR target byte, indexed by cpuid. */
142 static u_int arm_gic_map[MAXCPU];
144 static struct arm_gic_softc *gic_sc = NULL;
/* Bus-space accessors for the CPU interface (_c) and distributor (_d). */
146 #define gic_c_read_4(_sc, _reg) \
147 bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg))
148 #define gic_c_write_4(_sc, _reg, _val) \
149 bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val))
150 #define gic_d_read_4(_sc, _reg) \
151 bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg))
152 #define gic_d_write_1(_sc, _reg, _val) \
153 bus_space_write_1((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
154 #define gic_d_write_4(_sc, _reg, _val) \
155 bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
/* Unmask (enable) one interrupt by setting its bit in GICD_ISENABLERn. */
158 gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
161 	gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq));
/* Mask (disable) one interrupt by setting its bit in GICD_ICENABLERn. */
165 gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
168 	gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
/*
 * Discover the current CPU's interface mask by scanning the banked
 * GICD_ITARGETSR0..7 registers; the result is used as the per-CPU target
 * byte for SPI routing and IPIs.  (Result extraction is elided from this
 * excerpt — NOTE(review): confirm against the full source.)
 */
172 gic_cpu_mask(struct arm_gic_softc *sc)
177 /* Read the current cpuid mask by reading ITARGETSR{0..7} */
178 for (i = 0; i < 8; i++) {
179 mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
183 /* No mask found, assume we are on CPU interface 0 */
187 /* Collect the mask in the lower byte */
/*
 * Per-CPU GIC bring-up for secondary (AP) CPUs: record this CPU's
 * ITARGETSR mask for IPI delivery, zero banked priorities, optionally put
 * interrupts in Group 0 when Security Extensions are present, enable the
 * CPU interface and distributor, then unmask any SGIs/PPIs that already
 * have handlers bound on this CPU.
 */
196 arm_gic_init_secondary(device_t dev)
198 struct arm_gic_softc *sc = device_get_softc(dev);
201 /* Set the mask so we can find this CPU to send it IPIs */
202 cpu = PCPU_GET(cpuid);
203 arm_gic_map[cpu] = gic_cpu_mask(sc);
/* Priority 0 (highest) for all interrupts; IPRIORITYR packs 4 per word. */
205 for (irq = 0; irq < sc->nirqs; irq += 4)
206 gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);
208 /* Set all the interrupts to be in Group 0 (secure) */
209 for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
210 gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
213 /* Enable CPU interface */
214 gic_c_write_4(sc, GICC_CTLR, 1);
216 /* Set priority mask register. */
217 gic_c_write_4(sc, GICC_PMR, 0xff);
219 /* Enable interrupt distribution */
220 gic_d_write_4(sc, GICD_CTLR, 0x01);
222 /* Unmask attached SGI interrupts. */
223 for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
224 if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
225 gic_irq_unmask(sc, irq);
227 /* Unmask attached PPI interrupts. */
228 for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
229 if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
230 gic_irq_unmask(sc, irq);
/*
 * Allocate the gic_irqsrc array for 'num' interrupts and register each one
 * with INTRNG as an IPI (SGI range), PPI, or plain SPI source, named after
 * the device.  On registration failure the array is freed (already
 * registered sources are leaked — see the XXX below).
 */
235 arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
239 struct gic_irqsrc *irqs;
240 struct intr_irqsrc *isrc;
243 irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
246 name = device_get_nameunit(sc->gic_dev);
247 for (irq = 0; irq < num; irq++) {
248 irqs[irq].gi_irq = irq;
/* Polarity/trigger start as CONFORM until configured in setup_intr. */
249 irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
250 irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
252 isrc = &irqs[irq].gi_isrc;
253 if (irq <= GIC_LAST_SGI) {
254 error = intr_isrc_register(isrc, sc->gic_dev,
255 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
256 } else if (irq <= GIC_LAST_PPI) {
257 error = intr_isrc_register(isrc, sc->gic_dev,
258 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
260 error = intr_isrc_register(isrc, sc->gic_dev, 0,
261 "%s,s%u", name, irq - GIC_FIRST_SPI);
264 /* XXX call intr_isrc_deregister() */
265 free(irqs, M_DEVBUF);
/*
 * Reserve 'count' SPIs starting at 'start' for MSI/MSI-X use (called by the
 * GICv2m frame at attach).  Each reserved source must be completely
 * unconfigured; it is then fixed to edge-triggered/active-high as MSI
 * writes require, and flagged GI_FLAG_MSI so it cannot be mapped from
 * FDT/ACPI.
 */
275 arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
277 struct arm_gic_softc *sc;
280 sc = device_get_softc(dev);
282 KASSERT((start + count) < sc->nirqs,
283 ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
284 start, count, sc->nirqs));
285 for (i = 0; i < count; i++) {
286 KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
287 ("%s: MSI interrupt %d already has a handler", __func__,
289 KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
290 ("%s: MSI interrupt %d already has a polarity", __func__,
292 KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
293 ("%s: MSI interrupt %d already has a trigger", __func__,
295 sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
296 sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
297 sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
/*
 * Device attach: map the distributor and CPU-interface registers, size the
 * controller from GICD_TYPER, register all interrupt sources, program sane
 * defaults (level/low config, everything masked, priority 0, all SPIs
 * targeted at the boot CPU), then enable the CPU interface and distributor.
 */
302 arm_gic_attach(device_t dev)
304 struct arm_gic_softc *sc;
306 uint32_t icciidr, mask, nirqs;
311 sc = device_get_softc(dev);
313 if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
314 device_printf(dev, "could not allocate resources\n");
321 /* Initialize mutex */
322 mtx_init(&sc->mutex, "GIC lock", NULL, MTX_SPIN);
324 /* Distributor Interface */
325 sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]);
326 sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]);
/* CPU interface registers are resource 1. */
329 sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]);
330 sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]);
332 /* Disable interrupt forwarding to the CPU interface */
333 gic_d_write_4(sc, GICD_CTLR, 0x00);
335 /* Get the number of interrupts */
336 sc->typer = gic_d_read_4(sc, GICD_TYPER);
337 nirqs = GICD_TYPER_I_NUM(sc->typer);
339 if (arm_gic_register_isrcs(sc, nirqs)) {
340 device_printf(dev, "could not register irqs\n");
/* Report implementer/revision info from GICC_IIDR. */
344 icciidr = gic_c_read_4(sc, GICC_IIDR);
346 "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
347 GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
348 GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs),
349 sc->gic_iidr = icciidr;
351 /* Set all global interrupts to be level triggered, active low. */
352 for (i = 32; i < sc->nirqs; i += 16) {
353 gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
356 /* Disable all interrupts. */
357 for (i = 32; i < sc->nirqs; i += 32) {
358 gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
361 /* Find the current cpu mask */
362 mask = gic_cpu_mask(sc);
363 /* Set the mask so we can find this CPU to send it IPIs */
364 arm_gic_map[PCPU_GET(cpuid)] = mask;
365 /* Set all four targets to this cpu */
369 for (i = 0; i < sc->nirqs; i += 4) {
370 gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
372 gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
376 /* Set all the interrupts to be in Group 0 (secure) */
377 for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
378 gic_d_write_4(sc, GICD_IGROUPR(i), 0);
381 /* Enable CPU interface */
382 gic_c_write_4(sc, GICC_CTLR, 1);
384 /* Set priority mask register. */
385 gic_c_write_4(sc, GICC_PMR, 0xff);
387 /* Enable interrupt distribution */
388 gic_d_write_4(sc, GICD_CTLR, 0x01);
/* Device detach: free the interrupt-source array and release resources. */
397 arm_gic_detach(device_t dev)
399 struct arm_gic_softc *sc;
401 sc = device_get_softc(dev);
/* NOTE(review): free(NULL) is a no-op, so the guard is redundant. */
403 if (sc->gic_irqs != NULL)
404 free(sc->gic_irqs, M_DEVBUF);
406 bus_release_resources(dev, arm_gic_spec, sc->gic_res);
/*
 * bus_print_child method: print the standard header/footer plus the
 * child's memory and IRQ resource ranges from its resource list.
 */
412 arm_gic_print_child(device_t bus, device_t child)
414 struct resource_list *rl;
417 rv = bus_print_child_header(bus, child);
419 rl = BUS_GET_RESOURCE_LIST(bus, child);
421 rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
423 rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
426 rv += bus_print_child_footer(bus, child);
/*
 * bus_alloc_resource method: resolve default-range requests from the
 * child's resource list, translate bus addresses through the 'ranges'
 * property, and hand off to bus_generic_alloc_resource().
 */
431 static struct resource *
432 arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
433 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
435 struct arm_gic_softc *sc;
436 struct resource_list_entry *rle;
437 struct resource_list *rl;
440 KASSERT(type == SYS_RES_MEMORY, ("Invalid resoure type %x", type));
442 sc = device_get_softc(bus);
445 * Request for the default allocation with a given rid: use resource
446 * list stored in the local device info.
448 if (RMAN_IS_DEFAULT_RANGE(start, end)) {
449 rl = BUS_GET_RESOURCE_LIST(bus, child);
/* NOTE(review): dead branch — the KASSERT above already requires MEMORY. */
451 if (type == SYS_RES_IOPORT)
452 type = SYS_RES_MEMORY;
454 rle = resource_list_find(rl, type, *rid);
457 device_printf(bus, "no default resources for "
458 "rid = %d, type = %d\n", *rid, type);
466 /* Remap through ranges property */
467 for (j = 0; j < sc->nranges; j++) {
468 if (start >= sc->ranges[j].bus && end <
469 sc->ranges[j].bus + sc->ranges[j].size) {
470 start -= sc->ranges[j].bus;
471 start += sc->ranges[j].host;
472 end -= sc->ranges[j].bus;
473 end += sc->ranges[j].host;
/* No range matched: fail rather than pass an untranslated address down. */
477 if (j == sc->nranges && sc->nranges != 0) {
479 device_printf(bus, "Could not map resource "
480 "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);
485 return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
/*
 * bus_read_ivar method: expose the GIC hardware revision (from IIDR) and
 * the bus type (FDT/ACPI) to children such as the GICv2m frame.
 */
490 arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
492 struct arm_gic_softc *sc;
494 sc = device_get_softc(dev);
497 case GIC_IVAR_HW_REV:
498 KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3,
499 ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
500 GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
501 *result = GICD_IIDR_VAR(sc->gic_iidr);
504 KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
505 ("arm_gic_read_ivar: Unknown bus type"));
506 KASSERT(sc->gic_bus <= GIC_BUS_MAX,
507 ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
508 *result = sc->gic_bus;
/*
 * Top-level interrupt filter: acknowledge interrupts from GICC_IAR in a
 * loop and dispatch them.  SGIs are EOI'd immediately and routed to the
 * IPI layer; other interrupts go through intr_isrc_dispatch(), with EOI
 * timing controlled by GI_FLAG_EARLY_EOI (edge-triggered sources).
 * Always returns FILTER_HANDLED — see the numbered notes below.
 */
516 arm_gic_intr(void *arg)
518 struct arm_gic_softc *sc = arg;
519 struct gic_irqsrc *gi;
520 uint32_t irq_active_reg, irq;
521 struct trapframe *tf;
523 irq_active_reg = gic_c_read_4(sc, GICC_IAR);
/* Interrupt ID is the low 10 bits; upper bits carry the source CPU for SGIs. */
524 irq = irq_active_reg & 0x3FF;
527 * 1. We do EOI here because recent read value from active interrupt
528 * register must be used for it. Another approach is to save this
529 * value into associated interrupt source.
530 * 2. EOI must be done on same CPU where interrupt has fired. Thus
531 * we must ensure that interrupted thread does not migrate to
533 * 3. EOI cannot be delayed by any preemption which could happen on
534 * critical_exit() used in MI intr code, when interrupt thread is
535 * scheduled. See next point.
536 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
537 * an action and any use of critical_exit() could break this
538 * assumption. See comments within smp_rendezvous_action().
539 * 5. We always return FILTER_HANDLED as this is an interrupt
540 * controller dispatch function. Otherwise, in cascaded interrupt
541 * case, the whole interrupt subtree would be masked.
/* ID >= nirqs means the spurious interrupt number (1023): nothing pending. */
544 if (irq >= sc->nirqs) {
545 if (gic_debug_spurious)
546 device_printf(sc->gic_dev,
547 "Spurious interrupt detected: last irq: %d on CPU%d\n",
548 sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
549 return (FILTER_HANDLED);
552 tf = curthread->td_intr_frame;
554 gi = sc->gic_irqs + irq;
556 * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement
557 * as compiler complains that comparing u_int >= 0 is always true.
559 if (irq <= GIC_LAST_SGI) {
561 /* Call EOI for all IPI before dispatch. */
562 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
563 intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
/* SGI on a UP kernel: no IPI layer, just acknowledge and complain. */
566 device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
567 irq - GIC_FIRST_SGI);
568 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
573 if (gic_debug_spurious)
574 sc->last_irq[PCPU_GET(cpuid)] = irq;
575 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
576 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
/* No handler accepted the interrupt: mask it so it cannot storm. */
578 if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
579 gic_irq_mask(sc, irq);
580 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
581 gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
582 device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
/* Re-read IAR and loop while more interrupts are pending. */
586 arm_irq_memory_barrier(irq);
587 irq_active_reg = gic_c_read_4(sc, GICC_IAR);
588 irq = irq_active_reg & 0x3FF;
592 return (FILTER_HANDLED);
/*
 * Program trigger mode and polarity for one SPI in GICD_ICFGRn (2 config
 * bits per interrupt, 16 per register).  SGIs/PPIs have fixed config and
 * are skipped.  CONFORM values leave the corresponding field untouched.
 */
596 gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
597 enum intr_polarity pol)
602 if (irq < GIC_FIRST_SPI)
605 mtx_lock_spin(&sc->mutex);
/* Extract this interrupt's 2-bit field from the packed register. */
607 reg = gic_d_read_4(sc, GICD_ICFGR(irq));
608 mask = (reg >> 2*(irq % 16)) & 0x3;
610 if (pol == INTR_POLARITY_LOW) {
611 mask &= ~GICD_ICFGR_POL_MASK;
612 mask |= GICD_ICFGR_POL_LOW;
613 } else if (pol == INTR_POLARITY_HIGH) {
614 mask &= ~GICD_ICFGR_POL_MASK;
615 mask |= GICD_ICFGR_POL_HIGH;
618 if (trig == INTR_TRIGGER_LEVEL) {
619 mask &= ~GICD_ICFGR_TRIG_MASK;
620 mask |= GICD_ICFGR_TRIG_LVL;
621 } else if (trig == INTR_TRIGGER_EDGE) {
622 mask &= ~GICD_ICFGR_TRIG_MASK;
623 mask |= GICD_ICFGR_TRIG_EDGE;
/* Write the updated 2-bit field back in place. */
627 reg = reg & ~(0x3 << 2*(irq % 16));
628 reg = reg | (mask << 2*(irq % 16));
629 gic_d_write_4(sc, GICD_ICFGR(irq), reg);
631 mtx_unlock_spin(&sc->mutex);
/*
 * Route one SPI to the CPUs in 'cpus' by writing the byte-wide
 * GICD_ITARGETSR entry.  GICv2 addresses at most 8 CPU interfaces, so any
 * requested CPU at index >= min(mp_ncpus, 8) cannot be targeted
 * (handling of that case is elided from this excerpt).
 */
635 gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
637 uint32_t cpu, end, mask;
639 end = min(mp_ncpus, 8);
640 for (cpu = end; cpu < MAXCPU; cpu++)
641 if (CPU_ISSET(cpu, cpus))
/* OR together the interface masks of all requested CPUs. */
644 for (mask = 0, cpu = 0; cpu < end; cpu++)
645 if (CPU_ISSET(cpu, cpus))
646 mask |= arm_gic_map[cpu];
648 gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
/*
 * Decode a standard 3-cell FDT GIC interrupt specifier into an absolute
 * interrupt number plus polarity/trigger.  Cell 0 selects SPI vs. PPI,
 * cell 1 is the number within that space, cell 2 carries trigger/polarity
 * flags (and a PPI cpu mask in bits 15:8, which is not decoded here).
 */
654 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
655 enum intr_polarity *polp, enum intr_trigger *trigp)
660 *polp = INTR_POLARITY_CONFORM;
661 *trigp = INTR_TRIGGER_CONFORM;
668 * The 1st cell is the interrupt type:
671 * The 2nd cell contains the interrupt number:
674 * The 3rd cell is the flags, encoded as follows:
675 * bits[3:0] trigger type and level flags
676 * 1 = low-to-high edge triggered
677 * 2 = high-to-low edge triggered
678 * 4 = active high level-sensitive
679 * 8 = active low level-sensitive
680 * bits[15:8] PPI interrupt cpu mask
681 * Each bit corresponds to each of the 8 possible cpus
682 * attached to the GIC. A bit set to '1' indicated
683 * the interrupt is wired to that CPU.
687 irq = GIC_FIRST_SPI + cells[1];
688 /* SPI irq is checked later. */
691 irq = GIC_FIRST_PPI + cells[1];
692 if (irq > GIC_LAST_PPI) {
693 device_printf(dev, "unsupported PPI interrupt "
694 "number %u\n", cells[1]);
699 device_printf(dev, "unsupported interrupt type "
700 "configuration %u\n", cells[0]);
/* Reject reserved flag bits and unsupported flag combinations. */
704 tripol = cells[2] & 0xff;
705 if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
707 device_printf(dev, "unsupported trigger/polarity "
708 "configuration 0x%02x\n", tripol);
711 *polp = INTR_POLARITY_CONFORM;
712 *trigp = tripol & FDT_INTR_EDGE_MASK ?
713 INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
/*
 * Map MSI interrupt data to an interrupt number and configuration.  MSI
 * writes are memory transactions, so the interrupt is necessarily
 * edge-triggered with active-high polarity.
 */
721 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
722 enum intr_polarity *polp, enum intr_trigger *trigp)
724 struct gic_irqsrc *gi;
726 /* Map a non-GICv2m MSI */
727 gi = (struct gic_irqsrc *)msi_data->isrc;
733 /* MSI/MSI-X interrupts are always edge triggered with high polarity */
734 *polp = INTR_POLARITY_HIGH;
735 *trigp = INTR_TRIGGER_EDGE;
/*
 * Dispatch on the map-data type (FDT / ACPI / MSI) to decode an interrupt
 * number, polarity and trigger, then validate all three against the
 * controller's limits.  FDT mappings must not land on GI_FLAG_MSI sources.
 */
741 gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
742 enum intr_polarity *polp, enum intr_trigger *trigp)
745 enum intr_polarity pol;
746 enum intr_trigger trig;
747 struct arm_gic_softc *sc;
748 struct intr_map_data_msi *dam;
750 struct intr_map_data_fdt *daf;
753 struct intr_map_data_acpi *daa;
756 sc = device_get_softc(dev);
757 switch (data->type) {
759 case INTR_MAP_DATA_FDT:
760 daf = (struct intr_map_data_fdt *)data;
761 if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
764 KASSERT(irq >= sc->nirqs ||
765 (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
766 ("%s: Attempting to map a MSI interrupt from FDT",
/* ACPI supplies the decoded fields directly (extraction elided here). */
771 case INTR_MAP_DATA_ACPI:
772 daa = (struct intr_map_data_acpi *)data;
778 case INTR_MAP_DATA_MSI:
780 dam = (struct intr_map_data_msi *)data;
781 if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
/* Validate the decoded irq/polarity/trigger before reporting success. */
788 if (irq >= sc->nirqs)
790 if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
791 pol != INTR_POLARITY_HIGH)
793 if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
794 trig != INTR_TRIGGER_LEVEL)
/*
 * pic_map_intr method: translate map data to the corresponding
 * intr_irqsrc; polarity/trigger are not needed here, only the irq number.
 */
806 arm_gic_map_intr(device_t dev, struct intr_map_data *data,
807 struct intr_irqsrc **isrcp)
811 struct arm_gic_softc *sc;
813 error = gic_map_intr(dev, data, &irq, NULL, NULL);
815 sc = device_get_softc(dev);
816 *isrcp = GIC_INTR_ISRC(sc, irq);
/*
 * pic_setup_intr method: determine polarity/trigger for the source (MSI
 * sources were fixed at reservation time; otherwise taken from map data),
 * verify consistency with any earlier setup, fill in defaults, program the
 * hardware via gic_config(), and bind the interrupt to a CPU.
 */
822 arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
823 struct resource *res, struct intr_map_data *data)
825 struct arm_gic_softc *sc = device_get_softc(dev);
826 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
827 enum intr_trigger trig;
828 enum intr_polarity pol;
/* MSI sources were configured by arm_gic_reserve_msi_range(); verify. */
830 if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
834 KASSERT(pol == INTR_POLARITY_HIGH,
835 ("%s: MSI interrupts must be active-high", __func__));
836 KASSERT(trig == INTR_TRIGGER_EDGE,
837 ("%s: MSI interrupts must be edge triggered", __func__));
838 } else if (data != NULL) {
841 /* Get config for resource. */
842 if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
846 pol = INTR_POLARITY_CONFORM;
847 trig = INTR_TRIGGER_CONFORM;
850 /* Compare config if this is not first setup. */
851 if (isrc->isrc_handlers != 0) {
852 if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
853 (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
859 /* For MSI/MSI-X we should have already configured these */
860 if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
861 if (pol == INTR_POLARITY_CONFORM)
862 pol = INTR_POLARITY_LOW; /* just pick some */
863 if (trig == INTR_TRIGGER_CONFORM)
864 trig = INTR_TRIGGER_EDGE; /* just pick some */
869 /* Edge triggered interrupts need an early EOI sent */
870 if (gi->gi_trig == INTR_TRIGGER_EDGE)
871 gi->gi_flags |= GI_FLAG_EARLY_EOI;
875 * XXX - In case that per CPU interrupt is going to be enabled in time
876 * when SMP is already started, we need some IPI call which
877 * enables it on others CPUs. Further, it's more complicated as
878 * pic_enable_source() and pic_disable_source() should act on
879 * per CPU basis only. Thus, it should be solved here somehow.
881 if (isrc->isrc_flags & INTR_ISRCF_PPI)
882 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
884 gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
885 arm_gic_bind_intr(dev, isrc);
/*
 * pic_teardown_intr method: when the last handler is removed from a
 * non-MSI source, reset its polarity/trigger to CONFORM so it can be
 * reconfigured on the next setup.  MSI sources keep their fixed config.
 */
890 arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
891 struct resource *res, struct intr_map_data *data)
893 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
895 if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
896 gi->gi_pol = INTR_POLARITY_CONFORM;
897 gi->gi_trig = INTR_TRIGGER_CONFORM;
/*
 * pic_enable_intr method: memory barrier (so prior device writes are
 * visible before the interrupt can fire), then unmask in the distributor.
 */
903 arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
905 struct arm_gic_softc *sc = device_get_softc(dev);
906 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
908 arm_irq_memory_barrier(gi->gi_irq);
909 gic_irq_unmask(sc, gi->gi_irq);
/* pic_disable_intr method: mask the interrupt in the distributor. */
913 arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
915 struct arm_gic_softc *sc = device_get_softc(dev);
916 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
918 gic_irq_mask(sc, gi->gi_irq);
/*
 * pic_pre_ithread method: mask and EOI the interrupt before handing it to
 * its ithread, so it cannot re-fire until post_ithread re-enables it.
 */
922 arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
924 struct arm_gic_softc *sc = device_get_softc(dev);
925 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
927 arm_gic_disable_intr(dev, isrc);
928 gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
/* pic_post_ithread method: barrier, then re-enable after ithread completion. */
932 arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
935 arm_irq_memory_barrier(0);
936 arm_gic_enable_intr(dev, isrc);
/*
 * pic_post_filter method: send EOI after a filter handler, unless the
 * source is edge-triggered (GI_FLAG_EARLY_EOI) and was EOI'd already in
 * arm_gic_intr().
 */
940 arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
942 struct arm_gic_softc *sc = device_get_softc(dev);
943 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
945 /* EOI for edge-triggered done earlier. */
946 if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
949 arm_irq_memory_barrier(0);
950 gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
/*
 * pic_bind_intr method: bind an SPI to the CPUs in isrc_cpu; when no CPU
 * set was requested, pick the next CPU round-robin via gic_irq_cpu.
 * SGIs/PPIs are per-CPU and cannot be re-routed.
 */
954 arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
956 struct arm_gic_softc *sc = device_get_softc(dev);
957 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
959 if (gi->gi_irq < GIC_FIRST_SPI)
962 if (CPU_EMPTY(&isrc->isrc_cpu)) {
963 gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
964 CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
966 return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
/*
 * pic_ipi_send method: build the GICD_SGIR target-list byte from the
 * requested CPU set and write it together with the SGI number to raise
 * the IPI on those CPUs.
 */
971 arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
974 struct arm_gic_softc *sc = device_get_softc(dev);
975 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
978 for (i = 0; i < MAXCPU; i++)
979 if (CPU_ISSET(i, &cpus))
980 val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
982 gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
/*
 * pic_ipi_setup method: allocate the next unused SGI for 'ipi', record the
 * mapping for dispatch in arm_gic_intr(), and mark the source active on
 * the calling CPU.  Fails once all 16 SGIs are taken.
 */
986 arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
988 struct intr_irqsrc *isrc;
989 struct arm_gic_softc *sc = device_get_softc(dev);
991 if (sgi_first_unused > GIC_LAST_SGI)
994 isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
995 sgi_to_ipi[sgi_first_unused++] = ipi;
997 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
/*
 * Method table for the abstract 'gic' class: standard bus methods plus the
 * INTRNG pic_* interface.  Concrete FDT/ACPI front-ends subclass this.
 */
1004 static device_method_t arm_gic_methods[] = {
1006 DEVMETHOD(bus_print_child, arm_gic_print_child),
1007 DEVMETHOD(bus_add_child, bus_generic_add_child),
1008 DEVMETHOD(bus_alloc_resource, arm_gic_alloc_resource),
1009 DEVMETHOD(bus_release_resource, bus_generic_release_resource),
1010 DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
1011 DEVMETHOD(bus_read_ivar, arm_gic_read_ivar),
1013 /* Interrupt controller interface */
1014 DEVMETHOD(pic_disable_intr, arm_gic_disable_intr),
1015 DEVMETHOD(pic_enable_intr, arm_gic_enable_intr),
1016 DEVMETHOD(pic_map_intr, arm_gic_map_intr),
1017 DEVMETHOD(pic_setup_intr, arm_gic_setup_intr),
1018 DEVMETHOD(pic_teardown_intr, arm_gic_teardown_intr),
1019 DEVMETHOD(pic_post_filter, arm_gic_post_filter),
1020 DEVMETHOD(pic_post_ithread, arm_gic_post_ithread),
1021 DEVMETHOD(pic_pre_ithread, arm_gic_pre_ithread),
1023 DEVMETHOD(pic_bind_intr, arm_gic_bind_intr),
1024 DEVMETHOD(pic_init_secondary, arm_gic_init_secondary),
1025 DEVMETHOD(pic_ipi_send, arm_gic_ipi_send),
1026 DEVMETHOD(pic_ipi_setup, arm_gic_ipi_setup),
1031 DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
1032 sizeof(struct arm_gic_softc));
1035 * GICv2m support -- the GICv2 MSI/MSI-X controller.
/*
 * GICv2m frame register offsets: MSI_TYPER describes the SPI range served
 * by this frame, MSI_SETSPI_NS is the doorbell written by devices.
 */
1038 #define GICV2M_MSI_TYPER 0x008
1039 #define MSI_TYPER_SPI_BASE(x) (((x) >> 16) & 0x3ff)
1040 #define MSI_TYPER_SPI_COUNT(x) (((x) >> 0) & 0x3ff)
1041 #define GICv2M_MSI_SETSPI_NS 0x040
1042 #define GICV2M_MSI_IIDR 0xFCC
/*
 * GICv2m frame attach: map the frame, read its SPI base/count from
 * MSI_TYPER, reserve that SPI range in the parent GIC for MSI use, and
 * register this device as an MSI controller under sc_xref.
 */
1045 arm_gicv2m_attach(device_t dev)
1047 struct arm_gicv2m_softc *sc;
1051 sc = device_get_softc(dev);
1054 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1056 if (sc->sc_mem == NULL) {
1057 device_printf(dev, "Unable to allocate resources\n");
1061 typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
1062 sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
1063 sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);
1064 sc->sc_spi_end = sc->sc_spi_start + sc->sc_spi_count;
1066 /* Reserve these interrupts for MSI/MSI-X use */
1067 arm_gic_reserve_msi_range(device_get_parent(dev), sc->sc_spi_start,
1070 mtx_init(&sc->sc_mutex, "GICv2m lock", NULL, MTX_DEF);
1072 intr_msi_register(dev, sc->sc_xref);
1075 device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
1076 sc->sc_spi_start + sc->sc_spi_count - 1);
/*
 * msi_alloc_msi method: find 'count' contiguous unused SPIs inside the
 * frame's range, aligned to 'maxcount' (both powers of two per the MSI
 * spec), mark them GI_FLAG_MSI_USED under sc_mutex, and return the
 * corresponding irqsrcs with the parent GIC as the handling PIC.
 */
1082 arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1083 device_t *pic, struct intr_irqsrc **srcs)
1085 struct arm_gic_softc *psc;
1086 struct arm_gicv2m_softc *sc;
1087 int i, irq, end_irq;
1090 KASSERT(powerof2(count), ("%s: bad count", __func__));
1091 KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));
1093 psc = device_get_softc(device_get_parent(dev));
1094 sc = device_get_softc(dev);
1096 mtx_lock(&sc->sc_mutex);
1099 for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
1100 /* Start on an aligned interrupt */
1101 if ((irq & (maxcount - 1)) != 0)
1104 /* Assume we found a valid range until shown otherwise */
1107 /* Check this range is valid */
1108 for (end_irq = irq; end_irq != irq + count; end_irq++) {
1109 /* No free interrupts */
1110 if (end_irq == sc->sc_spi_end) {
1115 KASSERT((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
1116 ("%s: Non-MSI interrupt found", __func__));
1118 /* This is already used */
1119 if ((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
1129 /* Not enough interrupts were found */
1130 if (!found || irq == sc->sc_spi_end) {
1131 mtx_unlock(&sc->sc_mutex);
1135 for (i = 0; i < count; i++) {
1136 /* Mark the interrupt as used */
1137 psc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
1139 mtx_unlock(&sc->sc_mutex);
/* Hand the caller the parent GIC's irqsrcs for the claimed range. */
1141 for (i = 0; i < count; i++)
1142 srcs[i] = (struct intr_irqsrc *)&psc->gic_irqs[irq + i];
1143 *pic = device_get_parent(dev);
/*
 * msi_release_msi method: clear GI_FLAG_MSI_USED on each released source
 * so the SPIs can be reallocated.  Releasing an unused source is a bug
 * caught by the KASSERT.
 */
1149 arm_gicv2m_release_msi(device_t dev, device_t child, int count,
1150 struct intr_irqsrc **isrc)
1152 struct arm_gicv2m_softc *sc;
1153 struct gic_irqsrc *gi;
1156 sc = device_get_softc(dev);
1158 mtx_lock(&sc->sc_mutex);
1159 for (i = 0; i < count; i++) {
1160 gi = (struct gic_irqsrc *)isrc[i];
1162 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1163 ("%s: Trying to release an unused MSI-X interrupt",
1166 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1168 mtx_unlock(&sc->sc_mutex);
/*
 * msi_alloc_msix method: claim a single unused SPI from the frame's range
 * (no alignment constraint, unlike multi-vector MSI) and return its
 * irqsrc with the parent GIC as the handling PIC.
 */
1174 arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
1175 struct intr_irqsrc **isrcp)
1177 struct arm_gicv2m_softc *sc;
1178 struct arm_gic_softc *psc;
1181 psc = device_get_softc(device_get_parent(dev));
1182 sc = device_get_softc(dev);
1184 mtx_lock(&sc->sc_mutex);
1185 /* Find an unused interrupt */
1186 for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
1187 KASSERT((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1188 ("%s: Non-MSI interrupt found", __func__));
1189 if ((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1192 /* No free interrupt was found */
1193 if (irq == sc->sc_spi_end) {
1194 mtx_unlock(&sc->sc_mutex);
1198 /* Mark the interrupt as used */
1199 psc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1200 mtx_unlock(&sc->sc_mutex);
1202 *isrcp = (struct intr_irqsrc *)&psc->gic_irqs[irq];
1203 *pic = device_get_parent(dev);
/*
 * msi_release_msix method: return a single MSI-X SPI to the free pool by
 * clearing its GI_FLAG_MSI_USED flag under sc_mutex.
 */
1209 arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1211 struct arm_gicv2m_softc *sc;
1212 struct gic_irqsrc *gi;
1214 sc = device_get_softc(dev);
1215 gi = (struct gic_irqsrc *)isrc;
1217 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1218 ("%s: Trying to release an unused MSI-X interrupt", __func__));
1220 mtx_lock(&sc->sc_mutex);
1221 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1222 mtx_unlock(&sc->sc_mutex);
/*
 * msi_map_msi method: report the doorbell address (physical address of the
 * frame's MSI_SETSPI_NS register) the device must write to raise this
 * interrupt; the data value written is elided from this excerpt
 * (NOTE(review): presumably the SPI number — confirm in full source).
 */
1228 arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1229 uint64_t *addr, uint32_t *data)
1231 struct arm_gicv2m_softc *sc = device_get_softc(dev);
1232 struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1234 *addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
/*
 * Method table for the abstract 'gicv2m' class: device attach plus the
 * msi_* interface used by PCI MSI/MSI-X allocation.
 */
1240 static device_method_t arm_gicv2m_methods[] = {
1241 /* Device interface */
1242 DEVMETHOD(device_attach, arm_gicv2m_attach),
1245 DEVMETHOD(msi_alloc_msi, arm_gicv2m_alloc_msi),
1246 DEVMETHOD(msi_release_msi, arm_gicv2m_release_msi),
1247 DEVMETHOD(msi_alloc_msix, arm_gicv2m_alloc_msix),
1248 DEVMETHOD(msi_release_msix, arm_gicv2m_release_msix),
1249 DEVMETHOD(msi_map_msi, arm_gicv2m_map_msi),
1255 DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
1256 sizeof(struct arm_gicv2m_softc));