2 * Copyright (c) 2015-2016 The FreeBSD Foundation
4 * This software was developed by Andrew Turner under
5 * the sponsorship of the FreeBSD Foundation.
7 * This software was developed by Semihalf under
8 * the sponsorship of the FreeBSD Foundation.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include "opt_platform.h"
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bitstring.h>
42 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
49 #include <sys/cpuset.h>
51 #include <sys/mutex.h>
53 #include <sys/interrupt.h>
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/intr.h>
63 #include <dev/fdt/fdt_intr.h>
64 #include <dev/ofw/ofw_bus_subr.h>
68 #include <contrib/dev/acpica/include/acpi.h>
69 #include <dev/acpica/acpivar.h>
75 #include <arm/arm/gic_common.h>
76 #include "gic_v3_reg.h"
77 #include "gic_v3_var.h"
79 static bus_get_domain_t gic_v3_get_domain;
80 static bus_read_ivar_t gic_v3_read_ivar;
82 static pic_disable_intr_t gic_v3_disable_intr;
83 static pic_enable_intr_t gic_v3_enable_intr;
84 static pic_map_intr_t gic_v3_map_intr;
85 static pic_setup_intr_t gic_v3_setup_intr;
86 static pic_teardown_intr_t gic_v3_teardown_intr;
87 static pic_post_filter_t gic_v3_post_filter;
88 static pic_post_ithread_t gic_v3_post_ithread;
89 static pic_pre_ithread_t gic_v3_pre_ithread;
90 static pic_bind_intr_t gic_v3_bind_intr;
92 static pic_init_secondary_t gic_v3_init_secondary;
93 static pic_ipi_send_t gic_v3_ipi_send;
94 static pic_ipi_setup_t gic_v3_ipi_setup;
97 static msi_alloc_msi_t gic_v3_alloc_msi;
98 static msi_release_msi_t gic_v3_release_msi;
99 static msi_alloc_msix_t gic_v3_alloc_msix;
100 static msi_release_msix_t gic_v3_release_msix;
101 static msi_map_msi_t gic_v3_map_msi;
103 static u_int gic_irq_cpu;
105 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
106 static u_int sgi_first_unused = GIC_FIRST_SGI;
/*
 * Method table wiring this driver into newbus, the INTRNG PIC interface
 * and the MSI interface.  NOTE(review): several entries visible in the
 * upstream driver (e.g. DEVMETHOD_END) are missing from this view.
 */
109 static device_method_t gic_v3_methods[] = {
110 /* Device interface */
111 DEVMETHOD(device_detach, gic_v3_detach),
114 DEVMETHOD(bus_get_domain, gic_v3_get_domain),
115 DEVMETHOD(bus_read_ivar, gic_v3_read_ivar),
117 /* Interrupt controller interface */
118 DEVMETHOD(pic_disable_intr, gic_v3_disable_intr),
119 DEVMETHOD(pic_enable_intr, gic_v3_enable_intr),
120 DEVMETHOD(pic_map_intr, gic_v3_map_intr),
121 DEVMETHOD(pic_setup_intr, gic_v3_setup_intr),
122 DEVMETHOD(pic_teardown_intr, gic_v3_teardown_intr),
123 DEVMETHOD(pic_post_filter, gic_v3_post_filter),
124 DEVMETHOD(pic_post_ithread, gic_v3_post_ithread),
125 DEVMETHOD(pic_pre_ithread, gic_v3_pre_ithread),
127 DEVMETHOD(pic_bind_intr, gic_v3_bind_intr),
128 DEVMETHOD(pic_init_secondary, gic_v3_init_secondary),
129 DEVMETHOD(pic_ipi_send, gic_v3_ipi_send),
130 DEVMETHOD(pic_ipi_setup, gic_v3_ipi_setup),
/* MSI/MSI-X interface (SPI-mapped message based interrupts) */
134 DEVMETHOD(msi_alloc_msi, gic_v3_alloc_msi),
135 DEVMETHOD(msi_release_msi, gic_v3_release_msi),
136 DEVMETHOD(msi_alloc_msix, gic_v3_alloc_msix),
137 DEVMETHOD(msi_release_msix, gic_v3_release_msix),
138 DEVMETHOD(msi_map_msi, gic_v3_map_msi),
/* Base driver class; bus-specific front ends (FDT/ACPI) subclass it. */
144 DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
145 sizeof(struct gic_v3_softc));
148 * Driver-specific definitions.
/* Dedicated malloc(9) type for all of this driver's allocations. */
150 MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
153 * Helper functions and definitions.
155 /* Destination registers, either Distributor or Re-Distributor */
/*
 * Per-interrupt software state.  Embeds the generic INTRNG source as its
 * first member so a struct intr_irqsrc * can be cast back to it.
 * NOTE(review): fields referenced elsewhere (gi_irq, gi_flags) are not
 * visible in this view of the declaration.
 */
161 struct gic_v3_irqsrc {
162 struct intr_irqsrc gi_isrc;
164 enum intr_polarity gi_pol;
165 enum intr_trigger gi_trig;
166 #define GI_FLAG_MSI (1 << 1) /* This interrupt source should only */
167 /* be used for MSI/MSI-X interrupts */
168 #define GI_FLAG_MSI_USED (1 << 2) /* This irq is already allocated */
169 /* for a MSI/MSI-X interrupt */
173 /* Helper routines starting with gic_v3_ */
174 static int gic_v3_dist_init(struct gic_v3_softc *);
175 static int gic_v3_redist_alloc(struct gic_v3_softc *);
176 static int gic_v3_redist_find(struct gic_v3_softc *);
177 static int gic_v3_redist_init(struct gic_v3_softc *);
178 static int gic_v3_cpu_init(struct gic_v3_softc *);
179 static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);
181 /* A sequence of init functions for primary (boot) CPU */
182 typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
183 /* Primary CPU initialization sequence */
/*
 * NOTE(review): the initializer elements of both arrays are missing from
 * this view; gic_v3_attach()/gic_v3_init_secondary() iterate them until a
 * NULL terminator, so each presumably lists the *_init helpers above and
 * ends with NULL — confirm against the full file.
 */
184 static gic_v3_initseq_t gic_v3_primary_init[] = {
193 /* Secondary CPU initialization sequence */
194 static gic_v3_initseq_t gic_v3_secondary_init[] = {
/*
 * Read a 32-bit register from the Re-Distributor belonging to the
 * CPU this code is currently running on.
 */
202 gic_r_read_4(device_t dev, bus_size_t offset)
204 struct gic_v3_softc *sc;
205 struct resource *rdist;
207 sc = device_get_softc(dev);
/* Per-CPU redistributor resource, indexed by the caller's cpuid. */
208 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
209 return (bus_read_4(rdist, offset));
/*
 * Read a 64-bit register from the current CPU's Re-Distributor.
 */
213 gic_r_read_8(device_t dev, bus_size_t offset)
215 struct gic_v3_softc *sc;
216 struct resource *rdist;
218 sc = device_get_softc(dev);
219 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
220 return (bus_read_8(rdist, offset));
/*
 * Write a 32-bit value to the current CPU's Re-Distributor.
 */
224 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
226 struct gic_v3_softc *sc;
227 struct resource *rdist;
229 sc = device_get_softc(dev);
230 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
231 bus_write_4(rdist, offset, val);
/*
 * Write a 64-bit value to the current CPU's Re-Distributor.
 */
235 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
237 struct gic_v3_softc *sc;
238 struct resource *rdist;
240 sc = device_get_softc(dev);
241 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
242 bus_write_8(rdist, offset, val);
/*
 * Common attach path for the GICv3.  The bus-specific front end (FDT or
 * ACPI) has already filled in gic_redists.nregions and the MBI range
 * before calling here.  Maps the Distributor and Re-Distributor regions,
 * registers one interrupt source per SGI/PPI/SPI, reserves the MBI SPI
 * range for MSI/MSI-X, and runs the primary-CPU init sequence.
 */
249 gic_v3_attach(device_t dev)
251 struct gic_v3_softc *sc;
252 gic_v3_initseq_t *init_func;
260 sc = device_get_softc(dev);
261 sc->gic_registered = FALSE;
265 /* Initialize mutex */
266 mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
269 * Allocate array of struct resource.
270 * One entry for Distributor and all remaining for Re-Distributor.
272 sc->gic_res = malloc(
273 sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
276 /* Now allocate corresponding resources */
277 for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
278 sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
280 if (sc->gic_res[rid] == NULL)
285 * Distributor interface
287 sc->gic_dist = sc->gic_res[0];
290 * Re-Distributor interface
292 /* Allocate space under region descriptions */
293 sc->gic_redists.regions = malloc(
294 sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
297 /* Fill-up bus_space information for each region. */
298 for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
299 sc->gic_redists.regions[i] = sc->gic_res[rid];
301 /* Get the number of supported SPI interrupts */
302 typer = gic_d_read(sc, 4, GICD_TYPER);
303 sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
304 if (sc->gic_nirqs > GIC_I_NUM_MAX)
305 sc->gic_nirqs = GIC_I_NUM_MAX;
307 sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
308 M_GIC_V3, M_WAITOK | M_ZERO);
309 name = device_get_nameunit(dev);
/* Register one INTRNG source per interrupt: SGI, PPI, then SPI. */
310 for (irq = 0; irq < sc->gic_nirqs; irq++) {
311 struct intr_irqsrc *isrc;
313 sc->gic_irqs[irq].gi_irq = irq;
314 sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
315 sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
317 isrc = &sc->gic_irqs[irq].gi_isrc;
318 if (irq <= GIC_LAST_SGI) {
319 err = intr_isrc_register(isrc, sc->dev,
320 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
321 } else if (irq <= GIC_LAST_PPI) {
322 err = intr_isrc_register(isrc, sc->dev,
323 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
325 err = intr_isrc_register(isrc, sc->dev, 0,
326 "%s,s%u", name, irq - GIC_FIRST_SPI);
329 /* XXX call intr_isrc_deregister() */
/*
 * BUG(review): gic_irqs was allocated with malloc type M_GIC_V3
 * (line 307/308 above) but is freed here with M_DEVBUF — a
 * malloc-type mismatch that trips the malloc(9) type accounting.
 * Should be free(sc->gic_irqs, M_GIC_V3).
 */
330 free(sc->gic_irqs, M_DEVBUF);
/* Reserve the SPI range used for message based interrupts, if any. */
335 if (sc->gic_mbi_start > 0) {
336 /* Reserve these interrupts for MSI/MSI-X use */
337 for (irq = sc->gic_mbi_start; irq <= sc->gic_mbi_end; irq++) {
338 sc->gic_irqs[irq].gi_pol = INTR_POLARITY_HIGH;
339 sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_EDGE;
340 sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI;
343 mtx_init(&sc->gic_mbi_mtx, "GICv3 mbi lock", NULL, MTX_DEF);
346 device_printf(dev, "using spi %u to %u\n", sc->gic_mbi_start,
352 * Read the Peripheral ID2 register. This is an implementation
353 * defined register, but seems to be implemented in all GICv3
354 * parts and Linux expects it to be there.
356 sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
358 /* Get the number of supported interrupt identifier bits */
359 sc->gic_idbits = GICD_TYPER_IDBITS(typer);
362 device_printf(dev, "SPIs: %u, IDs: %u\n",
363 sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
366 /* Train init sequence for boot CPU */
367 for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
368 err = (*init_func)(sc);
/*
 * Detach path: refuses to detach while the PIC is registered with
 * INTRNG, then releases all memory resources and per-CPU allocations.
 */
377 gic_v3_detach(device_t dev)
379 struct gic_v3_softc *sc;
383 sc = device_get_softc(dev);
385 if (device_is_attached(dev)) {
387 * XXX: We should probably deregister PIC
/* Detaching a live interrupt controller would strand handlers. */
389 if (sc->gic_registered)
390 panic("Trying to detach registered PIC")
392 for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
393 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
/* free(9) with NULL is a no-op, so unpopulated CPU slots are fine. */
395 for (i = 0; i <= mp_maxid; i++)
396 free(sc->gic_redists.pcpu[i], M_GIC_V3);
398 free(sc->gic_res, M_GIC_V3);
399 free(sc->gic_redists.regions, M_GIC_V3);
/*
 * bus_get_domain method: report the NUMA/proximity domain recorded in
 * the child's ivars; a negative gic_domain means "no domain known".
 */
405 gic_v3_get_domain(device_t dev, device_t child, int *domain)
407 struct gic_v3_devinfo *di;
409 di = device_get_ivars(child);
410 if (di->gic_domain < 0)
413 *domain = di->gic_domain;
/*
 * bus_read_ivar method: export driver state to children (e.g. the ITS):
 * the number of IRQs available per child, the current CPU's
 * redistributor pointer, the hardware revision (from PIDR2) and the bus
 * type this instance attached on.
 * NOTE(review): break/return statements between cases are missing from
 * this view.
 */
418 gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
420 struct gic_v3_softc *sc;
422 sc = device_get_softc(dev);
425 case GICV3_IVAR_NIRQS:
/* Split the non-GIC interrupt space evenly among children. */
426 *result = (intr_nirq - sc->gic_nirqs) / sc->gic_nchildren;
428 case GICV3_IVAR_REDIST:
429 *result = (uintptr_t)sc->gic_redists.pcpu[PCPU_GET(cpuid)];
431 case GIC_IVAR_HW_REV:
433 GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
434 GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
435 ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
436 GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
437 *result = GICR_PIDR2_ARCH(sc->gic_pidr2);
440 KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
441 ("gic_v3_read_ivar: Unknown bus type"));
442 KASSERT(sc->gic_bus <= GIC_BUS_MAX,
443 ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
444 *result = sc->gic_bus;
/*
 * Top-level interrupt filter.  Acknowledges the highest-priority pending
 * interrupt via ICC_IAR1_EL1, then dispatches: LPIs to the child PIC
 * (ITS), SGIs as IPIs, and PPIs/SPIs through INTRNG.  EOI discipline:
 * edge interrupts are EOId before dispatch, level interrupts after (in
 * post_filter/pre_ithread), matching ICC_CTLR_EL1.EOImode == 0 set in
 * gic_v3_cpu_init().
 */
452 arm_gic_v3_intr(void *arg)
454 struct gic_v3_softc *sc = arg;
455 struct gic_v3_irqsrc *gi;
456 struct intr_pic *pic;
458 struct trapframe *tf;
463 if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
465 * Hardware: Cavium ThunderX
466 * Chip revision: Pass 1.0 (early version)
467 * Pass 1.1 (production)
468 * ERRATUM: 22978, 23154
/* Erratum workaround: pad the IAR read with nops per Cavium guidance. */
471 "nop;nop;nop;nop;nop;nop;nop;nop; \n"
472 "mrs %0, ICC_IAR1_EL1 \n"
473 "nop;nop;nop;nop; \n"
475 : "=&r" (active_irq));
477 active_irq = gic_icc_read(IAR1);
/* LPIs (>= 8192) belong to the child controller (the ITS). */
480 if (active_irq >= GIC_FIRST_LPI) {
481 intr_child_irq_handler(pic, active_irq);
/* Spurious/out-of-range IDs: claim and ignore. */
485 if (__predict_false(active_irq >= sc->gic_nirqs))
486 return (FILTER_HANDLED);
488 tf = curthread->td_intr_frame;
489 gi = &sc->gic_irqs[active_irq];
490 if (active_irq <= GIC_LAST_SGI) {
491 /* Call EOI for all IPI before dispatch. */
492 gic_icc_write(EOIR1, (uint64_t)active_irq);
494 intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
496 device_printf(sc->dev, "SGI %ju on UP system detected\n",
497 (uintmax_t)(active_irq - GIC_FIRST_SGI));
499 } else if (active_irq >= GIC_FIRST_PPI &&
500 active_irq <= GIC_LAST_SPI) {
501 if (gi->gi_trig == INTR_TRIGGER_EDGE)
502 gic_icc_write(EOIR1, gi->gi_irq);
/* No handler claimed it: EOI (if not already), mask it as stray. */
504 if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
505 if (gi->gi_trig != INTR_TRIGGER_EDGE)
506 gic_icc_write(EOIR1, gi->gi_irq);
507 gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
508 device_printf(sc->dev,
509 "Stray irq %lu disabled\n", active_irq);
/*
 * Decode an FDT "interrupts" specifier (3 cells: type, number, flags)
 * into a GIC interrupt number plus trigger/polarity.  Returns non-zero
 * (in lines not visible here) for unsupported encodings.
 */
517 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
518 enum intr_polarity *polp, enum intr_trigger *trigp)
526 * The 1st cell is the interrupt type:
529 * The 2nd cell contains the interrupt number:
532 * The 3rd cell is the flags, encoded as follows:
533 * bits[3:0] trigger type and level flags
535 * 2 = edge triggered (PPI only)
536 * 4 = level-sensitive
537 * 8 = level-sensitive (PPI only)
541 irq = GIC_FIRST_SPI + cells[1];
542 /* SPI irq is checked later. */
545 irq = GIC_FIRST_PPI + cells[1];
546 if (irq > GIC_LAST_PPI) {
547 device_printf(dev, "unsupported PPI interrupt "
548 "number %u\n", cells[1]);
553 device_printf(dev, "unsupported interrupt type "
554 "configuration %u\n", cells[0]);
/* Translate FDT flag bits into INTRNG trigger/polarity pairs. */
558 switch (cells[2] & FDT_INTR_MASK) {
559 case FDT_INTR_EDGE_RISING:
560 *trigp = INTR_TRIGGER_EDGE;
561 *polp = INTR_POLARITY_HIGH;
563 case FDT_INTR_EDGE_FALLING:
564 *trigp = INTR_TRIGGER_EDGE;
565 *polp = INTR_POLARITY_LOW;
567 case FDT_INTR_LEVEL_HIGH:
568 *trigp = INTR_TRIGGER_LEVEL;
569 *polp = INTR_POLARITY_HIGH;
571 case FDT_INTR_LEVEL_LOW:
572 *trigp = INTR_TRIGGER_LEVEL;
573 *polp = INTR_POLARITY_LOW;
576 device_printf(dev, "unsupported trigger/polarity "
577 "configuration 0x%02x\n", cells[2]);
581 /* Check the interrupt is valid */
/* SPIs are active-high only on the GIC; reject other polarities. */
582 if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
/*
 * Decode an MSI map-data record: the interrupt number comes from the
 * previously allocated irqsrc; trigger/polarity are fixed for MSIs.
 */
591 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
592 enum intr_polarity *polp, enum intr_trigger *trigp)
594 struct gic_v3_irqsrc *gi;
597 gi = (struct gic_v3_irqsrc *)msi_data->isrc;
603 /* MSI/MSI-X interrupts are always edge triggered with high polarity */
604 *polp = INTR_POLARITY_HIGH;
605 *trigp = INTR_TRIGGER_EDGE;
/*
 * Common map-data decoder: dispatches on the map-data type (FDT, ACPI or
 * MSI), then validates the resulting irq number, polarity and trigger.
 * The FDT and ACPI branches are conditionally compiled (the #ifdefs are
 * not visible in this view).
 */
611 do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
612 enum intr_polarity *polp, enum intr_trigger *trigp)
614 struct gic_v3_softc *sc;
615 enum intr_polarity pol;
616 enum intr_trigger trig;
617 struct intr_map_data_msi *dam;
619 struct intr_map_data_fdt *daf;
622 struct intr_map_data_acpi *daa;
626 sc = device_get_softc(dev);
628 switch (data->type) {
630 case INTR_MAP_DATA_FDT:
631 daf = (struct intr_map_data_fdt *)data;
632 if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
638 case INTR_MAP_DATA_ACPI:
639 daa = (struct intr_map_data_acpi *)data;
645 case INTR_MAP_DATA_MSI:
647 dam = (struct intr_map_data_msi *)data;
648 if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
/* Reject interrupts outside the range this GIC instance handles. */
655 if (irq >= sc->gic_nirqs)
658 case INTR_POLARITY_CONFORM:
659 case INTR_POLARITY_LOW:
660 case INTR_POLARITY_HIGH:
666 case INTR_TRIGGER_CONFORM:
667 case INTR_TRIGGER_EDGE:
668 case INTR_TRIGGER_LEVEL:
/*
 * pic_map_intr method: resolve map data to the matching interrupt
 * source.  Polarity/trigger are not needed here, hence the NULLs.
 */
683 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
684 struct intr_irqsrc **isrcp)
686 struct gic_v3_softc *sc;
690 error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
692 sc = device_get_softc(dev);
693 *isrcp = GIC_INTR_ISRC(sc, irq);
/*
 * pic_setup_intr method: validate the requested configuration against
 * any existing one, program trigger mode into GICR_ICFGR (SGI/PPI) or
 * GICD_ICFGR (SPI), and route the interrupt to a CPU.
 */
699 gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
700 struct resource *res, struct intr_map_data *data)
702 struct gic_v3_softc *sc = device_get_softc(dev);
703 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
704 enum intr_trigger trig;
705 enum intr_polarity pol;
713 error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
/* The map data must agree with the isrc and be fully specified. */
717 if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
718 trig == INTR_TRIGGER_CONFORM)
721 /* Compare config if this is not first setup. */
722 if (isrc->isrc_handlers != 0) {
723 if (pol != gi->gi_pol || trig != gi->gi_trig)
729 /* For MSI/MSI-X we should have already configured these */
730 if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
736 * XXX - In case that per CPU interrupt is going to be enabled in time
737 * when SMP is already started, we need some IPI call which
738 * enables it on others CPUs. Further, it's more complicated as
739 * pic_enable_source() and pic_disable_source() should act on
740 * per CPU basis only. Thus, it should be solved here somehow.
742 if (isrc->isrc_flags & INTR_ISRCF_PPI)
743 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
745 if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
746 mtx_lock_spin(&sc->gic_mtx);
748 /* Set the trigger and polarity */
/* PPIs live in the Re-Distributor SGI frame; SPIs in the Distributor. */
749 if (irq <= GIC_LAST_PPI)
750 reg = gic_r_read(sc, 4,
751 GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
753 reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
/* ICFGR packs 2 config bits per interrupt; bit 1 selects edge mode. */
754 if (trig == INTR_TRIGGER_LEVEL)
755 reg &= ~(2 << ((irq % 16) * 2));
757 reg |= 2 << ((irq % 16) * 2);
759 if (irq <= GIC_LAST_PPI) {
761 GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
762 gic_v3_wait_for_rwp(sc, REDIST);
764 gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
765 gic_v3_wait_for_rwp(sc, DIST);
768 mtx_unlock_spin(&sc->gic_mtx);
770 gic_v3_bind_intr(dev, isrc);
/*
 * pic_teardown_intr method: once the last handler is gone, reset the
 * cached trigger/polarity — except for reserved MSI sources, whose
 * configuration is fixed at attach time.
 */
777 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
778 struct resource *res, struct intr_map_data *data)
780 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
782 if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
783 gi->gi_pol = INTR_POLARITY_CONFORM;
784 gi->gi_trig = INTR_TRIGGER_CONFORM;
/*
 * pic_disable_intr method: mask the interrupt by writing its bit to the
 * appropriate ICENABLER register and wait for the write to take effect.
 */
791 gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
793 struct gic_v3_softc *sc;
794 struct gic_v3_irqsrc *gi;
797 sc = device_get_softc(dev);
798 gi = (struct gic_v3_irqsrc *)isrc;
801 if (irq <= GIC_LAST_PPI) {
802 /* SGIs and PPIs in corresponding Re-Distributor */
803 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
805 gic_v3_wait_for_rwp(sc, REDIST);
806 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
807 /* SPIs in distributor */
808 gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
809 gic_v3_wait_for_rwp(sc, DIST);
811 panic("%s: Unsupported IRQ %u", __func__, irq);
/*
 * pic_enable_intr method: unmask the interrupt via the matching
 * ISENABLER register (Re-Distributor for SGI/PPI, Distributor for SPI).
 */
815 gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
817 struct gic_v3_softc *sc;
818 struct gic_v3_irqsrc *gi;
821 sc = device_get_softc(dev);
822 gi = (struct gic_v3_irqsrc *)isrc;
825 if (irq <= GIC_LAST_PPI) {
826 /* SGIs and PPIs in corresponding Re-Distributor */
827 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
829 gic_v3_wait_for_rwp(sc, REDIST);
830 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
831 /* SPIs in distributor */
832 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
833 gic_v3_wait_for_rwp(sc, DIST);
835 panic("%s: Unsupported IRQ %u", __func__, irq);
/*
 * pic_pre_ithread method: mask and EOI a level interrupt before handing
 * it to its ithread, so it cannot re-fire until the thread finishes.
 */
839 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
841 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
843 gic_v3_disable_intr(dev, isrc);
844 gic_icc_write(EOIR1, gi->gi_irq);
/* pic_post_ithread method: re-enable the source after its ithread ran. */
848 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
851 gic_v3_enable_intr(dev, isrc);
/*
 * pic_post_filter method: complete a level-triggered interrupt with EOI
 * after the filter ran; edge interrupts were EOId in arm_gic_v3_intr().
 */
855 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
857 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
859 if (gi->gi_trig == INTR_TRIGGER_EDGE)
862 gic_icc_write(EOIR1, gi->gi_irq);
/*
 * pic_bind_intr method: program GICD_IROUTER with a target CPU affinity.
 * With no explicit binding, SPIs are spread round-robin over all CPUs;
 * otherwise the first CPU of the requested set is used (the GIC can
 * route an SPI to only one CPU).  SGIs/PPIs are inherently per-CPU and
 * are not routed here.
 */
866 gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
868 struct gic_v3_softc *sc;
869 struct gic_v3_irqsrc *gi;
872 gi = (struct gic_v3_irqsrc *)isrc;
873 if (gi->gi_irq <= GIC_LAST_PPI)
876 KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
877 ("%s: Attempting to bind an invalid IRQ", __func__));
879 sc = device_get_softc(dev);
881 if (CPU_EMPTY(&isrc->isrc_cpu)) {
/* Round-robin distribution across all CPUs via a file-static cursor. */
882 gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
883 CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
884 gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq),
885 CPU_AFFINITY(gic_irq_cpu));
888 * We can only bind to a single CPU so select
889 * the first CPU found.
891 cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
892 gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
/*
 * pic_init_secondary method: per-AP bring-up.  Runs the secondary init
 * sequence (redistributor + CPU interface), unmasks any SGI/PPI sources
 * already set up on the boot CPU, then recurses into child PICs (ITS).
 */
900 gic_v3_init_secondary(device_t dev)
903 struct gic_v3_softc *sc;
904 gic_v3_initseq_t *init_func;
905 struct intr_irqsrc *isrc;
909 sc = device_get_softc(dev);
910 cpu = PCPU_GET(cpuid);
912 /* Train init sequence for boot CPU */
913 for (init_func = gic_v3_secondary_init; *init_func != NULL;
915 err = (*init_func)(sc);
918 "Could not initialize GIC for CPU%u\n", cpu);
923 /* Unmask attached SGI interrupts. */
924 for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
925 isrc = GIC_INTR_ISRC(sc, irq);
926 if (intr_isrc_init_on_cpu(isrc, cpu))
927 gic_v3_enable_intr(dev, isrc);
930 /* Unmask attached PPI interrupts. */
931 for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
932 isrc = GIC_INTR_ISRC(sc, irq);
933 if (intr_isrc_init_on_cpu(isrc, cpu))
934 gic_v3_enable_intr(dev, isrc);
/* Let child controllers (e.g. the ITS) do their per-CPU setup too. */
937 for (i = 0; i < sc->gic_nchildren; i++) {
938 child = sc->gic_children[i];
939 PIC_INIT_SECONDARY(child);
/*
 * pic_ipi_send method: deliver an SGI to a set of CPUs via
 * ICC_SGI1R_EL1.  One SGI1R write can target up to 16 CPUs sharing the
 * same Aff3.Aff2.Aff1 group (each gets a bit in the target-list field),
 * so CPUs are batched per affinity group and the register written once
 * per group.
 */
944 gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
947 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
948 uint64_t aff, val, irq;
951 #define GIC_AFF_MASK (CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
952 #define GIC_AFFINITY(i) (CPU_AFFINITY(i) & GIC_AFF_MASK)
953 aff = GIC_AFFINITY(0);
957 /* Iterate through all CPUs in set */
958 for (i = 0; i <= mp_maxid; i++) {
959 /* Move to the next affinity group */
960 if (aff != GIC_AFFINITY(i)) {
/* Flush the accumulated target list for the previous group. */
963 gic_icc_write(SGI1R, val);
966 aff = GIC_AFFINITY(i);
969 /* Send the IPI to this cpu */
970 if (CPU_ISSET(i, &cpus)) {
971 #define ICC_SGI1R_AFFINITY(aff) \
972 (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) | \
973 ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) | \
974 ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
975 /* Set the affinity when the first at this level */
977 val = ICC_SGI1R_AFFINITY(aff) |
978 irq << ICC_SGI1R_EL1_SGIID_SHIFT;
979 /* Set the bit to send the IPI to the CPU */
980 val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
984 /* Send the IPI to the last cpu affinity group */
986 gic_icc_write(SGI1R, val);
/*
 * pic_ipi_setup method: allocate the next free SGI (0-15) for the given
 * IPI number.  Allocation is monotonic via sgi_first_unused and never
 * reclaimed; fails (in lines not visible here) when all 16 are used.
 */
992 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
994 struct intr_irqsrc *isrc;
995 struct gic_v3_softc *sc = device_get_softc(dev);
997 if (sgi_first_unused > GIC_LAST_SGI)
1000 isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1001 sgi_to_ipi[sgi_first_unused++] = ipi;
1003 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
/*
 * Spin until the Register Write Pending (RWP) bit clears on the
 * Distributor or the current CPU's Re-Distributor, panicking after
 * ~1 second.  NOTE(review): the GICD_CTLR offset/GICD_CTLR_RWP mask are
 * used for the REDIST case as well — presumably the CTLR offset and RWP
 * bit coincide between GICD and GICR frames; confirm against the GICv3
 * architecture spec.
 */
1014 gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
1016 struct resource *res;
1018 size_t us_left = 1000000;
1020 cpuid = PCPU_GET(cpuid);
1027 res = &sc->gic_redists.pcpu[cpuid]->res;
1030 KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
1034 while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
1037 panic("GICD Register write pending for too long");
1041 /* CPU interface. */
/* Set the CPU interface priority mask (ICC_PMR_EL1): interrupts with
 * priority lower than the mask are not signalled to this CPU. */
1042 static __inline void
1043 gic_v3_cpu_priority(uint64_t mask)
1046 /* Set priority mask */
1047 gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
/*
 * Enable the System Register Enable (SRE) bit so the GIC CPU interface
 * is accessed via ICC_* system registers instead of memory-mapped GICC.
 * The read-back check detects when EL2/EL3 firmware has locked SRE off,
 * in which case the driver cannot operate.
 */
1051 gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
1056 cpuid = PCPU_GET(cpuid);
1058 * Set the SRE bit to enable access to GIC CPU interface
1059 * via system registers.
1061 sre = READ_SPECIALREG(icc_sre_el1);
1062 sre |= ICC_SRE_EL1_SRE;
1063 WRITE_SPECIALREG(icc_sre_el1, sre);
1066 * Now ensure that the bit is set.
1068 sre = READ_SPECIALREG(icc_sre_el1);
1069 if ((sre & ICC_SRE_EL1_SRE) == 0) {
1070 /* We are done. This was disabled in EL2 */
1071 device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
1072 "via system registers\n", cpuid);
1074 } else if (bootverbose) {
1075 device_printf(sc->dev,
1076 "CPU%u enabled CPU interface via system registers\n",
/*
 * Per-CPU interface initialization: enable system-register access,
 * open the priority mask, select EOI mode 0 (EOIR performs both
 * priority drop and deactivation) and enable Group 1 interrupts.
 */
1084 gic_v3_cpu_init(struct gic_v3_softc *sc)
1088 /* Enable access to CPU interface via system registers */
1089 err = gic_v3_cpu_enable_sre(sc);
1092 /* Priority mask to minimum - accept all interrupts */
1093 gic_v3_cpu_priority(GIC_PRIORITY_MIN);
1094 /* Disable EOI mode */
1095 gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
1096 /* Enable group 1 (insecure) interrupts */
/* NOTE(review): the IGRPEN0 macro is written to IGRPEN1 — presumably the
 * enable bit occupies the same position in both registers; confirm. */
1097 gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);
/*
 * Distributor initialization (boot CPU only): disable, configure all
 * SPIs to a known state (Group 1 NS, level-triggered, max priority,
 * masked), re-enable with affinity routing, and route every SPI to the
 * boot CPU until drivers bind them elsewhere.
 */
1104 gic_v3_dist_init(struct gic_v3_softc *sc)
1110 * 1. Disable the Distributor
1112 gic_d_write(sc, 4, GICD_CTLR, 0);
1113 gic_v3_wait_for_rwp(sc, DIST);
1116 * 2. Configure the Distributor
1118 /* Set all SPIs to be Group 1 Non-secure */
1119 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
1120 gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);
1122 /* Set all global interrupts to be level triggered, active low. */
1123 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
1124 gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);
1126 /* Set priority to all shared interrupts */
1127 for (i = GIC_FIRST_SPI;
1128 i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
1129 /* Set highest priority */
1130 gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
1134 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
1135 * Re-Distributor registers.
1137 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
1138 gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);
1140 gic_v3_wait_for_rwp(sc, DIST);
1143 * 3. Enable Distributor
1145 /* Enable Distributor with ARE, Group 1 */
1146 gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
1150 * 4. Route all interrupts to boot CPU.
1152 aff = CPU_AFFINITY(0);
1153 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
1154 gic_d_write(sc, 8, GICD_IROUTER(i), aff);
1159 /* Re-Distributor */
/*
 * Pre-allocate the per-CPU redistributor bookkeeping for every CPU that
 * exists; absent CPU ids get an explicit NULL so later code can rely on
 * the slot being initialized.
 */
1161 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1165 /* Allocate struct resource for all CPU's Re-Distributor registers */
1166 for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
1167 if (CPU_ISSET(cpuid, &all_cpus) != 0)
1168 sc->gic_redists.pcpu[cpuid] =
1169 malloc(sizeof(*sc->gic_redists.pcpu[0]),
1170 M_GIC_V3, M_WAITOK);
1172 sc->gic_redists.pcpu[cpuid] = NULL;
/*
 * Locate the Re-Distributor frame belonging to the calling CPU by
 * walking each redistributor region and comparing GICR_TYPER's affinity
 * field against this CPU's MPIDR affinity.  On a match, a copy of the
 * region's resource — with its bus handle advanced to the matching
 * frame — is stored in the per-CPU slot.
 */
1177 gic_v3_redist_find(struct gic_v3_softc *sc)
1179 struct resource r_res;
1180 bus_space_handle_t r_bsh;
1187 cpuid = PCPU_GET(cpuid);
1189 aff = CPU_AFFINITY(cpuid);
1190 /* Affinity in format for comparison with typer */
1191 aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
1192 (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);
1195 device_printf(sc->dev,
1196 "Start searching for Re-Distributor\n");
1198 /* Iterate through Re-Distributor regions */
1199 for (i = 0; i < sc->gic_redists.nregions; i++) {
1200 /* Take a copy of the region's resource */
1201 r_res = *sc->gic_redists.regions[i];
1202 r_bsh = rman_get_bushandle(&r_res);
/* Sanity-check the frame really is a GICv3/v4 redistributor. */
1204 pidr2 = bus_read_4(&r_res, GICR_PIDR2);
1205 switch (GICR_PIDR2_ARCH(pidr2)) {
1206 case GICR_PIDR2_ARCH_GICv3: /* fall through */
1207 case GICR_PIDR2_ARCH_GICv4:
1210 device_printf(sc->dev,
1211 "No Re-Distributor found for CPU%u\n", cpuid);
1216 typer = bus_read_8(&r_res, GICR_TYPER);
1217 if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
1218 KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
1219 ("Invalid pointer to per-CPU redistributor"));
1220 /* Copy res contents to its final destination */
1221 sc->gic_redists.pcpu[cpuid]->res = r_res;
1222 sc->gic_redists.pcpu[cpuid]->lpi_enabled = false;
1224 device_printf(sc->dev,
1225 "CPU%u Re-Distributor has been found\n",
/* Step to the next frame: RD+SGI pages, plus VLPI pages on GICv4. */
1231 r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
1232 if ((typer & GICR_TYPER_VLPIS) != 0) {
1234 (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
1237 rman_set_bushandle(&r_res, r_bsh);
1238 } while ((typer & GICR_TYPER_LAST) == 0);
1241 device_printf(sc->dev, "No Re-Distributor found for CPU%u", cpuid);
/*
 * Wake this CPU's Re-Distributor: clear GICR_WAKER.ProcessorSleep and
 * spin (up to ~1 second) until ChildrenAsleep reads zero.
 */
1246 gic_v3_redist_wake(struct gic_v3_softc *sc)
1249 size_t us_left = 1000000;
1251 waker = gic_r_read(sc, 4, GICR_WAKER);
1252 /* Wake up Re-Distributor for this CPU */
1253 waker &= ~GICR_WAKER_PS;
1254 gic_r_write(sc, 4, GICR_WAKER, waker);
1256 * When clearing ProcessorSleep bit it is required to wait for
1257 * ChildrenAsleep to become zero following the processor power-on.
1259 while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1261 if (us_left-- == 0) {
1262 panic("Could not wake Re-Distributor for CPU%u",
1268 device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
/*
 * Per-CPU Re-Distributor initialization: find and wake this CPU's
 * frame, put SGIs/PPIs into Group 1 NS, mask PPIs, enable SGIs and set
 * their priorities, then wait for the writes to settle.
 */
1276 gic_v3_redist_init(struct gic_v3_softc *sc)
1281 err = gic_v3_redist_find(sc);
1285 err = gic_v3_redist_wake(sc);
1289 /* Configure SGIs and PPIs to be Group1 Non-secure */
1290 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
1294 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
1295 GICR_I_ENABLER_PPI_MASK);
1297 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
1298 GICR_I_ENABLER_SGI_MASK);
1300 /* Set priority for SGIs and PPIs */
1301 for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
1302 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
1306 gic_v3_wait_for_rwp(sc, REDIST);
1312 * SPI-mapped Message Based Interrupts -- a GICv3 MSI/MSI-X controller.
/*
 * msi_alloc_msi method: find `count` consecutive free interrupts inside
 * the reserved MBI SPI range, starting on a maxcount-aligned boundary
 * (MSI requires naturally aligned vector blocks), mark them used and
 * return their sources.
 */
1316 gic_v3_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1317 device_t *pic, struct intr_irqsrc **srcs)
1319 struct gic_v3_softc *sc;
1320 int i, irq, end_irq;
1323 KASSERT(powerof2(count), ("%s: bad count", __func__));
1324 KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));
1326 sc = device_get_softc(dev);
1328 mtx_lock(&sc->gic_mbi_mtx);
1331 for (irq = sc->gic_mbi_start; irq < sc->gic_mbi_end; irq++) {
1332 /* Start on an aligned interrupt */
1333 if ((irq & (maxcount - 1)) != 0)
1336 /* Assume we found a valid range until shown otherwise */
1339 /* Check this range is valid */
1340 for (end_irq = irq; end_irq != irq + count; end_irq++) {
1341 /* No free interrupts */
1342 if (end_irq == sc->gic_mbi_end) {
1347 KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
1348 ("%s: Non-MSI interrupt found", __func__));
1350 /* This is already used */
1351 if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
1361 /* Not enough interrupts were found */
1362 if (!found || irq == sc->gic_mbi_end) {
1363 mtx_unlock(&sc->gic_mbi_mtx);
1367 for (i = 0; i < count; i++) {
1368 /* Mark the interrupt as used */
1369 sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
1371 mtx_unlock(&sc->gic_mbi_mtx);
1373 for (i = 0; i < count; i++)
1374 srcs[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];
/*
 * msi_release_msi method: return previously allocated MSI interrupts to
 * the pool by clearing their "used" flag under the MBI lock.
 */
1381 gic_v3_release_msi(device_t dev, device_t child, int count,
1382 struct intr_irqsrc **isrc)
1384 struct gic_v3_softc *sc;
1385 struct gic_v3_irqsrc *gi;
1388 sc = device_get_softc(dev);
1390 mtx_lock(&sc->gic_mbi_mtx);
1391 for (i = 0; i < count; i++) {
1392 gi = (struct gic_v3_irqsrc *)isrc[i];
1394 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1395 ("%s: Trying to release an unused MSI-X interrupt",
1398 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1400 mtx_unlock(&sc->gic_mbi_mtx);
/*
 * msi_alloc_msix method: grab one free interrupt from the MBI range
 * (MSI-X vectors are allocated individually, no alignment needed).
 */
1406 gic_v3_alloc_msix(device_t dev, device_t child, device_t *pic,
1407 struct intr_irqsrc **isrcp)
1409 struct gic_v3_softc *sc;
1412 sc = device_get_softc(dev);
1414 mtx_lock(&sc->gic_mbi_mtx);
1415 /* Find an unused interrupt */
1416 for (irq = sc->gic_mbi_start; irq < sc->gic_mbi_end; irq++) {
1417 KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1418 ("%s: Non-MSI interrupt found", __func__));
1419 if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1422 /* No free interrupt was found */
1423 if (irq == sc->gic_mbi_end) {
1424 mtx_unlock(&sc->gic_mbi_mtx);
1428 /* Mark the interrupt as used */
1429 sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1430 mtx_unlock(&sc->gic_mbi_mtx);
1432 *isrcp = (struct intr_irqsrc *)&sc->gic_irqs[irq];
/*
 * msi_release_msix method: return a single MSI-X interrupt to the pool.
 */
1439 gic_v3_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1441 struct gic_v3_softc *sc;
1442 struct gic_v3_irqsrc *gi;
1444 sc = device_get_softc(dev);
1445 gi = (struct gic_v3_irqsrc *)isrc;
1447 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1448 ("%s: Trying to release an unused MSI-X interrupt", __func__));
1450 mtx_lock(&sc->gic_mbi_mtx);
1451 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1452 mtx_unlock(&sc->gic_mbi_mtx);
1458 gic_v3_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1459 uint64_t *addr, uint32_t *data)
1461 struct gic_v3_softc *sc = device_get_softc(dev);
1462 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1464 *addr = vtophys(rman_get_virtual(sc->gic_dist)) + GICD_SETSPI_NSR;