2 * Copyright (c) 2015-2016 The FreeBSD Foundation
5 * This software was developed by Andrew Turner under
6 * the sponsorship of the FreeBSD Foundation.
8 * This software was developed by Semihalf under
9 * the sponsorship of the FreeBSD Foundation.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include "opt_platform.h"
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bitstring.h>
42 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
49 #include <sys/cpuset.h>
51 #include <sys/mutex.h>
57 #include <machine/bus.h>
58 #include <machine/cpu.h>
59 #include <machine/intr.h>
62 #include <dev/fdt/fdt_intr.h>
63 #include <dev/ofw/ofw_bus_subr.h>
68 #include <arm/arm/gic_common.h>
69 #include "gic_v3_reg.h"
70 #include "gic_v3_var.h"
/*
 * Forward declarations for the newbus bus-interface and INTRNG PIC-interface
 * methods implemented in this file (wired up in gic_v3_methods below).
 * NOTE(review): this is a numbered paste; some declaration lines are elided.
 */
72 static bus_get_domain_t gic_v3_get_domain;
73 static bus_read_ivar_t gic_v3_read_ivar;
75 static pic_disable_intr_t gic_v3_disable_intr;
76 static pic_enable_intr_t gic_v3_enable_intr;
77 static pic_map_intr_t gic_v3_map_intr;
78 static pic_setup_intr_t gic_v3_setup_intr;
79 static pic_teardown_intr_t gic_v3_teardown_intr;
80 static pic_post_filter_t gic_v3_post_filter;
81 static pic_post_ithread_t gic_v3_post_ithread;
82 static pic_pre_ithread_t gic_v3_pre_ithread;
83 static pic_bind_intr_t gic_v3_bind_intr;
85 static pic_init_secondary_t gic_v3_init_secondary;
86 static pic_ipi_send_t gic_v3_ipi_send;
87 static pic_ipi_setup_t gic_v3_ipi_setup;
/* Last CPU an SPI was round-robin bound to by gic_v3_bind_intr(). */
90 static u_int gic_irq_cpu;
/* Per-SGI record of which IPI number each software-generated intr carries. */
92 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
/* Next SGI available for gic_v3_ipi_setup() to hand out. */
93 static u_int sgi_first_unused = GIC_FIRST_SGI;
/*
 * Newbus method table: maps device/bus/PIC interface methods to the
 * implementations in this file. DEFINE_CLASS_0 registers the "gic" class.
 * NOTE(review): numbered paste — DEVMETHOD_END and some entries are elided.
 */
96 static device_method_t gic_v3_methods[] = {
97 /* Device interface */
98 DEVMETHOD(device_detach, gic_v3_detach),
101 DEVMETHOD(bus_get_domain, gic_v3_get_domain),
102 DEVMETHOD(bus_read_ivar, gic_v3_read_ivar),
104 /* Interrupt controller interface */
105 DEVMETHOD(pic_disable_intr, gic_v3_disable_intr),
106 DEVMETHOD(pic_enable_intr, gic_v3_enable_intr),
107 DEVMETHOD(pic_map_intr, gic_v3_map_intr),
108 DEVMETHOD(pic_setup_intr, gic_v3_setup_intr),
109 DEVMETHOD(pic_teardown_intr, gic_v3_teardown_intr),
110 DEVMETHOD(pic_post_filter, gic_v3_post_filter),
111 DEVMETHOD(pic_post_ithread, gic_v3_post_ithread),
112 DEVMETHOD(pic_pre_ithread, gic_v3_pre_ithread),
114 DEVMETHOD(pic_bind_intr, gic_v3_bind_intr),
115 DEVMETHOD(pic_init_secondary, gic_v3_init_secondary),
116 DEVMETHOD(pic_ipi_send, gic_v3_ipi_send),
117 DEVMETHOD(pic_ipi_setup, gic_v3_ipi_setup),
124 DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
125 sizeof(struct gic_v3_softc));
128 * Driver-specific definitions.
/* Malloc type used for all allocations made by this driver. */
130 MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
133 * Helper functions and definitions.
135 /* Destination registers, either Distributor or Re-Distributor */
/*
 * Per-interrupt source state: embeds the generic INTRNG irqsrc plus the
 * configured polarity/trigger. NOTE(review): the gi_irq number field is
 * elided from this numbered paste but is referenced throughout the file.
 */
141 struct gic_v3_irqsrc {
142 struct intr_irqsrc gi_isrc;
144 enum intr_polarity gi_pol;
145 enum intr_trigger gi_trig;
148 /* Helper routines starting with gic_v3_ */
149 static int gic_v3_dist_init(struct gic_v3_softc *);
150 static int gic_v3_redist_alloc(struct gic_v3_softc *);
151 static int gic_v3_redist_find(struct gic_v3_softc *);
152 static int gic_v3_redist_init(struct gic_v3_softc *);
153 static int gic_v3_cpu_init(struct gic_v3_softc *);
154 static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);
156 /* A sequence of init functions for primary (boot) CPU */
157 typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
158 /* Primary CPU initialization sequence */
/* NOTE(review): array initializer entries are elided in this numbered paste;
 * gic_v3_attach() walks this NULL-terminated list on the boot CPU. */
159 static gic_v3_initseq_t gic_v3_primary_init[] = {
168 /* Secondary CPU initialization sequence */
/* Walked by gic_v3_init_secondary() on each application processor. */
169 static gic_v3_initseq_t gic_v3_secondary_init[] = {
177 gic_r_read_4(device_t dev, bus_size_t offset)
179 struct gic_v3_softc *sc;
181 sc = device_get_softc(dev);
182 return (bus_read_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
186 gic_r_read_8(device_t dev, bus_size_t offset)
188 struct gic_v3_softc *sc;
190 sc = device_get_softc(dev);
191 return (bus_read_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
195 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
197 struct gic_v3_softc *sc;
199 sc = device_get_softc(dev);
200 bus_write_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
204 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
206 struct gic_v3_softc *sc;
208 sc = device_get_softc(dev);
209 bus_write_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
/*
 * Device attach: map the Distributor plus all Re-Distributor regions,
 * size the SPI space from GICD_TYPER, register one INTRNG irqsrc per
 * interrupt, then run the boot-CPU init sequence (gic_v3_primary_init).
 * NOTE(review): numbered paste — error paths, locals and returns elided.
 */
216 gic_v3_attach(device_t dev)
218 struct gic_v3_softc *sc;
219 gic_v3_initseq_t *init_func;
227 sc = device_get_softc(dev);
228 sc->gic_registered = FALSE;
232 /* Initialize mutex */
233 mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
236 * Allocate array of struct resource.
237 * One entry for Distributor and all remaining for Re-Distributor.
239 sc->gic_res = malloc(
240 sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
243 /* Now allocate corresponding resources */
244 for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
245 sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
247 if (sc->gic_res[rid] == NULL)
252 * Distributor interface
/* rid 0 is the Distributor; rids 1..nregions are Re-Distributor regions. */
254 sc->gic_dist = sc->gic_res[0];
257 * Re-Dristributor interface
259 /* Allocate space under region descriptions */
260 sc->gic_redists.regions = malloc(
261 sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
264 /* Fill-up bus_space information for each region. */
265 for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
266 sc->gic_redists.regions[i] = sc->gic_res[rid];
268 /* Get the number of supported SPI interrupts */
269 typer = gic_d_read(sc, 4, GICD_TYPER);
270 sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
271 if (sc->gic_nirqs > GIC_I_NUM_MAX)
272 sc->gic_nirqs = GIC_I_NUM_MAX;
274 sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
275 M_GIC_V3, M_WAITOK | M_ZERO);
276 name = device_get_nameunit(dev);
277 for (irq = 0; irq < sc->gic_nirqs; irq++) {
278 struct intr_irqsrc *isrc;
280 sc->gic_irqs[irq].gi_irq = irq;
/* Polarity/trigger resolved later in gic_v3_setup_intr(). */
281 sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
282 sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
284 isrc = &sc->gic_irqs[irq].gi_isrc;
285 if (irq <= GIC_LAST_SGI) {
286 err = intr_isrc_register(isrc, sc->dev,
287 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
288 } else if (irq <= GIC_LAST_PPI) {
289 err = intr_isrc_register(isrc, sc->dev,
290 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
292 err = intr_isrc_register(isrc, sc->dev, 0,
293 "%s,s%u", name, irq - GIC_FIRST_SPI);
296 /* XXX call intr_isrc_deregister() */
/* NOTE(review): gic_irqs was allocated with M_GIC_V3 above but is freed
 * here with M_DEVBUF — malloc-type mismatch; should be M_GIC_V3. */
297 free(sc->gic_irqs, M_DEVBUF);
303 * Read the Peripheral ID2 register. This is an implementation
304 * defined register, but seems to be implemented in all GICv3
305 * parts and Linux expects it to be there.
307 sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
309 /* Get the number of supported interrupt identifier bits */
310 sc->gic_idbits = GICD_TYPER_IDBITS(typer);
313 device_printf(dev, "SPIs: %u, IDs: %u\n",
314 sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
317 /* Train init sequence for boot CPU */
318 for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
319 err = (*init_func)(sc);
/*
 * Device detach: refuse to detach while registered as the system PIC,
 * then release all bus resources and per-CPU/region allocations.
 * NOTE(review): numbered paste — locals, braces and return elided.
 */
328 gic_v3_detach(device_t dev)
330 struct gic_v3_softc *sc;
334 sc = device_get_softc(dev);
336 if (device_is_attached(dev)) {
338 * XXX: We should probably deregister PIC
/* Detaching an active system interrupt controller is unsupported. */
340 if (sc->gic_registered)
341 panic("Trying to detach registered PIC");
343 for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
344 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
346 for (i = 0; i <= mp_maxid; i++)
347 free(sc->gic_redists.pcpu[i], M_GIC_V3);
349 free(sc->gic_res, M_GIC_V3);
350 free(sc->gic_redists.regions, M_GIC_V3);
/*
 * bus_get_domain method: report the child's NUMA domain from its ivars.
 * A negative gic_domain means "no domain"; the elided branch presumably
 * returns ENOENT in that case — TODO confirm against full source.
 */
356 gic_v3_get_domain(device_t dev, device_t child, int *domain)
358 struct gic_v3_devinfo *di;
360 di = device_get_ivars(child);
361 if (di->gic_domain < 0)
364 *domain = di->gic_domain;
/*
 * bus_read_ivar method: export GIC properties to children (ITS, MSI).
 * NOTE(review): numbered paste — the switch head, break statements and
 * returns are elided.
 */
369 gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
371 struct gic_v3_softc *sc;
373 sc = device_get_softc(dev);
/* Share the non-SPI interrupt number space evenly among children. */
376 case GICV3_IVAR_NIRQS:
377 *result = (NIRQ - sc->gic_nirqs) / sc->gic_nchildren;
/* Virtual address of this CPU's Re-Distributor mapping. */
379 case GICV3_IVAR_REDIST_VADDR:
380 *result = (uintptr_t)rman_get_virtual(
381 sc->gic_redists.pcpu[PCPU_GET(cpuid)]);
/* Architecture revision (v3/v4) as probed from GICD_PIDR2. */
383 case GIC_IVAR_HW_REV:
385 GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
386 GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
387 ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
388 GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
389 *result = GICR_PIDR2_ARCH(sc->gic_pidr2);
392 KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
393 ("gic_v3_read_ivar: Unknown bus type"));
394 KASSERT(sc->gic_bus <= GIC_BUS_MAX,
395 ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
396 *result = sc->gic_bus;
/*
 * Top-level interrupt filter: acknowledge the highest-priority pending
 * group-1 interrupt from ICC_IAR1_EL1 and dispatch it — LPIs to child
 * controllers (ITS), SGIs as IPIs, PPIs/SPIs to their irqsrc handlers.
 * NOTE(review): numbered paste — loop structure, returns and some
 * branches are elided.
 */
404 arm_gic_v3_intr(void *arg)
406 struct gic_v3_softc *sc = arg;
407 struct gic_v3_irqsrc *gi;
408 struct intr_pic *pic;
410 struct trapframe *tf;
417 if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) {
419 * Hardware: Cavium ThunderX
420 * Chip revision: Pass 1.0 (early version)
421 * Pass 1.1 (production)
422 * ERRATUM: 22978, 23154
/* Erratum workaround: pad the IAR read with nops per Cavium guidance. */
425 "nop;nop;nop;nop;nop;nop;nop;nop; \n"
426 "mrs %0, ICC_IAR1_EL1 \n"
427 "nop;nop;nop;nop; \n"
429 : "=&r" (active_irq));
431 active_irq = gic_icc_read(IAR1);
/* LPIs (>= 8192) belong to the child ITS; hand them off wholesale. */
434 if (active_irq >= GIC_FIRST_LPI) {
435 intr_child_irq_handler(pic, active_irq);
/* Spurious or out-of-range IDs (e.g. 1023) are simply acknowledged. */
439 if (__predict_false(active_irq >= sc->gic_nirqs))
440 return (FILTER_HANDLED);
442 tf = curthread->td_intr_frame;
443 gi = &sc->gic_irqs[active_irq];
444 if (active_irq <= GIC_LAST_SGI) {
445 /* Call EOI for all IPI before dispatch. */
446 gic_icc_write(EOIR1, (uint64_t)active_irq);
448 intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
450 device_printf(sc->dev, "SGI %ju on UP system detected\n",
451 (uintmax_t)(active_irq - GIC_FIRST_SGI));
453 } else if (active_irq >= GIC_FIRST_PPI &&
454 active_irq <= GIC_LAST_SPI) {
/* Edge interrupts are EOId here; level ones in post_filter/ithread. */
455 if (gi->gi_trig == INTR_TRIGGER_EDGE)
456 gic_icc_write(EOIR1, gi->gi_irq);
458 if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
459 if (gi->gi_trig != INTR_TRIGGER_EDGE)
460 gic_icc_write(EOIR1, gi->gi_irq);
/* No handler claimed it: mask the stray source to avoid a storm. */
461 gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
462 device_printf(sc->dev,
463 "Stray irq %lu disabled\n", active_irq);
/*
 * Decode a 3-cell FDT interrupt specifier (type, number, flags) into a
 * linear GIC interrupt number plus trigger/polarity.
 * NOTE(review): numbered paste — switch heads, returns and the SPI/PPI
 * case labels are elided.
 */
471 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
472 enum intr_polarity *polp, enum intr_trigger *trigp)
480 * The 1st cell is the interrupt type:
483 * The 2nd cell contains the interrupt number:
486 * The 3rd cell is the flags, encoded as follows:
487 * bits[3:0] trigger type and level flags
489 * 2 = edge triggered (PPI only)
490 * 4 = level-sensitive
491 * 8 = level-sensitive (PPI only)
495 irq = GIC_FIRST_SPI + cells[1];
496 /* SPI irq is checked later. */
499 irq = GIC_FIRST_PPI + cells[1];
500 if (irq > GIC_LAST_PPI) {
501 device_printf(dev, "unsupported PPI interrupt "
502 "number %u\n", cells[1]);
507 device_printf(dev, "unsupported interrupt type "
508 "configuration %u\n", cells[0]);
/* Translate the FDT flag encoding into INTRNG trigger/polarity. */
512 switch (cells[2] & FDT_INTR_MASK) {
513 case FDT_INTR_EDGE_RISING:
514 *trigp = INTR_TRIGGER_EDGE;
515 *polp = INTR_POLARITY_HIGH;
517 case FDT_INTR_EDGE_FALLING:
518 *trigp = INTR_TRIGGER_EDGE;
519 *polp = INTR_POLARITY_LOW;
521 case FDT_INTR_LEVEL_HIGH:
522 *trigp = INTR_TRIGGER_LEVEL;
523 *polp = INTR_POLARITY_HIGH;
525 case FDT_INTR_LEVEL_LOW:
526 *trigp = INTR_TRIGGER_LEVEL;
527 *polp = INTR_POLARITY_LOW;
530 device_printf(dev, "unsupported trigger/polarity "
531 "configuration 0x%02x\n", cells[2]);
535 /* Check the interrupt is valid */
/* SPIs must be active-high per the GIC binding. */
536 if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
/*
 * Decode MSI map data: the irqsrc was already allocated by the MSI code,
 * so just recover its interrupt number and fixed trigger/polarity.
 * NOTE(review): numbered paste — the *irqp assignment and return elided.
 */
545 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
546 enum intr_polarity *polp, enum intr_trigger *trigp)
548 struct gic_v3_irqsrc *gi;
551 gi = (struct gic_v3_irqsrc *)msi_data->isrc;
557 /* MSI/MSI-X interrupts are always edge triggered with high polarity */
558 *polp = INTR_POLARITY_HIGH;
559 *trigp = INTR_TRIGGER_EDGE;
/*
 * Common mapping helper: dispatch on the map-data type (FDT, MSI, ...),
 * then validate the resulting irq number, polarity and trigger before
 * reporting them to the caller.
 * NOTE(review): numbered paste — switch/validation tails are elided.
 */
565 do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
566 enum intr_polarity *polp, enum intr_trigger *trigp)
568 struct gic_v3_softc *sc;
569 enum intr_polarity pol;
570 enum intr_trigger trig;
571 struct intr_map_data_msi *dam;
573 struct intr_map_data_fdt *daf;
577 sc = device_get_softc(dev);
579 switch (data->type) {
581 case INTR_MAP_DATA_FDT:
582 daf = (struct intr_map_data_fdt *)data;
583 if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
588 case INTR_MAP_DATA_MSI:
590 dam = (struct intr_map_data_msi *)data;
591 if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
/* Reject interrupt numbers beyond what this Distributor supports. */
598 if (irq >= sc->gic_nirqs)
/* Only these polarity/trigger values are acceptable from map data. */
601 case INTR_POLARITY_CONFORM:
602 case INTR_POLARITY_LOW:
603 case INTR_POLARITY_HIGH:
609 case INTR_TRIGGER_CONFORM:
610 case INTR_TRIGGER_EDGE:
611 case INTR_TRIGGER_LEVEL:
/*
 * PIC_MAP_INTR method: resolve map data to the matching irqsrc.
 * Thin wrapper over do_gic_v3_map_intr() that discards pol/trig.
 * NOTE(review): numbered paste — the error check and return are elided.
 */
626 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
627 struct intr_irqsrc **isrcp)
629 struct gic_v3_softc *sc;
633 error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
635 sc = device_get_softc(dev);
636 *isrcp = GIC_INTR_ISRC(sc, irq);
/*
 * PIC_SETUP_INTR method: resolve and record the interrupt's polarity and
 * trigger, program GIC(D/R)_ICFGR accordingly, then route SPIs to a CPU.
 * NOTE(review): numbered paste — returns and some error branches elided.
 */
642 gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
643 struct resource *res, struct intr_map_data *data)
645 struct gic_v3_softc *sc = device_get_softc(dev);
646 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
647 enum intr_trigger trig;
648 enum intr_polarity pol;
656 error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
/* Map data must name this exact irq and fully specify its config. */
660 if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
661 trig == INTR_TRIGGER_CONFORM)
664 /* Compare config if this is not first setup. */
665 if (isrc->isrc_handlers != 0) {
666 if (pol != gi->gi_pol || trig != gi->gi_trig)
676 * XXX - In case that per CPU interrupt is going to be enabled in time
677 * when SMP is already started, we need some IPI call which
678 * enables it on others CPUs. Further, it's more complicated as
679 * pic_enable_source() and pic_disable_source() should act on
680 * per CPU basis only. Thus, it should be solved here somehow.
682 if (isrc->isrc_flags & INTR_ISRCF_PPI)
683 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
685 if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
686 mtx_lock_spin(&sc->gic_mtx);
688 /* Set the trigger and polarity */
/* PPIs live in the Re-Distributor SGI frame; SPIs in the Distributor. */
689 if (irq <= GIC_LAST_PPI)
690 reg = gic_r_read(sc, 4,
691 GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
693 reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
/* Each interrupt owns 2 bits in ICFGR; bit 1 set means edge. */
694 if (trig == INTR_TRIGGER_LEVEL)
695 reg &= ~(2 << ((irq % 16) * 2));
697 reg |= 2 << ((irq % 16) * 2);
699 if (irq <= GIC_LAST_PPI) {
701 GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
702 gic_v3_wait_for_rwp(sc, REDIST);
704 gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
705 gic_v3_wait_for_rwp(sc, DIST);
708 mtx_unlock_spin(&sc->gic_mtx);
710 gic_v3_bind_intr(dev, isrc);
/*
 * PIC_TEARDOWN_INTR method: when the last handler is gone, forget the
 * recorded polarity/trigger so the next setup starts from CONFORM.
 * NOTE(review): numbered paste — the closing brace and return elided.
 */
717 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
718 struct resource *res, struct intr_map_data *data)
720 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
722 if (isrc->isrc_handlers == 0) {
723 gi->gi_pol = INTR_POLARITY_CONFORM;
724 gi->gi_trig = INTR_TRIGGER_CONFORM;
/*
 * PIC_DISABLE_INTR method: mask the interrupt via ICENABLER — in this
 * CPU's Re-Distributor for SGIs/PPIs, in the Distributor for SPIs —
 * then wait for the register write to complete (RWP).
 * NOTE(review): numbered paste — the irq = gi->gi_irq line is elided.
 */
731 gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
733 struct gic_v3_softc *sc;
734 struct gic_v3_irqsrc *gi;
737 sc = device_get_softc(dev);
738 gi = (struct gic_v3_irqsrc *)isrc;
741 if (irq <= GIC_LAST_PPI) {
742 /* SGIs and PPIs in corresponding Re-Distributor */
743 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
745 gic_v3_wait_for_rwp(sc, REDIST);
746 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
747 /* SPIs in distributor */
748 gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
749 gic_v3_wait_for_rwp(sc, DIST);
751 panic("%s: Unsupported IRQ %u", __func__, irq);
/*
 * PIC_ENABLE_INTR method: unmask the interrupt via ISENABLER — mirror
 * image of gic_v3_disable_intr() above.
 * NOTE(review): numbered paste — the irq = gi->gi_irq line is elided.
 */
755 gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
757 struct gic_v3_softc *sc;
758 struct gic_v3_irqsrc *gi;
761 sc = device_get_softc(dev);
762 gi = (struct gic_v3_irqsrc *)isrc;
765 if (irq <= GIC_LAST_PPI) {
766 /* SGIs and PPIs in corresponding Re-Distributor */
767 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
769 gic_v3_wait_for_rwp(sc, REDIST);
770 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
771 /* SPIs in distributor */
772 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
773 gic_v3_wait_for_rwp(sc, DIST);
775 panic("%s: Unsupported IRQ %u", __func__, irq);
779 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
781 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
783 gic_v3_disable_intr(dev, isrc);
784 gic_icc_write(EOIR1, gi->gi_irq);
788 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
791 gic_v3_enable_intr(dev, isrc);
795 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
797 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
799 if (gi->gi_trig == INTR_TRIGGER_EDGE)
802 gic_icc_write(EOIR1, gi->gi_irq);
/*
 * PIC_BIND_INTR method: route an SPI to a CPU via GICD_IROUTER. With no
 * explicit cpuset, round-robin across all_cpus (gic_irq_cpu tracks the
 * last choice); otherwise bind to the first CPU in the requested set.
 * SGIs/PPIs are per-CPU and cannot be routed.
 * NOTE(review): numbered paste — returns and braces elided.
 */
806 gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
808 struct gic_v3_softc *sc;
809 struct gic_v3_irqsrc *gi;
812 gi = (struct gic_v3_irqsrc *)isrc;
813 if (gi->gi_irq <= GIC_LAST_PPI)
816 KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
817 ("%s: Attempting to bind an invalid IRQ", __func__));
819 sc = device_get_softc(dev);
821 if (CPU_EMPTY(&isrc->isrc_cpu)) {
822 gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
823 CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
824 gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq),
825 CPU_AFFINITY(gic_irq_cpu));
828 * We can only bind to a single CPU so select
829 * the first CPU found.
831 cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
832 gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
/*
 * PIC_INIT_SECONDARY method: run the per-AP init sequence (Re-Distributor
 * discovery/init, CPU interface), unmask the SGIs/PPIs already set up on
 * this CPU, then let child PICs (ITS) initialize for this CPU too.
 * NOTE(review): numbered paste — error returns and loop tails elided.
 */
840 gic_v3_init_secondary(device_t dev)
843 struct gic_v3_softc *sc;
844 gic_v3_initseq_t *init_func;
845 struct intr_irqsrc *isrc;
849 sc = device_get_softc(dev);
850 cpu = PCPU_GET(cpuid);
852 /* Train init sequence for boot CPU */
853 for (init_func = gic_v3_secondary_init; *init_func != NULL;
855 err = (*init_func)(sc);
858 "Could not initialize GIC for CPU%u\n", cpu);
863 /* Unmask attached SGI interrupts. */
864 for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
865 isrc = GIC_INTR_ISRC(sc, irq);
866 if (intr_isrc_init_on_cpu(isrc, cpu))
867 gic_v3_enable_intr(dev, isrc);
870 /* Unmask attached PPI interrupts. */
871 for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
872 isrc = GIC_INTR_ISRC(sc, irq);
873 if (intr_isrc_init_on_cpu(isrc, cpu))
874 gic_v3_enable_intr(dev, isrc);
877 for (i = 0; i < sc->gic_nchildren; i++) {
878 child = sc->gic_children[i];
879 PIC_INIT_SECONDARY(child);
/*
 * PIC_IPI_SEND method: deliver an SGI to a set of CPUs via ICC_SGI1R_EL1.
 * One SGI1R write covers all targets sharing the same Aff3.Aff2.Aff1
 * affinity group (the target list is a bitmap of Aff0 values), so CPUs
 * are walked in affinity order and a write is issued per group.
 * NOTE(review): numbered paste — val reset, braces and tails elided.
 */
884 gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
887 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
888 uint64_t aff, val, irq;
891 #define GIC_AFF_MASK (CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
892 #define GIC_AFFINITY(i) (CPU_AFFINITY(i) & GIC_AFF_MASK)
893 aff = GIC_AFFINITY(0);
897 /* Iterate through all CPUs in set */
898 for (i = 0; i <= mp_maxid; i++) {
899 /* Move to the next affinity group */
/* Flush the accumulated target list for the previous group. */
900 if (aff != GIC_AFFINITY(i)) {
903 gic_icc_write(SGI1R, val);
906 aff = GIC_AFFINITY(i);
909 /* Send the IPI to this cpu */
910 if (CPU_ISSET(i, &cpus)) {
911 #define ICC_SGI1R_AFFINITY(aff) \
912 (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) | \
913 ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) | \
914 ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
915 /* Set the affinity when the first at this level */
917 val = ICC_SGI1R_AFFINITY(aff) |
918 irq << ICC_SGI1R_EL1_SGIID_SHIFT;
919 /* Set the bit to send the IPI to te CPU */
920 val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
924 /* Send the IPI to the last cpu affinity group */
926 gic_icc_write(SGI1R, val);
/*
 * PIC_IPI_SETUP method: allocate the next free SGI for the given IPI
 * number, record the mapping for dispatch, and mark the irqsrc as
 * belonging to this CPU.
 * NOTE(review): numbered paste — *isrcp assignment and returns elided.
 */
932 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
934 struct intr_irqsrc *isrc;
935 struct gic_v3_softc *sc = device_get_softc(dev);
/* Out of SGIs: only GIC_FIRST_SGI..GIC_LAST_SGI exist in hardware. */
937 if (sgi_first_unused > GIC_LAST_SGI)
940 isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
941 sgi_to_ipi[sgi_first_unused++] = ipi;
943 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
/*
 * Spin until the Register Write Pending (RWP) bit of the Distributor or
 * this CPU's Re-Distributor clears; panic after ~1 second.
 * NOTE(review): numbered paste — the DIST switch case and the DELAY in
 * the poll loop are elided.
 */
954 gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
956 struct resource *res;
958 size_t us_left = 1000000;
960 cpuid = PCPU_GET(cpuid);
967 res = sc->gic_redists.pcpu[cpuid];
970 KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
/* GICR_CTLR.RWP shares the GICD_CTLR.RWP bit position. */
974 while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
977 panic("GICD Register write pending for too long");
983 gic_v3_cpu_priority(uint64_t mask)
986 /* Set prority mask */
987 gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
/*
 * Enable the System Register Enable (SRE) bit so the GIC CPU interface is
 * accessed via ICC_* system registers rather than MMIO. Read back to
 * verify — if EL2/EL3 forced memory-mapped mode the bit stays clear.
 * NOTE(review): numbered paste — return statements are elided.
 */
991 gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
996 cpuid = PCPU_GET(cpuid);
998 * Set the SRE bit to enable access to GIC CPU interface
999 * via system registers.
1001 sre = READ_SPECIALREG(icc_sre_el1);
1002 sre |= ICC_SRE_EL1_SRE;
1003 WRITE_SPECIALREG(icc_sre_el1, sre);
1006 * Now ensure that the bit is set.
1008 sre = READ_SPECIALREG(icc_sre_el1);
1009 if ((sre & ICC_SRE_EL1_SRE) == 0) {
1010 /* We are done. This was disabled in EL2 */
1011 device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
1012 "via system registers\n", cpuid);
1014 } else if (bootverbose) {
1015 device_printf(sc->dev,
1016 "CPU%u enabled CPU interface via system registers\n",
/*
 * Per-CPU interface initialization: enable system-register access,
 * open the priority mask fully, select single-write EOI mode, and
 * enable group-1 interrupt delivery.
 * NOTE(review): numbered paste — error check and return are elided.
 */
1024 gic_v3_cpu_init(struct gic_v3_softc *sc)
1028 /* Enable access to CPU interface via system registers */
1029 err = gic_v3_cpu_enable_sre(sc);
1032 /* Priority mask to minimum - accept all interrupts */
1033 gic_v3_cpu_priority(GIC_PRIORITY_MIN)
1034 /* Disable EOI mode */
1035 gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
1036 /* Enable group 1 (insecure) interrups */
/* NOTE(review): ICC_IGRPEN0_EL1_EN is used for the IGRPEN1 register —
 * works because both registers define the enable bit at bit 0, but an
 * IGRPEN1-named constant would be clearer. */
1037 gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);
/*
 * Distributor initialization (boot CPU): disable it, configure every SPI
 * as group-1 / level-triggered / top-priority / masked, re-enable with
 * affinity routing (ARE) and group-1, and route all SPIs to the boot CPU.
 * NOTE(review): numbered paste — locals, returns and a flag in the CTLR
 * write are elided.
 */
1044 gic_v3_dist_init(struct gic_v3_softc *sc)
1050 * 1. Disable the Distributor
1052 gic_d_write(sc, 4, GICD_CTLR, 0);
1053 gic_v3_wait_for_rwp(sc, DIST);
1056 * 2. Configure the Distributor
1058 /* Set all SPIs to be Group 1 Non-secure */
1059 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
1060 gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);
1062 /* Set all global interrupts to be level triggered, active low. */
1063 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
1064 gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);
1066 /* Set priority to all shared interrupts */
1067 for (i = GIC_FIRST_SPI;
1068 i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
1069 /* Set highest priority */
1070 gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
1074 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
1075 * Re-Distributor registers.
1077 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
1078 gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);
1080 gic_v3_wait_for_rwp(sc, DIST);
1083 * 3. Enable Distributor
1085 /* Enable Distributor with ARE, Group 1 */
1086 gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
1090 * 4. Route all interrupts to boot CPU.
1092 aff = CPU_AFFINITY(0);
1093 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
1094 gic_d_write(sc, 4, GICD_IROUTER(i), aff);
1099 /* Re-Distributor */
/*
 * Allocate a struct resource slot per present CPU; absent CPUs get NULL.
 * gic_v3_redist_find() later copies the matching region's resource into
 * these slots.
 * NOTE(review): numbered paste — else branch and return are elided.
 */
1101 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1105 /* Allocate struct resource for all CPU's Re-Distributor registers */
1106 for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
1107 if (CPU_ISSET(cpuid, &all_cpus) != 0)
1108 sc->gic_redists.pcpu[cpuid] =
1109 malloc(sizeof(*sc->gic_redists.pcpu[0]),
1110 M_GIC_V3, M_WAITOK);
1112 sc->gic_redists.pcpu[cpuid] = NULL;
/*
 * Locate the current CPU's Re-Distributor frame: walk every region,
 * stepping through the chain of per-CPU frames (variable stride when
 * VLPIs are present) and comparing GICR_TYPER's affinity field against
 * this CPU's MPIDR affinity. On match, the frame's resource is copied
 * into the per-CPU slot.
 * NOTE(review): numbered paste — do-loop head, returns and some braces
 * are elided.
 */
1117 gic_v3_redist_find(struct gic_v3_softc *sc)
1119 struct resource r_res;
1120 bus_space_handle_t r_bsh;
1127 cpuid = PCPU_GET(cpuid);
1129 aff = CPU_AFFINITY(cpuid);
1130 /* Affinity in format for comparison with typer */
1131 aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
1132 (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);
1135 device_printf(sc->dev,
1136 "Start searching for Re-Distributor\n");
1138 /* Iterate through Re-Distributor regions */
1139 for (i = 0; i < sc->gic_redists.nregions; i++) {
1140 /* Take a copy of the region's resource */
1141 r_res = *sc->gic_redists.regions[i];
1142 r_bsh = rman_get_bushandle(&r_res);
1144 pidr2 = bus_read_4(&r_res, GICR_PIDR2);
1145 switch (GICR_PIDR2_ARCH(pidr2)) {
1146 case GICR_PIDR2_ARCH_GICv3: /* fall through */
1147 case GICR_PIDR2_ARCH_GICv4:
1150 device_printf(sc->dev,
1151 "No Re-Distributor found for CPU%u\n", cpuid);
1156 typer = bus_read_8(&r_res, GICR_TYPER);
1157 if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
1158 KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
1159 ("Invalid pointer to per-CPU redistributor"));
1160 /* Copy res contents to its final destination */
1161 *sc->gic_redists.pcpu[cpuid] = r_res;
1163 device_printf(sc->dev,
1164 "CPU%u Re-Distributor has been found\n",
/* Advance to the next frame: RD+SGI frames, plus VLPI frames on GICv4. */
1170 r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
1171 if ((typer & GICR_TYPER_VLPIS) != 0) {
1173 (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
1176 rman_set_bushandle(&r_res, r_bsh);
1177 } while ((typer & GICR_TYPER_LAST) == 0);
1180 device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
/*
 * Wake this CPU's Re-Distributor: clear GICR_WAKER.ProcessorSleep and
 * poll until ChildrenAsleep clears; panic after ~1 second.
 * NOTE(review): numbered paste — locals, DELAY in the loop and return
 * are elided.
 */
1185 gic_v3_redist_wake(struct gic_v3_softc *sc)
1188 size_t us_left = 1000000;
1190 waker = gic_r_read(sc, 4, GICR_WAKER);
1191 /* Wake up Re-Distributor for this CPU */
1192 waker &= ~GICR_WAKER_PS;
1193 gic_r_write(sc, 4, GICR_WAKER, waker);
1195 * When clearing ProcessorSleep bit it is required to wait for
1196 * ChildrenAsleep to become zero following the processor power-on.
1198 while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1200 if (us_left-- == 0) {
1201 panic("Could not wake Re-Distributor for CPU%u",
1207 device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
1215 gic_v3_redist_init(struct gic_v3_softc *sc)
1220 err = gic_v3_redist_find(sc);
1224 err = gic_v3_redist_wake(sc);
1228 /* Configure SGIs and PPIs to be Group1 Non-secure */
1229 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
1233 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
1234 GICR_I_ENABLER_PPI_MASK);
1236 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
1237 GICR_I_ENABLER_SGI_MASK);
1239 /* Set priority for SGIs and PPIs */
1240 for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
1241 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
1245 gic_v3_wait_for_rwp(sc, REDIST);