2 * Copyright (c) 2015-2016 The FreeBSD Foundation
5 * This software was developed by Andrew Turner under
6 * the sponsorship of the FreeBSD Foundation.
8 * This software was developed by Semihalf under
9 * the sponsorship of the FreeBSD Foundation.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include "opt_platform.h"
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bitstring.h>
42 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
49 #include <sys/cpuset.h>
51 #include <sys/mutex.h>
57 #include <machine/bus.h>
58 #include <machine/cpu.h>
59 #include <machine/intr.h>
63 #include "gic_v3_reg.h"
64 #include "gic_v3_var.h"
/*
 * Forward declarations of the INTRNG PIC interface implementations
 * installed into gic_v3_methods below.
 */
67 static pic_disable_intr_t gic_v3_disable_intr;
68 static pic_enable_intr_t gic_v3_enable_intr;
69 static pic_map_intr_t gic_v3_map_intr;
70 static pic_setup_intr_t gic_v3_setup_intr;
71 static pic_teardown_intr_t gic_v3_teardown_intr;
72 static pic_post_filter_t gic_v3_post_filter;
73 static pic_post_ithread_t gic_v3_post_ithread;
74 static pic_pre_ithread_t gic_v3_pre_ithread;
75 static pic_bind_intr_t gic_v3_bind_intr;
77 static pic_init_secondary_t gic_v3_init_secondary;
78 static pic_ipi_send_t gic_v3_ipi_send;
79 static pic_ipi_setup_t gic_v3_ipi_setup;
/* Round-robin hint used when auto-assigning SPIs to CPUs (gic_v3_bind_intr). */
82 static u_int gic_irq_cpu;
/* Map of SGI number -> IPI installed on it; SGIs are handed out in order. */
84 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
85 static u_int sgi_first_unused = GIC_FIRST_SGI;
88 /* Device and PIC methods */
89 static int gic_v3_bind(device_t, u_int, u_int);
90 static void gic_v3_dispatch(device_t, struct trapframe *);
91 static void gic_v3_eoi(device_t, u_int);
92 static void gic_v3_mask_irq(device_t, u_int);
93 static void gic_v3_unmask_irq(device_t, u_int);
95 static void gic_v3_init_secondary(device_t);
96 static void gic_v3_ipi_send(device_t, cpuset_t, u_int);
100 static device_method_t gic_v3_methods[] = {
101 /* Device interface */
102 DEVMETHOD(device_detach, gic_v3_detach),
105 /* Interrupt controller interface */
106 DEVMETHOD(pic_disable_intr, gic_v3_disable_intr),
107 DEVMETHOD(pic_enable_intr, gic_v3_enable_intr),
108 DEVMETHOD(pic_map_intr, gic_v3_map_intr),
109 DEVMETHOD(pic_setup_intr, gic_v3_setup_intr),
110 DEVMETHOD(pic_teardown_intr, gic_v3_teardown_intr),
111 DEVMETHOD(pic_post_filter, gic_v3_post_filter),
112 DEVMETHOD(pic_post_ithread, gic_v3_post_ithread),
113 DEVMETHOD(pic_pre_ithread, gic_v3_pre_ithread),
115 DEVMETHOD(pic_bind_intr, gic_v3_bind_intr),
116 DEVMETHOD(pic_init_secondary, gic_v3_init_secondary),
117 DEVMETHOD(pic_ipi_send, gic_v3_ipi_send),
118 DEVMETHOD(pic_ipi_setup, gic_v3_ipi_setup),
/*
 * NOTE(review): the entries below duplicate pic_init_secondary/pic_ipi_send
 * above; presumably the two groups were guarded by #ifdef INTRNG / #else
 * in the original file — confirm against the unabridged source.
 */
122 DEVMETHOD(pic_bind, gic_v3_bind),
123 DEVMETHOD(pic_dispatch, gic_v3_dispatch),
124 DEVMETHOD(pic_eoi, gic_v3_eoi),
125 DEVMETHOD(pic_mask, gic_v3_mask_irq),
126 DEVMETHOD(pic_unmask, gic_v3_unmask_irq),
128 DEVMETHOD(pic_init_secondary, gic_v3_init_secondary),
129 DEVMETHOD(pic_ipi_send, gic_v3_ipi_send),
137 DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
138 sizeof(struct gic_v3_softc));
141 * Driver-specific definitions.
/* malloc(9) type used for all of this driver's allocations */
143 MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
146 * Helper functions and definitions.
148 /* Destination registers, either Distributor or Re-Distributor */
154 /* Helper routines starting with gic_v3_ */
155 static int gic_v3_dist_init(struct gic_v3_softc *);
156 static int gic_v3_redist_alloc(struct gic_v3_softc *);
157 static int gic_v3_redist_find(struct gic_v3_softc *);
158 static int gic_v3_redist_init(struct gic_v3_softc *);
159 static int gic_v3_cpu_init(struct gic_v3_softc *);
160 static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);
162 /* A sequence of init functions for primary (boot) CPU */
163 typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
164 /* Primary CPU initialization sequence */
165 static gic_v3_initseq_t gic_v3_primary_init[] = {
174 /* Secondary CPU initialization sequence */
175 static gic_v3_initseq_t gic_v3_secondary_init[] = {
/*
 * Device attach: map the Distributor and all Re-Distributor regions,
 * register an interrupt source for every supported INTID, run the
 * primary-CPU init sequence and register this device as the root PIC.
 */
186 gic_v3_attach(device_t dev)
188 struct gic_v3_softc *sc;
189 gic_v3_initseq_t *init_func;
199 sc = device_get_softc(dev);
200 sc->gic_registered = FALSE;
204 /* Initialize mutex */
205 mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
208 * Allocate array of struct resource.
209 * One entry for Distributor and all remaining for Re-Distributor.
211 sc->gic_res = malloc(
212 sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
215 /* Now allocate corresponding resources */
216 for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
217 sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
219 if (sc->gic_res[rid] == NULL)
224 * Distributor interface
/* rid 0 is always the Distributor register frame */
226 sc->gic_dist = sc->gic_res[0];
229 * Re-Distributor interface
231 /* Allocate space under region descriptions */
232 sc->gic_redists.regions = malloc(
233 sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
236 /* Fill-up bus_space information for each region. */
237 for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
238 sc->gic_redists.regions[i] = sc->gic_res[rid];
240 /* Get the number of supported SPI interrupts */
241 typer = gic_d_read(sc, 4, GICD_TYPER);
242 sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
243 if (sc->gic_nirqs > GIC_I_NUM_MAX)
244 sc->gic_nirqs = GIC_I_NUM_MAX;
247 sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
248 M_GIC_V3, M_WAITOK | M_ZERO);
249 name = device_get_nameunit(dev);
/* Register one interrupt source per INTID: SGI, PPI or SPI. */
250 for (irq = 0; irq < sc->gic_nirqs; irq++) {
251 struct intr_irqsrc *isrc;
253 sc->gic_irqs[irq].gi_irq = irq;
254 sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
255 sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
257 isrc = &sc->gic_irqs[irq].gi_isrc;
258 if (irq <= GIC_LAST_SGI) {
259 err = intr_isrc_register(isrc, sc->dev,
260 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
261 } else if (irq <= GIC_LAST_PPI) {
262 err = intr_isrc_register(isrc, sc->dev,
263 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
265 err = intr_isrc_register(isrc, sc->dev, 0,
266 "%s,s%u", name, irq - GIC_FIRST_SPI);
269 /* XXX call intr_isrc_deregister() */
/*
 * NOTE(review): gic_irqs was allocated with M_GIC_V3 above but is
 * freed with M_DEVBUF here — malloc-type mismatch; should be M_GIC_V3.
 */
270 free(sc->gic_irqs, M_DEVBUF);
276 /* Get the number of supported interrupt identifier bits */
277 sc->gic_idbits = GICD_TYPER_IDBITS(typer);
280 device_printf(dev, "SPIs: %u, IDs: %u\n",
281 sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
284 /* Train init sequence for boot CPU */
285 for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
286 err = (*init_func)(sc);
292 * Now register PIC to the interrupts handling layer.
295 arm_register_root_pic(dev, sc->gic_nirqs);
296 sc->gic_registered = TRUE;
/*
 * Device detach: refuse to detach while registered as the system PIC,
 * then release every bus resource and free all driver allocations.
 */
303 gic_v3_detach(device_t dev)
305 struct gic_v3_softc *sc;
309 sc = device_get_softc(dev);
311 if (device_is_attached(dev)) {
313 * XXX: We should probably deregister PIC
315 if (sc->gic_registered)
316 panic("Trying to detach registered PIC")
318 for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
319 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
321 for (i = 0; i < mp_ncpus; i++)
322 free(sc->gic_redists.pcpu[i], M_GIC_V3);
324 free(sc->gic_res, M_GIC_V3);
325 free(sc->gic_redists.regions, M_GIC_V3);
/*
 * Interrupt filter: acknowledge the highest-priority pending interrupt
 * via ICC_IAR1_EL1 and dispatch it as an IPI (SGI) or a regular source
 * (PPI/SPI).  Edge interrupts are EOIed before dispatch, level ones after.
 */
332 arm_gic_v3_intr(void *arg)
334 struct gic_v3_softc *sc = arg;
335 struct gic_v3_irqsrc *gi;
337 struct trapframe *tf;
343 if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) {
345 * Hardware: Cavium ThunderX
346 * Chip revision: Pass 1.0 (early version)
347 * Pass 1.1 (production)
348 * ERRATUM: 22978, 23154
/* Erratum workaround: pad the IAR1 read with NOPs. */
351 "nop;nop;nop;nop;nop;nop;nop;nop; \n"
352 "mrs %0, ICC_IAR1_EL1 \n"
353 "nop;nop;nop;nop; \n"
355 : "=&r" (active_irq));
357 active_irq = gic_icc_read(IAR1);
360 if (__predict_false(active_irq >= sc->gic_nirqs))
361 return (FILTER_HANDLED);
363 tf = curthread->td_intr_frame;
364 gi = &sc->gic_irqs[active_irq];
365 if (active_irq <= GIC_LAST_SGI) {
366 /* Call EOI for all IPI before dispatch. */
367 gic_icc_write(EOIR1, (uint64_t)active_irq);
369 intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
371 device_printf(sc->dev, "SGI %u on UP system detected\n",
372 active_irq - GIC_FIRST_SGI);
374 } else if (active_irq >= GIC_FIRST_PPI &&
375 active_irq <= GIC_LAST_SPI) {
/*
 * NOTE(review): gi_pol (polarity) is compared against a trigger-mode
 * constant here and below; this presumably should test gi_trig —
 * confirm against the fixed upstream revision.
 */
376 if (gi->gi_pol == INTR_TRIGGER_EDGE)
377 gic_icc_write(EOIR1, gi->gi_irq);
379 if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
380 if (gi->gi_pol != INTR_TRIGGER_EDGE)
381 gic_icc_write(EOIR1, gi->gi_irq);
/* No handler claimed it: mask the stray source so it cannot storm. */
382 gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
383 device_printf(sc->dev,
384 "Stray irq %lu disabled\n", active_irq);
/*
 * Decode a 3-cell FDT interrupt specifier (type, number, flags) into an
 * absolute INTID plus polarity/trigger mode.
 */
392 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
393 enum intr_polarity *polp, enum intr_trigger *trigp)
401 * The 1st cell is the interrupt type:
404 * The 2nd cell contains the interrupt number:
407 * The 3rd cell is the flags, encoded as follows:
408 * bits[3:0] trigger type and level flags
410 * 2 = edge triggered (PPI only)
411 * 4 = level-sensitive
412 * 8 = level-sensitive (PPI only)
416 irq = GIC_FIRST_SPI + cells[1];
417 /* SPI irq is checked later. */
420 irq = GIC_FIRST_PPI + cells[1];
421 if (irq > GIC_LAST_PPI) {
422 device_printf(dev, "unsupported PPI interrupt "
423 "number %u\n", cells[1]);
424 device_printf(dev, "unsupported interrupt type "
429 "configuration %u\n", cells[0]);
/* Low nibble of the flags cell selects trigger/polarity. */
433 switch (cells[2] & 0xf) {
435 *trigp = INTR_TRIGGER_EDGE;
436 *polp = INTR_POLARITY_HIGH;
439 *trigp = INTR_TRIGGER_EDGE;
440 *polp = INTR_POLARITY_LOW;
443 *trigp = INTR_TRIGGER_LEVEL;
444 *polp = INTR_POLARITY_HIGH;
447 *trigp = INTR_TRIGGER_LEVEL;
448 *polp = INTR_POLARITY_LOW;
451 device_printf(dev, "unsupported trigger/polarity "
452 "configuration 0x%02x\n", cells[2]);
456 /* Check the interrupt is valid */
457 if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
/*
 * Translate generic mapping data into (irq, pol, trig); polp/trigp may
 * be NULL when the caller only needs the INTID.
 */
466 do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
467 enum intr_polarity *polp, enum intr_trigger *trigp)
469 struct gic_v3_softc *sc;
470 enum intr_polarity pol;
471 enum intr_trigger trig;
473 struct intr_map_data_fdt *daf;
477 sc = device_get_softc(dev);
479 switch (data->type) {
481 case INTR_MAP_DATA_FDT:
482 daf = (struct intr_map_data_fdt *)data;
483 if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
/* Reject INTIDs beyond what this Distributor implements. */
492 if (irq >= sc->gic_nirqs)
495 case INTR_POLARITY_CONFORM:
496 case INTR_POLARITY_LOW:
497 case INTR_POLARITY_HIGH:
503 case INTR_TRIGGER_CONFORM:
504 case INTR_TRIGGER_EDGE:
505 case INTR_TRIGGER_LEVEL:
/*
 * PIC_MAP_INTR method: resolve mapping data to the matching
 * interrupt source object.
 */
520 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
521 struct intr_irqsrc **isrcp)
523 struct gic_v3_softc *sc;
527 error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
529 sc = device_get_softc(dev);
530 *isrcp = GIC_INTR_ISRC(sc, irq);
/*
 * PIC_SETUP_INTR method: validate/record the requested trigger and
 * polarity, program GICD/GICR_ICFGR accordingly and (re)bind the SPI.
 */
536 gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
537 struct resource *res, struct intr_map_data *data)
539 struct gic_v3_softc *sc = device_get_softc(dev);
540 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
541 enum intr_trigger trig;
542 enum intr_polarity pol;
550 error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
/* Mapping data must match this isrc and fully specify the mode. */
554 if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
555 trig == INTR_TRIGGER_CONFORM)
558 /* Compare config if this is not first setup. */
559 if (isrc->isrc_handlers != 0) {
560 if (pol != gi->gi_pol || trig != gi->gi_trig)
570 * XXX - In case that per CPU interrupt is going to be enabled in time
571 * when SMP is already started, we need some IPI call which
572 * enables it on others CPUs. Further, it's more complicated as
573 * pic_enable_source() and pic_disable_source() should act on
574 * per CPU basis only. Thus, it should be solved here somehow.
576 if (isrc->isrc_flags & INTR_ISRCF_PPI)
577 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu)
/* SGIs (irq < GIC_FIRST_PPI) have fixed configuration; skip them. */
579 if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
580 mtx_lock_spin(&sc->gic_mtx);
582 /* Set the trigger and polarity */
583 if (irq <= GIC_LAST_PPI)
584 reg = gic_r_read(sc, 4,
585 GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
587 reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
/* Each INTID owns 2 bits of ICFGR; bit 1 selects edge mode. */
588 if (trig == INTR_TRIGGER_LEVEL)
589 reg &= ~(2 << ((irq % 16) * 2));
591 reg |= 2 << ((irq % 16) * 2);
593 if (irq <= GIC_LAST_PPI) {
595 GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
596 gic_v3_wait_for_rwp(sc, REDIST);
598 gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
599 gic_v3_wait_for_rwp(sc, DIST);
602 mtx_unlock_spin(&sc->gic_mtx);
604 gic_v3_bind_intr(dev, isrc);
/*
 * PIC_TEARDOWN_INTR method: when the last handler is removed, reset
 * the recorded mode back to CONFORM so a new setup can reprogram it.
 */
611 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
612 struct resource *res, struct intr_map_data *data)
614 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
616 if (isrc->isrc_handlers == 0) {
617 gi->gi_pol = INTR_POLARITY_CONFORM;
618 gi->gi_trig = INTR_TRIGGER_CONFORM;
/*
 * PIC_DISABLE_INTR method: clear the enable bit for the source — in the
 * Re-Distributor for SGIs/PPIs, in the Distributor for SPIs — then wait
 * for the write to take effect (RWP).
 */
625 gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
627 struct gic_v3_softc *sc;
628 struct gic_v3_irqsrc *gi;
631 sc = device_get_softc(dev);
632 gi = (struct gic_v3_irqsrc *)isrc;
635 if (irq <= GIC_LAST_PPI) {
636 /* SGIs and PPIs in corresponding Re-Distributor */
637 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
639 gic_v3_wait_for_rwp(sc, REDIST);
640 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
641 /* SPIs in distributor */
642 gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
643 gic_v3_wait_for_rwp(sc, DIST);
645 panic("%s: Unsupported IRQ %u", __func__, irq);
/*
 * PIC_ENABLE_INTR method: mirror image of gic_v3_disable_intr using the
 * set-enable (ISENABLER) registers.
 */
649 gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
651 struct gic_v3_softc *sc;
652 struct gic_v3_irqsrc *gi;
655 sc = device_get_softc(dev);
656 gi = (struct gic_v3_irqsrc *)isrc;
659 if (irq <= GIC_LAST_PPI) {
660 /* SGIs and PPIs in corresponding Re-Distributor */
661 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
663 gic_v3_wait_for_rwp(sc, REDIST);
664 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
665 /* SPIs in distributor */
666 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
667 gic_v3_wait_for_rwp(sc, DIST);
669 panic("%s: Unsupported IRQ %u", __func__, irq);
/*
 * PIC_PRE_ITHREAD method: mask the source and EOI it before handing
 * the interrupt off to its ithread.
 */
673 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
675 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
677 gic_v3_disable_intr(dev, isrc);
678 gic_icc_write(EOIR1, gi->gi_irq);
/* PIC_POST_ITHREAD method: re-enable the source once the ithread is done. */
682 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
685 gic_v3_enable_intr(dev, isrc);
689 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
691 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
693 if (gi->gi_pol == INTR_TRIGGER_EDGE)
696 gic_icc_write(EOIR1, gi->gi_irq);
/*
 * PIC_BIND_INTR method: route an SPI to a CPU via GICD_IROUTER.  PPIs
 * and SGIs are per-CPU and cannot be rerouted.  With no explicit CPU
 * set, SPIs are spread round-robin using gic_irq_cpu.
 */
700 gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
702 struct gic_v3_softc *sc;
703 struct gic_v3_irqsrc *gi;
706 gi = (struct gic_v3_irqsrc *)isrc;
707 if (gi->gi_irq <= GIC_LAST_PPI)
710 KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
711 ("%s: Attempting to bind an invalid IRQ", __func__));
713 sc = device_get_softc(dev);
715 if (CPU_EMPTY(&isrc->isrc_cpu)) {
/* No CPU requested: pick the next one round-robin. */
716 gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
717 CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
718 gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq),
719 CPU_AFFINITY(gic_irq_cpu));
722 * We can only bind to a single CPU so select
723 * the first CPU found.
725 cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
726 gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
/*
 * PIC_INIT_SECONDARY method (INTRNG): run the per-CPU init sequence on
 * an AP, then unmask any SGI/PPI sources already configured for it.
 */
734 gic_v3_init_secondary(device_t dev)
736 struct gic_v3_softc *sc;
737 gic_v3_initseq_t *init_func;
738 struct intr_irqsrc *isrc;
742 sc = device_get_softc(dev);
743 cpu = PCPU_GET(cpuid);
745 /* Train init sequence for boot CPU */
746 for (init_func = gic_v3_secondary_init; *init_func != NULL;
748 err = (*init_func)(sc);
751 "Could not initialize GIC for CPU%u\n", cpu);
756 /* Unmask attached SGI interrupts. */
757 for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
758 isrc = GIC_INTR_ISRC(sc, irq);
759 if (intr_isrc_init_on_cpu(isrc, cpu))
760 gic_v3_enable_intr(dev, isrc);
763 /* Unmask attached PPI interrupts. */
764 for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
765 isrc = GIC_INTR_ISRC(sc, irq);
766 if (intr_isrc_init_on_cpu(isrc, cpu))
767 gic_v3_enable_intr(dev, isrc);
/*
 * PIC_IPI_SEND method (INTRNG): deliver an SGI to every CPU in `cpus`
 * via ICC_SGI1R_EL1.  One write covers up to 16 CPUs sharing the same
 * AFF3:AFF2:AFF1 affinity group, so writes are batched per group.
 */
772 gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
775 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
776 uint64_t aff, val, irq;
779 #define GIC_AFF_MASK (CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
780 #define GIC_AFFINITY(i) (CPU_AFFINITY(i) & GIC_AFF_MASK)
781 aff = GIC_AFFINITY(0);
785 /* Iterate through all CPUs in set */
786 for (i = 0; i < mp_ncpus; i++) {
787 /* Move to the next affinity group */
788 if (aff != GIC_AFFINITY(i)) {
791 gic_icc_write(SGI1R, val);
794 aff = GIC_AFFINITY(i);
797 /* Send the IPI to this cpu */
798 if (CPU_ISSET(i, &cpus)) {
799 #define ICC_SGI1R_AFFINITY(aff) \
800 (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) | \
801 ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) | \
802 ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
803 /* Set the affinity when the first at this level */
805 val = ICC_SGI1R_AFFINITY(aff) |
806 irq << ICC_SGI1R_EL1_SGIID_SHIFT;
807 /* Set the bit to send the IPI to the CPU */
808 val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
812 /* Send the IPI to the last cpu affinity group */
814 gic_icc_write(SGI1R, val);
/*
 * PIC_IPI_SETUP method: allocate the next free SGI for this IPI and
 * remember the SGI->IPI mapping used by the dispatch path.
 */
820 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
822 struct intr_irqsrc *isrc;
823 struct gic_v3_softc *sc = device_get_softc(dev);
825 if (sgi_first_unused > GIC_LAST_SGI)
828 isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
829 sgi_to_ipi[sgi_first_unused++] = ipi;
831 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
/*
 * Legacy PIC_BIND method: route an SPI to the given CPU, pass LPIs on
 * to the ITS migration code; PPIs/SGIs are per-CPU and silently accepted.
 */
843 gic_v3_bind(device_t dev, u_int irq, u_int cpuid)
846 struct gic_v3_softc *sc;
848 sc = device_get_softc(dev);
850 if (irq <= GIC_LAST_PPI) {
851 /* Can't bind PPI to another CPU but it's not an error */
853 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
854 aff = CPU_AFFINITY(cpuid);
855 gic_d_write(sc, 4, GICD_IROUTER(irq), aff);
857 } else if (irq >= GIC_FIRST_LPI)
858 return (lpi_migrate(dev, irq, cpuid));
/*
 * Legacy PIC_DISPATCH method: acknowledge the pending interrupt and
 * forward it to the arm interrupt framework; SGIs are EOIed first.
 */
864 gic_v3_dispatch(device_t dev, struct trapframe *frame)
869 if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) {
871 * Hardware: Cavium ThunderX
872 * Chip revision: Pass 1.0 (early version)
873 * Pass 1.1 (production)
874 * ERRATUM: 22978, 23154
/* Erratum workaround: pad the IAR1 read with NOPs. */
877 "nop;nop;nop;nop;nop;nop;nop;nop; \n"
878 "mrs %0, ICC_IAR1_EL1 \n"
879 "nop;nop;nop;nop; \n"
881 : "=&r" (active_irq));
883 active_irq = gic_icc_read(IAR1);
886 if (__predict_false(active_irq == ICC_IAR1_EL1_SPUR))
889 if (__predict_true((active_irq >= GIC_FIRST_PPI &&
890 active_irq <= GIC_LAST_SPI) || active_irq >= GIC_FIRST_LPI)) {
891 arm_dispatch_intr(active_irq, frame);
895 if (active_irq <= GIC_LAST_SGI) {
896 gic_icc_write(EOIR1, (uint64_t)active_irq);
897 arm_dispatch_intr(active_irq, frame);
/* Legacy PIC_EOI method: signal End Of Interrupt for the given INTID. */
904 gic_v3_eoi(device_t dev, u_int irq)
907 gic_icc_write(EOIR1, (uint64_t)irq);
911 gic_v3_mask_irq(device_t dev, u_int irq)
913 struct gic_v3_softc *sc;
915 sc = device_get_softc(dev);
917 if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */
919 GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq));
920 gic_v3_wait_for_rwp(sc, REDIST);
921 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */
922 gic_r_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
923 gic_v3_wait_for_rwp(sc, DIST);
924 } else if (irq >= GIC_FIRST_LPI) { /* LPIs */
925 lpi_mask_irq(dev, irq);
927 panic("%s: Unsupported IRQ number %u", __func__, irq);
/*
 * Legacy PIC_UNMASK method: enable delivery of the given interrupt;
 * the inverse of gic_v3_mask_irq (ISENABLER instead of ICENABLER).
 */
931 gic_v3_unmask_irq(device_t dev, u_int irq)
933 struct gic_v3_softc *sc;
935 sc = device_get_softc(dev);
937 if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */
939 GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq), GICD_I_MASK(irq));
940 gic_v3_wait_for_rwp(sc, REDIST);
941 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */
942 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
943 gic_v3_wait_for_rwp(sc, DIST);
944 } else if (irq >= GIC_FIRST_LPI) { /* LPIs */
945 lpi_unmask_irq(dev, irq);
947 panic("%s: Unsupported IRQ number %u", __func__, irq);
/*
 * Legacy secondary-CPU init: run the per-CPU init sequence, try to
 * bring up the ITS for LPIs, and unmask the architected timer PPIs.
 */
952 gic_v3_init_secondary(device_t dev)
954 struct gic_v3_softc *sc;
955 gic_v3_initseq_t *init_func;
958 sc = device_get_softc(dev);
960 /* Train init sequence for boot CPU */
961 for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) {
962 err = (*init_func)(sc);
965 "Could not initialize GIC for CPU%u\n",
972 * Try to initialize ITS.
973 * If there is no driver attached this routine will fail but that
974 * does not mean failure here as only LPIs will not be functional
975 * on the current CPU.
977 if (its_init_cpu(NULL) != 0) {
979 "Could not initialize ITS for CPU%u. "
980 "No LPIs will arrive on this CPU\n",
985 * ARM64TODO: Unmask timer PPIs. To be removed when appropriate
986 * mechanism is implemented.
987 * Activate the timer interrupts: virtual (27), secure (29),
988 * and non-secure (30). Use hardcoded values here as there
989 * should be no defines for them.
991 gic_v3_unmask_irq(dev, 27);
992 gic_v3_unmask_irq(dev, 29);
993 gic_v3_unmask_irq(dev, 30);
/*
 * Legacy PIC_IPI_SEND: send SGI `ipi` to every CPU in `cpuset`, batching
 * one ICC_SGI1R_EL1 write per AFF3:AFF2:AFF1 affinity group (each write's
 * target list covers at most 16 CPUs at affinity level 0).
 */
997 gic_v3_ipi_send(device_t dev, cpuset_t cpuset, u_int ipi)
1000 uint64_t aff, tlist;
1004 /* Set affinity mask to match level 3, 2 and 1 */
1005 aff_mask = CPU_AFF1_MASK | CPU_AFF2_MASK | CPU_AFF3_MASK;
1007 /* Iterate through all CPUs in set */
1008 while (!CPU_EMPTY(&cpuset)) {
1010 for (cpu = 0; cpu < mp_ncpus; cpu++) {
1011 /* Compose target list for single AFF3:AFF2:AFF1 set */
1012 if (CPU_ISSET(cpu, &cpuset)) {
1015 * Save affinity of the first CPU to
1016 * send IPI to for later comparison.
1018 aff = CPU_AFFINITY(cpu);
1019 tlist |= (1UL << CPU_AFF0(aff));
1020 CPU_CLR(cpu, &cpuset);
1022 /* Check for same Affinity level 3, 2 and 1 */
1023 if ((aff & aff_mask) == (CPU_AFFINITY(cpu) & aff_mask)) {
1024 tlist |= (1UL << CPU_AFF0(CPU_AFFINITY(cpu)));
1025 /* Clear CPU in cpuset from target list */
1026 CPU_CLR(cpu, &cpuset);
1031 KASSERT((tlist & ~ICC_SGI1R_EL1_TL_MASK) == 0,
1032 ("Target list too long for GICv3 IPI"));
1033 /* Send SGI to CPUs in target list */
1035 val |= (uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT;
1036 val |= (uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT;
1037 val |= (uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT;
1038 val |= (uint64_t)(ipi & ICC_SGI1R_EL1_SGIID_MASK) <<
1039 ICC_SGI1R_EL1_SGIID_SHIFT;
1040 gic_icc_write(SGI1R, val);
1045 #endif /* !INTRNG */
/*
 * Spin (up to ~1 s) until the Register Write Pending bit clears in the
 * selected interface — the Distributor or this CPU's Re-Distributor.
 */
1051 gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
1053 struct resource *res;
1055 size_t us_left = 1000000;
1057 cpuid = PCPU_GET(cpuid);
1064 res = sc->gic_redists.pcpu[cpuid];
1067 KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
1071 while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
1074 panic("GICD Register write pending for too long");
1078 /* CPU interface. */
1079 static __inline void
/* Set the priority mask; interrupts with lower priority are not signalled. */
1080 gic_v3_cpu_priority(uint64_t mask)
1083 /* Set priority mask */
1084 gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
/*
 * Enable the System Register Enable bit so the GIC CPU interface is
 * reachable through ICC_* system registers; fails if EL2 disabled it
 * (the bit reads back as zero in that case).
 */
1088 gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
1093 cpuid = PCPU_GET(cpuid);
1095 * Set the SRE bit to enable access to GIC CPU interface
1096 * via system registers.
1098 sre = READ_SPECIALREG(icc_sre_el1);
1099 sre |= ICC_SRE_EL1_SRE;
1100 WRITE_SPECIALREG(icc_sre_el1, sre);
1103 * Now ensure that the bit is set.
1105 sre = READ_SPECIALREG(icc_sre_el1);
1106 if ((sre & ICC_SRE_EL1_SRE) == 0) {
1107 /* We are done. This was disabled in EL2 */
1108 device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
1109 "via system registers\n", cpuid);
1111 } else if (bootverbose) {
1112 device_printf(sc->dev,
1113 "CPU%u enabled CPU interface via system registers\n",
/*
 * Per-CPU interface init: enable system-register access, open the
 * priority mask, select drop+deactivate EOI mode and enable Group 1.
 */
1121 gic_v3_cpu_init(struct gic_v3_softc *sc)
1125 /* Enable access to CPU interface via system registers */
1126 err = gic_v3_cpu_enable_sre(sc);
1129 /* Priority mask to minimum - accept all interrupts */
1130 gic_v3_cpu_priority(GIC_PRIORITY_MIN);
1131 /* Disable EOI mode */
1132 gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
1133 /* Enable group 1 (insecure) interrupts */
1134 gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);
/*
 * Distributor init: quiesce it, set a default configuration/priority
 * for all SPIs, disable them, re-enable the Distributor with affinity
 * routing and Group 1, and route every SPI to the boot CPU.
 */
1141 gic_v3_dist_init(struct gic_v3_softc *sc)
1147 * 1. Disable the Distributor
1149 gic_d_write(sc, 4, GICD_CTLR, 0);
1150 gic_v3_wait_for_rwp(sc, DIST);
1153 * 2. Configure the Distributor
1155 /* Set all global interrupts to be level triggered, active low. */
1156 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
1157 gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);
1159 /* Set priority to all shared interrupts */
1160 for (i = GIC_FIRST_SPI;
1161 i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
1162 /* Set highest priority */
1163 gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
1167 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
1168 * Re-Distributor registers.
1170 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
1171 gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);
1173 gic_v3_wait_for_rwp(sc, DIST);
1176 * 3. Enable Distributor
1178 /* Enable Distributor with ARE, Group 1 */
1179 gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
1183 * 4. Route all interrupts to boot CPU.
1185 aff = CPU_AFFINITY(0);
1186 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
1187 gic_d_write(sc, 4, GICD_IROUTER(i), aff);
1192 /* Re-Distributor */
/*
 * Allocate a struct resource placeholder per present CPU; each is later
 * filled with that CPU's Re-Distributor frame by gic_v3_redist_find().
 */
1194 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1198 /* Allocate struct resource for all CPU's Re-Distributor registers */
1199 for (cpuid = 0; cpuid < mp_ncpus; cpuid++)
1200 if (CPU_ISSET(cpuid, &all_cpus) != 0)
1201 sc->gic_redists.pcpu[cpuid] =
1202 malloc(sizeof(*sc->gic_redists.pcpu[0]),
1203 M_GIC_V3, M_WAITOK);
1205 sc->gic_redists.pcpu[cpuid] = NULL;
/*
 * Walk every Re-Distributor region and locate the frame whose
 * GICR_TYPER affinity matches the current CPU's MPIDR affinity,
 * saving it in the per-CPU slot.
 */
1210 gic_v3_redist_find(struct gic_v3_softc *sc)
1212 struct resource r_res;
1213 bus_space_handle_t r_bsh;
1220 cpuid = PCPU_GET(cpuid);
1222 aff = CPU_AFFINITY(cpuid);
1223 /* Affinity in format for comparison with typer */
1224 aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
1225 (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);
1228 device_printf(sc->dev,
1229 "Start searching for Re-Distributor\n");
1231 /* Iterate through Re-Distributor regions */
1232 for (i = 0; i < sc->gic_redists.nregions; i++) {
1233 /* Take a copy of the region's resource */
1234 r_res = *sc->gic_redists.regions[i];
1235 r_bsh = rman_get_bushandle(&r_res);
/* Sanity check: the region must identify as GICv3 or GICv4. */
1237 pidr2 = bus_read_4(&r_res, GICR_PIDR2);
1238 switch (pidr2 & GICR_PIDR2_ARCH_MASK) {
1239 case GICR_PIDR2_ARCH_GICv3: /* fall through */
1240 case GICR_PIDR2_ARCH_GICv4:
1243 device_printf(sc->dev,
1244 "No Re-Distributor found for CPU%u\n", cpuid);
1249 typer = bus_read_8(&r_res, GICR_TYPER);
1250 if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
1251 KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
1252 ("Invalid pointer to per-CPU redistributor"));
1253 /* Copy res contents to its final destination */
1254 *sc->gic_redists.pcpu[cpuid] = r_res;
1256 device_printf(sc->dev,
1257 "CPU%u Re-Distributor has been found\n",
/* Not ours: step past this frame (extra stride when VLPIs present). */
1263 r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
1264 if ((typer & GICR_TYPER_VLPIS) != 0) {
1266 (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
1269 rman_set_bushandle(&r_res, r_bsh);
1270 } while ((typer & GICR_TYPER_LAST) == 0);
1273 device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
/*
 * Wake this CPU's Re-Distributor: clear ProcessorSleep and spin
 * (up to ~1 s) until ChildrenAsleep reads zero.
 */
1278 gic_v3_redist_wake(struct gic_v3_softc *sc)
1281 size_t us_left = 1000000;
1283 waker = gic_r_read(sc, 4, GICR_WAKER);
1284 /* Wake up Re-Distributor for this CPU */
1285 waker &= ~GICR_WAKER_PS;
1286 gic_r_write(sc, 4, GICR_WAKER, waker);
1288 * When clearing ProcessorSleep bit it is required to wait for
1289 * ChildrenAsleep to become zero following the processor power-on.
1291 while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1293 if (us_left-- == 0) {
1294 panic("Could not wake Re-Distributor for CPU%u",
1300 device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
/*
 * Per-CPU Re-Distributor init: locate and wake the frame, then set the
 * default enable state and priority for this CPU's SGIs and PPIs.
 */
1308 gic_v3_redist_init(struct gic_v3_softc *sc)
1313 err = gic_v3_redist_find(sc);
1317 err = gic_v3_redist_wake(sc);
1322 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
1323 GICR_I_ENABLER_PPI_MASK);
1325 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
1326 GICR_I_ENABLER_SGI_MASK);
1328 /* Set priority for SGIs and PPIs */
1329 for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
1330 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
1334 gic_v3_wait_for_rwp(sc, REDIST);