2 * Copyright (c) 2015 The FreeBSD Foundation
5 * This software was developed by Semihalf under
6 * the sponsorship of the FreeBSD Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
43 #include <sys/cpuset.h>
45 #include <sys/mutex.h>
51 #include <machine/bus.h>
52 #include <machine/cpu.h>
53 #include <machine/intr.h>
57 #include "gic_v3_reg.h"
58 #include "gic_v3_var.h"
60 /* Device and PIC methods */
/* Forward declarations of the PIC interface implementations below. */
61 static void gic_v3_dispatch(device_t, struct trapframe *);
62 static void gic_v3_eoi(device_t, u_int);
63 static void gic_v3_mask_irq(device_t, u_int);
64 static void gic_v3_unmask_irq(device_t, u_int);
/* SMP support: secondary-CPU bring-up and inter-processor interrupts. */
66 static void gic_v3_init_secondary(device_t);
67 static void gic_v3_ipi_send(device_t, cpuset_t, u_int);
/*
 * Kobj method table binding the device and PIC interfaces to the
 * implementations in this file.
 * NOTE(review): this extract appears to be missing lines here (e.g.
 * DEVMETHOD_END and the table's closing brace) — confirm against the
 * full file before relying on the table's layout.
 */
70 static device_method_t gic_v3_methods[] = {
71 /* Device interface */
72 DEVMETHOD(device_detach, gic_v3_detach),
/* PIC interface */
75 DEVMETHOD(pic_dispatch, gic_v3_dispatch),
76 DEVMETHOD(pic_eoi, gic_v3_eoi),
77 DEVMETHOD(pic_mask, gic_v3_mask_irq),
78 DEVMETHOD(pic_unmask, gic_v3_unmask_irq),
/* MP-specific PIC methods */
80 DEVMETHOD(pic_init_secondary, gic_v3_init_secondary),
81 DEVMETHOD(pic_ipi_send, gic_v3_ipi_send),
/* Declare the gic_v3 driver class with softc-sized per-instance state. */
87 DEFINE_CLASS_0(gic_v3, gic_v3_driver, gic_v3_methods,
88 sizeof(struct gic_v3_softc));
91 * Driver-specific definitions.
/* malloc(9) type used for all allocations made by this driver. */
93 MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
96 * Helper functions and definitions.
98 /* Destination registers, either Distributor or Re-Distributor */
104 /* Helper routines starting with gic_v3_ */
/* Initialization helpers; each returns 0 on success or an errno value. */
105 static int gic_v3_dist_init(struct gic_v3_softc *);
106 static int gic_v3_redist_alloc(struct gic_v3_softc *);
107 static int gic_v3_redist_find(struct gic_v3_softc *);
108 static int gic_v3_redist_init(struct gic_v3_softc *);
109 static int gic_v3_cpu_init(struct gic_v3_softc *);
/* Spin until a pending register write to the (Re-)Distributor completes. */
110 static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);
112 /* A sequence of init functions for primary (boot) CPU */
113 typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
114 /* Primary CPU initialization sequence */
/*
 * NULL-terminated tables of init steps, walked in order by
 * gic_v3_attach() and gic_v3_init_secondary() respectively.
 * NOTE(review): the array initializers are missing from this extract —
 * confirm their contents against the full file.
 */
115 static gic_v3_initseq_t gic_v3_primary_init[] = {
124 /* Secondary CPU initialization sequence */
125 static gic_v3_initseq_t gic_v3_secondary_init[] = {
/*
 * Attach the GICv3 driver: allocate and map the Distributor and all
 * Re-Distributor regions, read GICD_TYPER to size the interrupt space,
 * run the primary-CPU init sequence and register as the root PIC.
 * NOTE(review): several lines (locals such as i/rid/typer/err, error
 * handling and the return statements) are missing from this extract —
 * confirm against the full file.
 */
136 gic_v3_attach(device_t dev)
138 struct gic_v3_softc *sc;
139 gic_v3_initseq_t *init_func;
145 sc = device_get_softc(dev);
146 sc->gic_registered = FALSE;
150 /* Initialize mutex */
151 mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
154 * Allocate array of struct resource.
155 * One entry for Distributor and all remaining for Re-Distributor.
/*
 * NOTE(review): sizeof(sc->gic_res) is the size of the pointer-to-pointer,
 * which happens to equal the element size (struct resource *) here;
 * sizeof(*sc->gic_res) would state the intent more clearly.
 */
157 sc->gic_res = malloc(
158 sizeof(sc->gic_res) * (sc->gic_redists.nregions + 1),
161 /* Now allocate corresponding resources */
162 for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
163 sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
165 if (sc->gic_res[rid] == NULL)
170 * Distributor interface
/* rid 0 is always the Distributor; rids 1..n are Re-Distributor regions. */
172 sc->gic_dist = sc->gic_res[0];
175 * Re-Distributor interface
177 /* Allocate space under region descriptions */
178 sc->gic_redists.regions = malloc(
179 sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
182 /* Fill-up bus_space information for each region. */
183 for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
184 sc->gic_redists.regions[i] = sc->gic_res[rid];
186 /* Get the number of supported SPI interrupts */
187 typer = gic_d_read(sc, 4, GICD_TYPER);
188 sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
/* Clamp to the driver's supported maximum. */
189 if (sc->gic_nirqs > GIC_I_NUM_MAX)
190 sc->gic_nirqs = GIC_I_NUM_MAX;
192 /* Get the number of supported interrupt identifier bits */
193 sc->gic_idbits = GICD_TYPER_IDBITS(typer);
196 device_printf(dev, "SPIs: %u, IDs: %u\n",
197 sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
200 /* Run init sequence for the boot CPU */
201 for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
202 err = (*init_func)(sc);
208 * Now register PIC to the interrupts handling layer.
210 arm_register_root_pic(dev, sc->gic_nirqs);
/* From this point on, detaching the device would panic (see detach). */
211 sc->gic_registered = TRUE;
/*
 * Detach the driver: release all bus resources and free the per-CPU
 * Re-Distributor bookkeeping. Refuses (panics) if the PIC is already
 * registered with the interrupt layer, since there is no deregistration.
 * NOTE(review): locals (rid, i) and the return are missing from this
 * extract — confirm against the full file.
 */
217 gic_v3_detach(device_t dev)
219 struct gic_v3_softc *sc;
223 sc = device_get_softc(dev);
225 if (device_is_attached(dev)) {
227 * XXX: We should probably deregister PIC
229 if (sc->gic_registered)
230 panic("Trying to detach registered PIC")/* no safe teardown path */;
232 for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
233 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
/* Free per-CPU Re-Distributor resource copies (free(NULL) is a no-op). */
235 for (i = 0; i < mp_ncpus; i++)
236 free(sc->gic_redists.pcpu[i], M_GIC_V3);
238 free(sc->gic_res, M_GIC_V3);
239 free(sc->gic_redists.regions, M_GIC_V3);
/*
 * Top-level interrupt dispatch: acknowledge the highest-priority pending
 * group-1 interrupt via ICC_IAR1_EL1 and hand it to the generic layer.
 * SGIs are EOIed here before dispatch; spurious reads are dropped.
 * NOTE(review): the asm statement's opening line and several braces are
 * missing from this extract — confirm against the full file.
 */
248 gic_v3_dispatch(device_t dev, struct trapframe *frame)
253 if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) {
255 * Hardware: Cavium ThunderX
256 * Chip revision: Pass 1.0 (early version)
257 * Pass 1.1 (production)
258 * ERRATUM: 22978, 23154
/* Erratum workaround: pad the IAR read with NOPs per Cavium guidance. */
261 "nop;nop;nop;nop;nop;nop;nop;nop; \n"
262 "mrs %0, ICC_IAR1_EL1 \n"
263 "nop;nop;nop;nop; \n"
265 : "=&r" (active_irq));
/* Normal path: read the Interrupt Acknowledge Register directly. */
267 active_irq = gic_icc_read(IAR1);
/* 1023 (spurious) means nothing is pending for this PE. */
270 if (__predict_false(active_irq == ICC_IAR1_EL1_SPUR))
/* PPIs, SPIs and LPIs go straight to the generic dispatcher. */
273 if (__predict_true((active_irq >= GIC_FIRST_PPI &&
274 active_irq <= GIC_LAST_SPI) || active_irq >= GIC_FIRST_LPI)) {
275 arm_dispatch_intr(active_irq, frame);
/* SGIs (IPIs) are EOIed before dispatch. */
279 if (active_irq <= GIC_LAST_SGI) {
280 gic_icc_write(EOIR1, (uint64_t)active_irq);
281 arm_dispatch_intr(active_irq, frame);
/* Signal end-of-interrupt for a group-1 interrupt via ICC_EOIR1_EL1. */
288 gic_v3_eoi(device_t dev, u_int irq)
291 gic_icc_write(EOIR1, (uint64_t)irq);
295 gic_v3_mask_irq(device_t dev, u_int irq)
297 struct gic_v3_softc *sc;
299 sc = device_get_softc(dev);
301 if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */
303 GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq));
304 gic_v3_wait_for_rwp(sc, REDIST);
305 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */
306 gic_r_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
307 gic_v3_wait_for_rwp(sc, DIST);
308 } else if (irq >= GIC_FIRST_LPI) { /* LPIs */
309 lpi_mask_irq(dev, irq);
311 panic("%s: Unsupported IRQ number %u", __func__, irq);
/*
 * Enable (unmask) the given interrupt: SGIs/PPIs via the per-CPU
 * Re-Distributor SGI frame, SPIs via the Distributor, LPIs via the ITS.
 * A RWP wait after each enable-register write ensures completion.
 * NOTE(review): the gic_r_write() call opening line (before the
 * GICR_SGI_BASE_SIZE operand) is missing from this extract.
 */
315 gic_v3_unmask_irq(device_t dev, u_int irq)
317 struct gic_v3_softc *sc;
319 sc = device_get_softc(dev);
321 if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */
323 GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq), GICD_I_MASK(irq));
324 gic_v3_wait_for_rwp(sc, REDIST);
325 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */
326 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
327 gic_v3_wait_for_rwp(sc, DIST);
328 } else if (irq >= GIC_FIRST_LPI) { /* LPIs */
329 lpi_unmask_irq(dev, irq);
331 panic("%s: Unsupported IRQ number %u", __func__, irq);
/*
 * Per-CPU GIC initialization for an application (non-boot) processor:
 * run the secondary init sequence, try to bring up the ITS for LPIs,
 * and unmask the architectural timer PPIs.
 * NOTE(review): error-handling lines and the PCPU cpuid argument of the
 * printf calls are missing from this extract.
 */
336 gic_v3_init_secondary(device_t dev)
338 struct gic_v3_softc *sc;
339 gic_v3_initseq_t *init_func;
342 sc = device_get_softc(dev);
344 /* Run init sequence for this secondary CPU */
345 for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) {
346 err = (*init_func)(sc);
349 "Could not initialize GIC for CPU%u\n",
356 * Try to initialize ITS.
357 * If there is no driver attached this routine will fail but that
358 * does not mean failure here as only LPIs will not be functional
359 * on the current CPU.
361 if (its_init_cpu(NULL) != 0) {
363 "Could not initialize ITS for CPU%u. "
364 "No LPIs will arrive on this CPU\n",
369 * ARM64TODO: Unmask timer PPIs. To be removed when appropriate
370 * mechanism is implemented.
371 * Activate the timer interrupts: virtual (27), secure (29),
372 * and non-secure (30). Use hardcoded values here as there
373 * should be no defines for them.
375 gic_v3_unmask_irq(dev, 27);
376 gic_v3_unmask_irq(dev, 29);
377 gic_v3_unmask_irq(dev, 30);
/*
 * Send an SGI (IPI) to every CPU in the given cpuset. A single
 * ICC_SGI1R_EL1 write can only target CPUs sharing the same
 * AFF3.AFF2.AFF1 cluster, so the set is processed in groups: build a
 * target list of AFF0 bits for one cluster, send, repeat until empty.
 * NOTE(review): locals (cpu, aff, tlist, val, aff_mask) and some braces
 * are missing from this extract.
 */
381 gic_v3_ipi_send(device_t dev, cpuset_t cpuset, u_int ipi)
388 /* Set affinity mask to match level 3, 2 and 1 */
389 aff_mask = CPU_AFF1_MASK | CPU_AFF2_MASK | CPU_AFF3_MASK;
391 /* Iterate through all CPUs in set */
392 while (!CPU_EMPTY(&cpuset)) {
394 for (cpu = 0; cpu < mp_ncpus; cpu++) {
395 /* Compose target list for single AFF3:AFF2:AFF1 set */
396 if (CPU_ISSET(cpu, &cpuset)) {
399 * Save affinity of the first CPU to
400 * send IPI to for later comparison.
402 aff = CPU_AFFINITY(cpu);
403 tlist |= (1UL << CPU_AFF0(aff));
404 CPU_CLR(cpu, &cpuset);
406 /* Check for same Affinity level 3, 2 and 1 */
407 if ((aff & aff_mask) == (CPU_AFFINITY(cpu) & aff_mask)) {
408 tlist |= (1UL << CPU_AFF0(CPU_AFFINITY(cpu)));
409 /* Clear CPU in cpuset from target list */
410 CPU_CLR(cpu, &cpuset);
/* The SGI target-list field holds at most 16 AFF0 bits. */
415 KASSERT((tlist & ~GICI_SGI_TLIST_MASK) == 0,
416 ("Target list too long for GICv3 IPI"));
417 /* Send SGI to CPUs in target list */
/* Compose the SGI1R value: cluster affinity, IPI number, target list. */
419 val |= (uint64_t)CPU_AFF3(aff) << GICI_SGI_AFF3_SHIFT;
420 val |= (uint64_t)CPU_AFF2(aff) << GICI_SGI_AFF2_SHIFT;
421 val |= (uint64_t)CPU_AFF1(aff) << GICI_SGI_AFF1_SHIFT;
422 val |= (uint64_t)(ipi & GICI_SGI_IPI_MASK) << GICI_SGI_IPI_SHIFT;
423 gic_icc_write(SGI1R, val);
/*
 * Spin (up to ~1 second) until the Register Write Pending bit clears in
 * the selected interface: the Distributor's GICD_CTLR or this CPU's
 * Re-Distributor (RWP sits at the same offset in GICR_CTLR).
 * Panics if the write never completes.
 * NOTE(review): the switch statement and DIST case are missing from
 * this extract — confirm against the full file.
 */
433 gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
435 struct resource *res;
437 size_t us_left = 1000000;
439 cpuid = PCPU_GET(cpuid);
/* REDIST case: poll this CPU's own Re-Distributor frame. */
446 res = sc->gic_redists.pcpu[cpuid];
449 KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
453 while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
456 panic("GICD Register write pending for too long");
/* Program this CPU's interrupt priority mask (ICC_PMR_EL1). */
462 gic_v3_cpu_priority(uint64_t mask)
465 /* Set priority mask */
466 gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
/*
 * Enable system-register access to the GIC CPU interface by setting
 * ICC_SRE_EL1.SRE, then read it back: if the bit did not stick, access
 * was locked down at a higher exception level (EL2/EL3) and the error
 * path fires. Returns success/failure (return lines missing from this
 * extract — confirm against the full file).
 */
470 gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
475 cpuid = PCPU_GET(cpuid);
477 * Set the SRE bit to enable access to GIC CPU interface
478 * via system registers.
480 sre = READ_SPECIALREG(icc_sre_el1);
481 sre |= ICC_SRE_EL1_SRE;
482 WRITE_SPECIALREG(icc_sre_el1, sre);
485 * Now ensure that the bit is set.
487 sre = READ_SPECIALREG(icc_sre_el1);
488 if ((sre & ICC_SRE_EL1_SRE) == 0) {
489 /* The write did not take effect: SRE access was disabled at EL2. */
490 device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
491 "via system registers\n", cpuid);
493 } else if (bootverbose) {
494 device_printf(sc->dev,
495 "CPU%u enabled CPU interface via system registers\n",
/*
 * Per-CPU interface initialization: enable system-register access,
 * open the priority mask fully, select combined priority-drop/deactivate
 * EOI mode and enable group-1 interrupt signalling.
 * NOTE(review): error-check and return lines are missing from this
 * extract.
 */
503 gic_v3_cpu_init(struct gic_v3_softc *sc)
507 /* Enable access to CPU interface via system registers */
508 err = gic_v3_cpu_enable_sre(sc);
511 /* Priority mask to minimum - accept all interrupts */
512 gic_v3_cpu_priority(GIC_PRIORITY_MIN);
513 /* Disable EOI mode */
514 gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
515 /* Enable group 1 (insecure) interrupts */
/*
 * NOTE(review): the IGRPEN0 mask is used on IGRPEN1 — presumably the
 * Enable bit occupies the same position in both registers and only one
 * define exists; confirm against the register header.
 */
516 gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);
/*
 * One-time Distributor initialization, performed on the boot CPU:
 * disable, configure trigger/priority/enable state for all SPIs,
 * re-enable with affinity routing and group-1, then route every SPI to
 * the boot CPU. NOTE(review): locals (i, aff), GICD_CTLR enable-bit
 * continuation and the return are missing from this extract.
 */
523 gic_v3_dist_init(struct gic_v3_softc *sc)
529 * 1. Disable the Distributor
531 gic_d_write(sc, 4, GICD_CTLR, 0);
532 gic_v3_wait_for_rwp(sc, DIST);
535 * 2. Configure the Distributor
537 /* Set all global interrupts to be level triggered, active low. */
538 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
539 gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);
541 /* Set priority to all shared interrupts */
542 for (i = GIC_FIRST_SPI;
543 i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
544 /* Set highest priority */
545 gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
549 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
550 * Re-Distributor registers.
552 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
553 gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);
555 gic_v3_wait_for_rwp(sc, DIST);
558 * 3. Enable Distributor
560 /* Enable Distributor with ARE, Group 1 */
561 gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
565 * 4. Route all interrupts to boot CPU.
567 aff = CPU_AFFINITY(PCPU_GET(cpuid));
568 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
569 gic_d_write(sc, 4, GICD_IROUTER(i), aff);
/*
 * Allocate a struct resource slot for each present CPU's Re-Distributor;
 * absent CPUs get NULL so later frees are safe. The actual register
 * frames are located later by gic_v3_redist_find().
 * NOTE(review): malloc flags, the else keyword and the return are
 * missing from this extract.
 */
576 gic_v3_redist_alloc(struct gic_v3_softc *sc)
580 /* Allocate struct resource for all CPU's Re-Distributor registers */
581 for (cpuid = 0; cpuid < mp_ncpus; cpuid++)
582 if (CPU_ISSET(cpuid, &all_cpus) != 0)
583 sc->gic_redists.pcpu[cpuid] =
584 malloc(sizeof(*sc->gic_redists.pcpu[0]),
/* CPU not present: mark the slot empty. */
587 sc->gic_redists.pcpu[cpuid] = NULL;
/*
 * Locate the Re-Distributor frame belonging to the current CPU by
 * walking every region and comparing each frame's GICR_TYPER affinity
 * field against this CPU's MPIDR-style affinity. On a match, a copy of
 * the region resource (with the bus handle advanced to the frame) is
 * stored in the per-CPU slot. Frame stride is RD+SGI, plus VLPI+reserved
 * when GICR_TYPER.VLPIS is set; GICR_TYPER.Last terminates a region.
 * NOTE(review): the do-loop opening, success return and error returns
 * are missing from this extract.
 */
592 gic_v3_redist_find(struct gic_v3_softc *sc)
594 struct resource r_res;
595 bus_space_handle_t r_bsh;
602 cpuid = PCPU_GET(cpuid);
604 aff = CPU_AFFINITY(cpuid);
605 /* Affinity in format for comparison with typer */
606 aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
607 (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);
610 device_printf(sc->dev,
611 "Start searching for Re-Distributor\n");
613 /* Iterate through Re-Distributor regions */
614 for (i = 0; i < sc->gic_redists.nregions; i++) {
615 /* Take a copy of the region's resource */
616 r_res = *sc->gic_redists.regions[i];
617 r_bsh = rman_get_bushandle(&r_res);
/* Sanity-check that this region really contains a GICv3/v4 redistributor. */
619 pidr2 = bus_read_4(&r_res, GICR_PIDR2);
620 switch (pidr2 & GICR_PIDR2_ARCH_MASK) {
621 case GICR_PIDR2_ARCH_GICv3: /* fall through */
622 case GICR_PIDR2_ARCH_GICv4:
625 device_printf(sc->dev,
626 "No Re-Distributor found for CPU%u\n", cpuid);
/* Walk the frames within this region until GICR_TYPER.Last is set. */
631 typer = bus_read_8(&r_res, GICR_TYPER);
632 if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
633 KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
634 ("Invalid pointer to per-CPU redistributor"));
635 /* Copy res contents to its final destination */
636 *sc->gic_redists.pcpu[cpuid] = r_res;
638 device_printf(sc->dev,
639 "CPU%u Re-Distributor has been found\n",
/* Advance to the next frame: RD + SGI, plus VLPI frames if present. */
645 r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
646 if ((typer & GICR_TYPER_VLPIS) != 0) {
648 (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
651 rman_set_bushandle(&r_res, r_bsh);
652 } while ((typer & GICR_TYPER_LAST) == 0);
/* Exhausted all regions without a match. */
655 device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
/*
 * Wake up this CPU's Re-Distributor: clear GICR_WAKER.ProcessorSleep and
 * spin (up to ~1 second) until ChildrenAsleep reads zero, panicking on
 * timeout. NOTE(review): the DELAY call, return and some printf
 * arguments are missing from this extract.
 */
660 gic_v3_redist_wake(struct gic_v3_softc *sc)
663 size_t us_left = 1000000;
665 waker = gic_r_read(sc, 4, GICR_WAKER);
666 /* Wake up Re-Distributor for this CPU */
667 waker &= ~GICR_WAKER_PS;
668 gic_r_write(sc, 4, GICR_WAKER, waker);
670 * When clearing ProcessorSleep bit it is required to wait for
671 * ChildrenAsleep to become zero following the processor power-on.
673 while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
675 if (us_left-- == 0) {
676 panic("Could not wake Re-Distributor for CPU%u",
682 device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
690 gic_v3_redist_init(struct gic_v3_softc *sc)
695 err = gic_v3_redist_find(sc);
699 err = gic_v3_redist_wake(sc);
704 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
705 GICR_I_ENABLER_PPI_MASK);
707 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
708 GICR_I_ENABLER_SGI_MASK);
710 /* Set priority for SGIs and PPIs */
711 for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
712 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
716 gic_v3_wait_for_rwp(sc, REDIST);