1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  * All rights reserved.
4  *
5  * This software was developed by Andrew Turner under
6  * the sponsorship of the FreeBSD Foundation.
7  *
8  * This software was developed by Semihalf under
9  * the sponsorship of the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32
33 #include "opt_platform.h"
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bitstring.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/rman.h>
47 #include <sys/pcpu.h>
48 #include <sys/proc.h>
49 #include <sys/cpuset.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/smp.h>
53
54 #include <vm/vm.h>
55 #include <vm/pmap.h>
56
57 #include <machine/bus.h>
58 #include <machine/cpu.h>
59 #include <machine/intr.h>
60
61 #include "pic_if.h"
62
63 #include "gic_v3_reg.h"
64 #include "gic_v3_var.h"
65
66 #ifdef INTRNG
67 static pic_disable_intr_t gic_v3_disable_intr;
68 static pic_enable_intr_t gic_v3_enable_intr;
69 static pic_map_intr_t gic_v3_map_intr;
70 static pic_setup_intr_t gic_v3_setup_intr;
71 static pic_teardown_intr_t gic_v3_teardown_intr;
72 static pic_post_filter_t gic_v3_post_filter;
73 static pic_post_ithread_t gic_v3_post_ithread;
74 static pic_pre_ithread_t gic_v3_pre_ithread;
75 static pic_bind_intr_t gic_v3_bind_intr;
76 #ifdef SMP
77 static pic_init_secondary_t gic_v3_init_secondary;
78 static pic_ipi_send_t gic_v3_ipi_send;
79 static pic_ipi_setup_t gic_v3_ipi_setup;
80 #endif
81
82 static u_int gic_irq_cpu;
83 #ifdef SMP
84 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
85 static u_int sgi_first_unused = GIC_FIRST_SGI;
86 #endif
87 #else
88 /* Device and PIC methods */
89 static int gic_v3_bind(device_t, u_int, u_int);
90 static void gic_v3_dispatch(device_t, struct trapframe *);
91 static void gic_v3_eoi(device_t, u_int);
92 static void gic_v3_mask_irq(device_t, u_int);
93 static void gic_v3_unmask_irq(device_t, u_int);
94 #ifdef SMP
95 static void gic_v3_init_secondary(device_t);
96 static void gic_v3_ipi_send(device_t, cpuset_t, u_int);
97 #endif
98 #endif
99
100 static device_method_t gic_v3_methods[] = {
101         /* Device interface */
102         DEVMETHOD(device_detach,        gic_v3_detach),
103
104 #ifdef INTRNG
105         /* Interrupt controller interface */
106         DEVMETHOD(pic_disable_intr,     gic_v3_disable_intr),
107         DEVMETHOD(pic_enable_intr,      gic_v3_enable_intr),
108         DEVMETHOD(pic_map_intr,         gic_v3_map_intr),
109         DEVMETHOD(pic_setup_intr,       gic_v3_setup_intr),
110         DEVMETHOD(pic_teardown_intr,    gic_v3_teardown_intr),
111         DEVMETHOD(pic_post_filter,      gic_v3_post_filter),
112         DEVMETHOD(pic_post_ithread,     gic_v3_post_ithread),
113         DEVMETHOD(pic_pre_ithread,      gic_v3_pre_ithread),
114 #ifdef SMP
115         DEVMETHOD(pic_bind_intr,        gic_v3_bind_intr),
116         DEVMETHOD(pic_init_secondary,   gic_v3_init_secondary),
117         DEVMETHOD(pic_ipi_send,         gic_v3_ipi_send),
118         DEVMETHOD(pic_ipi_setup,        gic_v3_ipi_setup),
119 #endif
120 #else
121         /* PIC interface */
122         DEVMETHOD(pic_bind,             gic_v3_bind),
123         DEVMETHOD(pic_dispatch,         gic_v3_dispatch),
124         DEVMETHOD(pic_eoi,              gic_v3_eoi),
125         DEVMETHOD(pic_mask,             gic_v3_mask_irq),
126         DEVMETHOD(pic_unmask,           gic_v3_unmask_irq),
127 #ifdef SMP
128         DEVMETHOD(pic_init_secondary,   gic_v3_init_secondary),
129         DEVMETHOD(pic_ipi_send,         gic_v3_ipi_send),
130 #endif
131 #endif
132
133         /* End */
134         DEVMETHOD_END
135 };
136
137 DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
138     sizeof(struct gic_v3_softc));
139
140 /*
141  * Driver-specific definitions.
142  */
143 MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
144
145 /*
146  * Helper functions and definitions.
147  */
148 /* Destination registers, either Distributor or Re-Distributor */
149 enum gic_v3_xdist {
150         DIST = 0,
151         REDIST,
152 };
153
154 /* Helper routines starting with gic_v3_ */
155 static int gic_v3_dist_init(struct gic_v3_softc *);
156 static int gic_v3_redist_alloc(struct gic_v3_softc *);
157 static int gic_v3_redist_find(struct gic_v3_softc *);
158 static int gic_v3_redist_init(struct gic_v3_softc *);
159 static int gic_v3_cpu_init(struct gic_v3_softc *);
160 static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);
161
162 /* A sequence of init functions for primary (boot) CPU */
163 typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
164 /* Primary CPU initialization sequence */
165 static gic_v3_initseq_t gic_v3_primary_init[] = {
166         gic_v3_dist_init,
167         gic_v3_redist_alloc,
168         gic_v3_redist_init,
169         gic_v3_cpu_init,
170         NULL
171 };
172
173 #ifdef SMP
174 /* Secondary CPU initialization sequence */
175 static gic_v3_initseq_t gic_v3_secondary_init[] = {
176         gic_v3_redist_init,
177         gic_v3_cpu_init,
178         NULL
179 };
180 #endif
181
182 /*
183  * Device interface.
184  */
185 int
186 gic_v3_attach(device_t dev)
187 {
188         struct gic_v3_softc *sc;
189         gic_v3_initseq_t *init_func;
190         uint32_t typer;
191         int rid;
192         int err;
193         size_t i;
194 #ifdef INTRNG
195         u_int irq;
196         const char *name;
197 #endif
198
199         sc = device_get_softc(dev);
200         sc->gic_registered = FALSE;
201         sc->dev = dev;
202         err = 0;
203
204         /* Initialize mutex */
205         mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
206
207         /*
208          * Allocate array of struct resource.
209          * One entry for Distributor and all remaining for Re-Distributor.
210          */
211         sc->gic_res = malloc(
212             sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
213             M_GIC_V3, M_WAITOK);
214
215         /* Now allocate corresponding resources */
216         for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
217                 sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
218                     &rid, RF_ACTIVE);
219                 if (sc->gic_res[rid] == NULL)
220                         return (ENXIO);
221         }
222
223         /*
224          * Distributor interface
225          */
226         sc->gic_dist = sc->gic_res[0];
227
228         /*
229          * Re-Distributor interface
230          */
231         /* Allocate space for the region descriptions */
232         sc->gic_redists.regions = malloc(
233             sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
234             M_GIC_V3, M_WAITOK);
235
236         /* Fill in bus_space information for each region. */
237         for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
238                 sc->gic_redists.regions[i] = sc->gic_res[rid];
239
240         /* Get the number of supported SPI interrupts */
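        /*
         * GICD_TYPER.ITLinesNumber encodes the number of implemented INTIDs
         * as 32 * (N + 1); GICD_TYPER_I_NUM() is assumed to perform that
         * conversion, and the result is clamped below to the driver's limit.
         */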
241         typer = gic_d_read(sc, 4, GICD_TYPER);
242         sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
243         if (sc->gic_nirqs > GIC_I_NUM_MAX)
244                 sc->gic_nirqs = GIC_I_NUM_MAX;
245
246 #ifdef INTRNG
247         sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
248             M_GIC_V3, M_WAITOK | M_ZERO);
249         name = device_get_nameunit(dev);
250         for (irq = 0; irq < sc->gic_nirqs; irq++) {
251                 struct intr_irqsrc *isrc;
252
253                 sc->gic_irqs[irq].gi_irq = irq;
254                 sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
255                 sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
256
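                /*
                 * INTIDs 0-15 are SGIs (used here as IPIs), 16-31 are PPIs
                 * and 32 and up are SPIs; name each interrupt source
                 * accordingly.
                 */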
257                 isrc = &sc->gic_irqs[irq].gi_isrc;
258                 if (irq <= GIC_LAST_SGI) {
259                         err = intr_isrc_register(isrc, sc->dev,
260                             INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
261                 } else if (irq <= GIC_LAST_PPI) {
262                         err = intr_isrc_register(isrc, sc->dev,
263                             INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
264                 } else {
265                         err = intr_isrc_register(isrc, sc->dev, 0,
266                             "%s,s%u", name, irq - GIC_FIRST_SPI);
267                 }
268                 if (err != 0) {
269                         /* XXX call intr_isrc_deregister() */
270                         free(sc->gic_irqs, M_GIC_V3);
271                         return (err);
272                 }
273         }
274 #endif
275
276         /* Get the number of supported interrupt identifier bits */
277         sc->gic_idbits = GICD_TYPER_IDBITS(typer);
278
279         if (bootverbose) {
280                 device_printf(dev, "SPIs: %u, IDs: %u\n",
281                     sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
282         }
283
284         /* Run the init sequence for the boot CPU */
285         for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
286                 err = (*init_func)(sc);
287                 if (err != 0)
288                         return (err);
289         }
290         /*
291          * Full success.
292          * Now register the PIC with the interrupt handling layer.
293          */
294 #ifndef INTRNG
295         arm_register_root_pic(dev, sc->gic_nirqs);
296         sc->gic_registered = TRUE;
297 #endif
298
299         return (0);
300 }
301
302 int
303 gic_v3_detach(device_t dev)
304 {
305         struct gic_v3_softc *sc;
306         size_t i;
307         int rid;
308
309         sc = device_get_softc(dev);
310
311         if (device_is_attached(dev)) {
312                 /*
313                  * XXX: We should probably deregister PIC
314                  */
315                 if (sc->gic_registered)
316                         panic("Trying to detach registered PIC");
317         }
318         for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
319                 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
320
321         for (i = 0; i < mp_ncpus; i++)
322                 free(sc->gic_redists.pcpu[i], M_GIC_V3);
323
324         free(sc->gic_res, M_GIC_V3);
325         free(sc->gic_redists.regions, M_GIC_V3);
326
327         return (0);
328 }
329
330 #ifdef INTRNG
331 int
332 arm_gic_v3_intr(void *arg)
333 {
334         struct gic_v3_softc *sc = arg;
335         struct gic_v3_irqsrc *gi;
336         uint64_t active_irq;
337         struct trapframe *tf;
338         bool first;
339
340         first = true;
341
342         while (1) {
343                 if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) {
344                         /*
345                          * Hardware:            Cavium ThunderX
346                          * Chip revision:       Pass 1.0 (early version)
347                          *                      Pass 1.1 (production)
348                          * ERRATUM:             22978, 23154
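                         * Workaround:          pad the ICC_IAR1_EL1 read with
                         *                      NOPs and a trailing DSB, as in
                         *                      the inline assembly below.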
349                          */
350                         __asm __volatile(
351                             "nop;nop;nop;nop;nop;nop;nop;nop;   \n"
352                             "mrs %0, ICC_IAR1_EL1               \n"
353                             "nop;nop;nop;nop;                   \n"
354                             "dsb sy                             \n"
355                             : "=&r" (active_irq));
356                 } else {
357                         active_irq = gic_icc_read(IAR1);
358                 }
359
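                /*
                 * Any INTID outside the supported range, including the
                 * special "no interrupt pending" IDs (1020-1023), ends the
                 * filter loop.
                 */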
360                 if (__predict_false(active_irq >= sc->gic_nirqs))
361                         return (FILTER_HANDLED);
362
363                 tf = curthread->td_intr_frame;
364                 gi = &sc->gic_irqs[active_irq];
365                 if (active_irq <= GIC_LAST_SGI) {
366                         /* Signal EOI for all IPIs before dispatching them. */
367                         gic_icc_write(EOIR1, (uint64_t)active_irq);
368 #ifdef SMP
369                         intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
370 #else
371                         device_printf(sc->dev, "SGI %lu on UP system detected\n",
372                             active_irq - GIC_FIRST_SGI);
373 #endif
374                 } else if (active_irq >= GIC_FIRST_PPI &&
375                     active_irq <= GIC_LAST_SPI) {
376                         if (gi->gi_trig == INTR_TRIGGER_EDGE)
377                                 gic_icc_write(EOIR1, gi->gi_irq);
378
379                         if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
380                                 if (gi->gi_trig != INTR_TRIGGER_EDGE)
381                                         gic_icc_write(EOIR1, gi->gi_irq);
382                                 gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
383                                 device_printf(sc->dev,
384                                     "Stray irq %lu disabled\n", active_irq);
385                         }
386                 }
387         }
388 }
389
390 #ifdef FDT
391 static int
392 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
393     enum intr_polarity *polp, enum intr_trigger *trigp)
394 {
395         u_int irq;
396
397         if (ncells < 3)
398                 return (EINVAL);
399
400         /*
401          * The 1st cell is the interrupt type:
402          *      0 = SPI
403          *      1 = PPI
404          * The 2nd cell contains the interrupt number:
405          *      [0 - 987] for SPI
406          *      [0 -  15] for PPI
407          * The 3rd cell is the flags, encoded as follows:
408          *   bits[3:0] trigger type and level flags
409          *      1 = low-to-high edge triggered
410          *      2 = high-to-low edge triggered (PPI only)
411          *      4 = active-high level-sensitive
412          *      8 = active-low level-sensitive (PPI only)
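         *      Example: an "interrupts" property of <0 23 4> describes
         *      SPI 23 (INTID 32 + 23 = 55), active-high level-sensitive.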
413          */
414         switch (cells[0]) {
415         case 0:
416                 irq = GIC_FIRST_SPI + cells[1];
417                 /* SPI irq is checked later. */
418                 break;
419         case 1:
420                 irq = GIC_FIRST_PPI + cells[1];
421                 if (irq > GIC_LAST_PPI) {
422                         device_printf(dev, "unsupported PPI interrupt "
423                             "number %u\n", cells[1]);
424                         return (EINVAL);
425                 }
426                 break;
427         default:
428                 device_printf(dev, "unsupported interrupt type "
429                     "configuration %u\n", cells[0]);
430                 return (EINVAL);
431         }
432
433         switch (cells[2] & 0xf) {
434         case 1:
435                 *trigp = INTR_TRIGGER_EDGE;
436                 *polp = INTR_POLARITY_HIGH;
437                 break;
438         case 2:
439                 *trigp = INTR_TRIGGER_EDGE;
440                 *polp = INTR_POLARITY_LOW;
441                 break;
442         case 4:
443                 *trigp = INTR_TRIGGER_LEVEL;
444                 *polp = INTR_POLARITY_HIGH;
445                 break;
446         case 8:
447                 *trigp = INTR_TRIGGER_LEVEL;
448                 *polp = INTR_POLARITY_LOW;
449                 break;
450         default:
451                 device_printf(dev, "unsupported trigger/polarity "
452                     "configuration 0x%02x\n", cells[2]);
453                 return (EINVAL);
454         }
455
456         /* Check the interrupt is valid */
457         if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
458                 return (EINVAL);
459
460         *irqp = irq;
461         return (0);
462 }
463 #endif
464
465 static int
466 do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
467     enum intr_polarity *polp, enum intr_trigger *trigp)
468 {
469         struct gic_v3_softc *sc;
470         enum intr_polarity pol;
471         enum intr_trigger trig;
472 #ifdef FDT
473         struct intr_map_data_fdt *daf;
474 #endif
475         u_int irq;
476
477         sc = device_get_softc(dev);
478
479         switch (data->type) {
480 #ifdef FDT
481         case INTR_MAP_DATA_FDT:
482                 daf = (struct intr_map_data_fdt *)data;
483                 if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
484                     &trig) != 0)
485                         return (EINVAL);
486                 break;
487 #endif
488         default:
489                 return (EINVAL);
490         }
491
492         if (irq >= sc->gic_nirqs)
493                 return (EINVAL);
494         switch (pol) {
495         case INTR_POLARITY_CONFORM:
496         case INTR_POLARITY_LOW:
497         case INTR_POLARITY_HIGH:
498                 break;
499         default:
500                 return (EINVAL);
501         }
502         switch (trig) {
503         case INTR_TRIGGER_CONFORM:
504         case INTR_TRIGGER_EDGE:
505         case INTR_TRIGGER_LEVEL:
506                 break;
507         default:
508                 return (EINVAL);
509         }
510
511         *irqp = irq;
512         if (polp != NULL)
513                 *polp = pol;
514         if (trigp != NULL)
515                 *trigp = trig;
516         return (0);
517 }
518
519 static int
520 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
521     struct intr_irqsrc **isrcp)
522 {
523         struct gic_v3_softc *sc;
524         int error;
525         u_int irq;
526
527         error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
528         if (error == 0) {
529                 sc = device_get_softc(dev);
530                 *isrcp = GIC_INTR_ISRC(sc, irq);
531         }
532         return (error);
533 }
534
535 static int
536 gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
537     struct resource *res, struct intr_map_data *data)
538 {
539         struct gic_v3_softc *sc = device_get_softc(dev);
540         struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
541         enum intr_trigger trig;
542         enum intr_polarity pol;
543         uint32_t reg;
544         u_int irq;
545         int error;
546
547         if (data == NULL)
548                 return (ENOTSUP);
549
550         error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
551         if (error != 0)
552                 return (error);
553
554         if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
555             trig == INTR_TRIGGER_CONFORM)
556                 return (EINVAL);
557
558         /* Compare config if this is not first setup. */
559         if (isrc->isrc_handlers != 0) {
560                 if (pol != gi->gi_pol || trig != gi->gi_trig)
561                         return (EINVAL);
562                 else
563                         return (0);
564         }
565
566         gi->gi_pol = pol;
567         gi->gi_trig = trig;
568
569         /*
570          * XXX - If a per-CPU interrupt is enabled after SMP has already
571          *       started, an IPI is needed to enable it on the other CPUs.
572          *       This is further complicated because pic_enable_source()
573          *       and pic_disable_source() should act on a per-CPU basis
574          *       only, so it should be solved here somehow.
575          */
576         if (isrc->isrc_flags & INTR_ISRCF_PPI)
577                 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
578
579         if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
580                 mtx_lock_spin(&sc->gic_mtx);
581
582                 /* Set the trigger mode (edge vs. level) */
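                /*
                 * GICD_ICFGR/GICR_ICFGR hold two configuration bits per
                 * interrupt (16 interrupts per 32-bit register); bit 1 of
                 * each field selects edge (1) versus level (0) triggering,
                 * which is what the read-modify-write below updates.
                 */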
583                 if (irq <= GIC_LAST_PPI)
584                         reg = gic_r_read(sc, 4,
585                             GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
586                 else
587                         reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
588                 if (trig == INTR_TRIGGER_LEVEL)
589                         reg &= ~(2 << ((irq % 16) * 2));
590                 else
591                         reg |= 2 << ((irq % 16) * 2);
592
593                 if (irq <= GIC_LAST_PPI) {
594                         gic_r_write(sc, 4,
595                             GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
596                         gic_v3_wait_for_rwp(sc, REDIST);
597                 } else {
598                         gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
599                         gic_v3_wait_for_rwp(sc, DIST);
600                 }
601
602                 mtx_unlock_spin(&sc->gic_mtx);
603
604                 gic_v3_bind_intr(dev, isrc);
605         }
606
607         return (0);
608 }
609
610 static int
611 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
612     struct resource *res, struct intr_map_data *data)
613 {
614         struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
615
616         if (isrc->isrc_handlers == 0) {
617                 gi->gi_pol = INTR_POLARITY_CONFORM;
618                 gi->gi_trig = INTR_TRIGGER_CONFORM;
619         }
620
621         return (0);
622 }
623
624 static void
625 gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
626 {
627         struct gic_v3_softc *sc;
628         struct gic_v3_irqsrc *gi;
629         u_int irq;
630
631         sc = device_get_softc(dev);
632         gi = (struct gic_v3_irqsrc *)isrc;
633         irq = gi->gi_irq;
634
635         if (irq <= GIC_LAST_PPI) {
636                 /* SGIs and PPIs in corresponding Re-Distributor */
637                 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
638                     GICD_I_MASK(irq));
639                 gic_v3_wait_for_rwp(sc, REDIST);
640         } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
641                 /* SPIs in distributor */
642                 gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
643                 gic_v3_wait_for_rwp(sc, DIST);
644         } else
645                 panic("%s: Unsupported IRQ %u", __func__, irq);
646 }
647
648 static void
649 gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
650 {
651         struct gic_v3_softc *sc;
652         struct gic_v3_irqsrc *gi;
653         u_int irq;
654
655         sc = device_get_softc(dev);
656         gi = (struct gic_v3_irqsrc *)isrc;
657         irq = gi->gi_irq;
658
659         if (irq <= GIC_LAST_PPI) {
660                 /* SGIs and PPIs in corresponding Re-Distributor */
661                 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
662                     GICD_I_MASK(irq));
663                 gic_v3_wait_for_rwp(sc, REDIST);
664         } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
665                 /* SPIs in distributor */
666                 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
667                 gic_v3_wait_for_rwp(sc, DIST);
668         } else
669                 panic("%s: Unsupported IRQ %u", __func__, irq);
670 }
671
672 static void
673 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
674 {
675         struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
676
677         gic_v3_disable_intr(dev, isrc);
678         gic_icc_write(EOIR1, gi->gi_irq);
679 }
680
681 static void
682 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
683 {
684
685         gic_v3_enable_intr(dev, isrc);
686 }
687
688 static void
689 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
690 {
691         struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
692
693         if (gi->gi_pol == INTR_TRIGGER_EDGE)
694                 return;
695
696         gic_icc_write(EOIR1, gi->gi_irq);
697 }
698
699 static int
700 gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
701 {
702         struct gic_v3_softc *sc;
703         struct gic_v3_irqsrc *gi;
704         int cpu;
705
706         gi = (struct gic_v3_irqsrc *)isrc;
707         if (gi->gi_irq <= GIC_LAST_PPI)
708                 return (EINVAL);
709
710         KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
711             ("%s: Attempting to bind an invalid IRQ", __func__));
712
713         sc = device_get_softc(dev);
714
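        /*
         * An SPI is routed by programming GICD_IROUTER<n> with the MPIDR
         * affinity (Aff3.Aff2.Aff1.Aff0) of the target CPU, so it is only
         * ever delivered to a single CPU.
         */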
715         if (CPU_EMPTY(&isrc->isrc_cpu)) {
716                 gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
717                 CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
718                 gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq),
719                     CPU_AFFINITY(gic_irq_cpu));
720         } else {
721                 /*
722                  * We can only bind to a single CPU so select
723                  * the first CPU found.
724                  */
725                 cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
726                 gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
727         }
728
729         return (0);
730 }
731
732 #ifdef SMP
733 static void
734 gic_v3_init_secondary(device_t dev)
735 {
736         struct gic_v3_softc *sc;
737         gic_v3_initseq_t *init_func;
738         struct intr_irqsrc *isrc;
739         u_int cpu, irq;
740         int err;
741
742         sc = device_get_softc(dev);
743         cpu = PCPU_GET(cpuid);
744
745         /* Run the init sequence on this secondary CPU */
746         for (init_func = gic_v3_secondary_init; *init_func != NULL;
747             init_func++) {
748                 err = (*init_func)(sc);
749                 if (err != 0) {
750                         device_printf(dev,
751                             "Could not initialize GIC for CPU%u\n", cpu);
752                         return;
753                 }
754         }
755
756         /* Unmask attached SGI interrupts. */
757         for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
758                 isrc = GIC_INTR_ISRC(sc, irq);
759                 if (intr_isrc_init_on_cpu(isrc, cpu))
760                         gic_v3_enable_intr(dev, isrc);
761         }
762
763         /* Unmask attached PPI interrupts. */
764         for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
765                 isrc = GIC_INTR_ISRC(sc, irq);
766                 if (intr_isrc_init_on_cpu(isrc, cpu))
767                         gic_v3_enable_intr(dev, isrc);
768         }
769 }
770
771 static void
772 gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
773     u_int ipi)
774 {
775         struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
776         uint64_t aff, val, irq;
777         int i;
778
779 #define GIC_AFF_MASK    (CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
780 #define GIC_AFFINITY(i) (CPU_AFFINITY(i) & GIC_AFF_MASK)
781         aff = GIC_AFFINITY(0);
782         irq = gi->gi_irq;
783         val = 0;
784
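        /*
         * A single ICC_SGI1R_EL1 write can target at most 16 CPUs sharing
         * the same Aff3.Aff2.Aff1 value: the low 16 bits form a target list
         * indexed by Aff0. The loop below batches CPUs by affinity group and
         * issues one write per group.
         */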
785         /* Iterate through all CPUs in set */
786         for (i = 0; i < mp_ncpus; i++) {
787                 /* Move to the next affinity group */
788                 if (aff != GIC_AFFINITY(i)) {
789                         /* Send the IPI */
790                         if (val != 0) {
791                                 gic_icc_write(SGI1R, val);
792                                 val = 0;
793                         }
794                         aff = GIC_AFFINITY(i);
795                 }
796
797                 /* Send the IPI to this cpu */
798                 if (CPU_ISSET(i, &cpus)) {
799 #define ICC_SGI1R_AFFINITY(aff)                                 \
800     (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |    \
801      ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |    \
802      ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
803                         /* Set the affinity when this is the first CPU at this level */
804                         if (val == 0)
805                                 val = ICC_SGI1R_AFFINITY(aff) |
806                                     irq << ICC_SGI1R_EL1_SGIID_SHIFT;
807                         /* Set the bit to send the IPI to the CPU */
808                         val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
809                 }
810         }
811
812         /* Send the IPI to the last cpu affinity group */
813         if (val != 0)
814                 gic_icc_write(SGI1R, val);
815 #undef GIC_AFF_MASK
816 #undef GIC_AFFINITY
817 }
818
819 static int
820 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
821 {
822         struct intr_irqsrc *isrc;
823         struct gic_v3_softc *sc = device_get_softc(dev);
824
825         if (sgi_first_unused > GIC_LAST_SGI)
826                 return (ENOSPC);
827
828         isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
829         sgi_to_ipi[sgi_first_unused++] = ipi;
830
831         CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
832
833         *isrcp = isrc;
834         return (0);
835 }
836 #endif /* SMP */
837 #else /* INTRNG */
838 /*
839  * PIC interface.
840  */
841
842 static int
843 gic_v3_bind(device_t dev, u_int irq, u_int cpuid)
844 {
845         uint64_t aff;
846         struct gic_v3_softc *sc;
847
848         sc = device_get_softc(dev);
849
850         if (irq <= GIC_LAST_PPI) {
851                 /* Can't bind PPI to another CPU but it's not an error */
852                 return (0);
853         } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
854                 aff = CPU_AFFINITY(cpuid);
855                 gic_d_write(sc, 4, GICD_IROUTER(irq), aff);
856                 return (0);
857         } else if (irq >= GIC_FIRST_LPI)
858                 return (lpi_migrate(dev, irq, cpuid));
859
860         return (EINVAL);
861 }
862
863 static void
864 gic_v3_dispatch(device_t dev, struct trapframe *frame)
865 {
866         uint64_t active_irq;
867
868         while (1) {
869                 if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) {
870                         /*
871                          * Hardware:            Cavium ThunderX
872                          * Chip revision:       Pass 1.0 (early version)
873                          *                      Pass 1.1 (production)
874                          * ERRATUM:             22978, 23154
875                          */
876                         __asm __volatile(
877                             "nop;nop;nop;nop;nop;nop;nop;nop;   \n"
878                             "mrs %0, ICC_IAR1_EL1               \n"
879                             "nop;nop;nop;nop;                   \n"
880                             "dsb sy                             \n"
881                             : "=&r" (active_irq));
882                 } else {
883                         active_irq = gic_icc_read(IAR1);
884                 }
885
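                /* INTID 1023 (ICC_IAR1_EL1_SPUR) means nothing is pending. */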
886                 if (__predict_false(active_irq == ICC_IAR1_EL1_SPUR))
887                         break;
888
889                 if (__predict_true((active_irq >= GIC_FIRST_PPI &&
890                     active_irq <= GIC_LAST_SPI) || active_irq >= GIC_FIRST_LPI)) {
891                         arm_dispatch_intr(active_irq, frame);
892                         continue;
893                 }
894
895                 if (active_irq <= GIC_LAST_SGI) {
896                         gic_icc_write(EOIR1, (uint64_t)active_irq);
897                         arm_dispatch_intr(active_irq, frame);
898                         continue;
899                 }
900         }
901 }
902
903 static void
904 gic_v3_eoi(device_t dev, u_int irq)
905 {
906
907         gic_icc_write(EOIR1, (uint64_t)irq);
908 }
909
910 static void
911 gic_v3_mask_irq(device_t dev, u_int irq)
912 {
913         struct gic_v3_softc *sc;
914
915         sc = device_get_softc(dev);
916
917         if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */
918                 gic_r_write(sc, 4,
919                     GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq));
920                 gic_v3_wait_for_rwp(sc, REDIST);
921         } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */
922                 gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
923                 gic_v3_wait_for_rwp(sc, DIST);
924         } else if (irq >= GIC_FIRST_LPI) { /* LPIs */
925                 lpi_mask_irq(dev, irq);
926         } else
927                 panic("%s: Unsupported IRQ number %u", __func__, irq);
928 }
929
930 static void
931 gic_v3_unmask_irq(device_t dev, u_int irq)
932 {
933         struct gic_v3_softc *sc;
934
935         sc = device_get_softc(dev);
936
937         if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */
938                 gic_r_write(sc, 4,
939                     GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq), GICD_I_MASK(irq));
940                 gic_v3_wait_for_rwp(sc, REDIST);
941         } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */
942                 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
943                 gic_v3_wait_for_rwp(sc, DIST);
944         } else if (irq >= GIC_FIRST_LPI) { /* LPIs */
945                 lpi_unmask_irq(dev, irq);
946         } else
947                 panic("%s: Unsupported IRQ number %u", __func__, irq);
948 }
949
950 #ifdef SMP
951 static void
952 gic_v3_init_secondary(device_t dev)
953 {
954         struct gic_v3_softc *sc;
955         gic_v3_initseq_t *init_func;
956         int err;
957
958         sc = device_get_softc(dev);
959
960         /* Run the init sequence on this secondary CPU */
961         for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) {
962                 err = (*init_func)(sc);
963                 if (err != 0) {
964                         device_printf(dev,
965                             "Could not initialize GIC for CPU%u\n",
966                             PCPU_GET(cpuid));
967                         return;
968                 }
969         }
970
971         /*
972          * Try to initialize ITS.
973          * If no ITS driver is attached this routine will fail, but that is
974          * not a failure here; it only means LPIs will not be functional
975          * on the current CPU.
976          */
977         if (its_init_cpu(NULL) != 0) {
978                 device_printf(dev,
979                     "Could not initialize ITS for CPU%u. "
980                     "No LPIs will arrive on this CPU\n",
981                     PCPU_GET(cpuid));
982         }
983
984         /*
985          * ARM64TODO:   Unmask the timer PPIs. To be removed once an
986          *              appropriate mechanism is implemented.
987          *              Activate the timer interrupts: virtual (27), secure (29),
988          *              and non-secure (30). Hardcoded values are used here as
989          *              there are no defines for them.
990          */
991         gic_v3_unmask_irq(dev, 27);
992         gic_v3_unmask_irq(dev, 29);
993         gic_v3_unmask_irq(dev, 30);
994 }
995
996 static void
997 gic_v3_ipi_send(device_t dev, cpuset_t cpuset, u_int ipi)
998 {
999         u_int cpu;
1000         uint64_t aff, tlist;
1001         uint64_t val;
1002         uint64_t aff_mask;
1003
1004         /* Set affinity mask to match level 3, 2 and 1 */
1005         aff_mask = CPU_AFF1_MASK | CPU_AFF2_MASK | CPU_AFF3_MASK;
1006
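        /*
         * A single ICC_SGI1R_EL1 write can only target CPUs that share the
         * same Aff3.Aff2.Aff1 value, so build a 16-bit Aff0 target list per
         * affinity group and send one SGI per group.
         */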
1007         /* Iterate through all CPUs in set */
1008         while (!CPU_EMPTY(&cpuset)) {
1009                 aff = tlist = 0;
1010                 for (cpu = 0; cpu < mp_ncpus; cpu++) {
1011                         /* Compose target list for single AFF3:AFF2:AFF1 set */
1012                         if (CPU_ISSET(cpu, &cpuset)) {
1013                                 if (!tlist) {
1014                                         /*
1015                                          * Save affinity of the first CPU to
1016                                          * send IPI to for later comparison.
1017                                          */
1018                                         aff = CPU_AFFINITY(cpu);
1019                                         tlist |= (1UL << CPU_AFF0(aff));
1020                                         CPU_CLR(cpu, &cpuset);
1021                                 }
1022                                 /* Check for same Affinity level 3, 2 and 1 */
1023                                 if ((aff & aff_mask) == (CPU_AFFINITY(cpu) & aff_mask)) {
1024                                         tlist |= (1UL << CPU_AFF0(CPU_AFFINITY(cpu)));
1025                                         /* Clear CPU in cpuset from target list */
1026                                         CPU_CLR(cpu, &cpuset);
1027                                 }
1028                         }
1029                 }
1030                 if (tlist) {
1031                         KASSERT((tlist & ~ICC_SGI1R_EL1_TL_MASK) == 0,
1032                             ("Target list too long for GICv3 IPI"));
1033                         /* Send SGI to CPUs in target list */
1034                         val = tlist;
1035                         val |= (uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT;
1036                         val |= (uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT;
1037                         val |= (uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT;
1038                         val |= (uint64_t)(ipi & ICC_SGI1R_EL1_SGIID_MASK) <<
1039                             ICC_SGI1R_EL1_SGIID_SHIFT;
1040                         gic_icc_write(SGI1R, val);
1041                 }
1042         }
1043 }
1044 #endif
1045 #endif /* !INTRNG */
1046
1047 /*
1048  * Helper routines
1049  */
1050 static void
1051 gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
1052 {
1053         struct resource *res;
1054         u_int cpuid;
1055         size_t us_left = 1000000;
1056
1057         cpuid = PCPU_GET(cpuid);
1058
1059         switch (xdist) {
1060         case DIST:
1061                 res = sc->gic_dist;
1062                 break;
1063         case REDIST:
1064                 res = sc->gic_redists.pcpu[cpuid];
1065                 break;
1066         default:
1067                 KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
1068                 return;
1069         }
1070
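        /*
         * RWP (Register Write Pending) reads as one while the effects of a
         * previous control register write are still propagating. GICR_CTLR
         * shares offset 0 with GICD_CTLR, so the same read serves both the
         * Distributor and the Re-Distributor. Poll for up to ~1 second.
         */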
1071         while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
1072                 DELAY(1);
1073                 if (us_left-- == 0)
1074                         panic("GIC register write pending for too long");
1075         }
1076 }
1077
1078 /* CPU interface. */
1079 static __inline void
1080 gic_v3_cpu_priority(uint64_t mask)
1081 {
1082
1083         /* Set priority mask */
1084         gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
1085 }
1086
1087 static int
1088 gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
1089 {
1090         uint64_t sre;
1091         u_int cpuid;
1092
1093         cpuid = PCPU_GET(cpuid);
1094         /*
1095          * Set the SRE bit to enable access to GIC CPU interface
1096          * via system registers.
1097          */
1098         sre = READ_SPECIALREG(icc_sre_el1);
1099         sre |= ICC_SRE_EL1_SRE;
1100         WRITE_SPECIALREG(icc_sre_el1, sre);
1101         isb();
1102         /*
1103          * Now ensure that the bit is set.
1104          */
1105         sre = READ_SPECIALREG(icc_sre_el1);
1106         if ((sre & ICC_SRE_EL1_SRE) == 0) {
1107                 /* The bit did not stick: access was disabled at EL2 */
1108                 device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
1109                     "via system registers\n", cpuid);
1110                 return (ENXIO);
1111         } else if (bootverbose) {
1112                 device_printf(sc->dev,
1113                     "CPU%u enabled CPU interface via system registers\n",
1114                     cpuid);
1115         }
1116
1117         return (0);
1118 }
1119
1120 static int
1121 gic_v3_cpu_init(struct gic_v3_softc *sc)
1122 {
1123         int err;
1124
1125         /* Enable access to CPU interface via system registers */
1126         err = gic_v3_cpu_enable_sre(sc);
1127         if (err != 0)
1128                 return (err);
1129         /* Priority mask to minimum - accept all interrupts */
1130         gic_v3_cpu_priority(GIC_PRIORITY_MIN);
1131         /* Disable EOI mode */
1132         gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
1133         /* Enable Group 1 (Non-secure) interrupts */
1134         gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);
1135
1136         return (0);
1137 }
1138
1139 /* Distributor */
1140 static int
1141 gic_v3_dist_init(struct gic_v3_softc *sc)
1142 {
1143         uint64_t aff;
1144         u_int i;
1145
1146         /*
1147          * 1. Disable the Distributor
1148          */
1149         gic_d_write(sc, 4, GICD_CTLR, 0);
1150         gic_v3_wait_for_rwp(sc, DIST);
1151
1152         /*
1153          * 2. Configure the Distributor
1154          */
1155         /* Set all global interrupts (SPIs) to be level-sensitive. */
1156         for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
1157                 gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);
1158
1159         /* Set a default priority for all shared interrupts */
1160         for (i = GIC_FIRST_SPI;
1161             i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
1162                 /* Set highest priority */
1163                 gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
1164         }
1165
1166         /*
1167          * Disable all interrupts. Leave the PPIs and SGIs alone, as they
1168          * are enabled and disabled via the Re-Distributor registers.
1169          */
1170         for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
1171                 gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);
1172
1173         gic_v3_wait_for_rwp(sc, DIST);
1174
1175         /*
1176          * 3. Enable Distributor
1177          */
1178         /* Enable Distributor with ARE, Group 1 */
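        /*
         * ARE_NS enables affinity routing for the Non-secure state; G1A and
         * G1 enable Non-secure Group 1 interrupts.
         */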
1179         gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
1180             GICD_CTLR_G1);
1181
1182         /*
1183          * 4. Route all interrupts to boot CPU.
1184          */
1185         aff = CPU_AFFINITY(0);
1186         for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
1187                 gic_d_write(sc, 4, GICD_IROUTER(i), aff);
1188
1189         return (0);
1190 }
1191
1192 /* Re-Distributor */
1193 static int
1194 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1195 {
1196         u_int cpuid;
1197
1198         /* Allocate a struct resource for each CPU's Re-Distributor registers */
1199         for (cpuid = 0; cpuid < mp_ncpus; cpuid++)
1200                 if (CPU_ISSET(cpuid, &all_cpus) != 0)
1201                         sc->gic_redists.pcpu[cpuid] =
1202                                 malloc(sizeof(*sc->gic_redists.pcpu[0]),
1203                                     M_GIC_V3, M_WAITOK);
1204                 else
1205                         sc->gic_redists.pcpu[cpuid] = NULL;
1206         return (0);
1207 }
1208
1209 static int
1210 gic_v3_redist_find(struct gic_v3_softc *sc)
1211 {
1212         struct resource r_res;
1213         bus_space_handle_t r_bsh;
1214         uint64_t aff;
1215         uint64_t typer;
1216         uint32_t pidr2;
1217         u_int cpuid;
1218         size_t i;
1219
1220         cpuid = PCPU_GET(cpuid);
1221
1222         aff = CPU_AFFINITY(cpuid);
1223         /* Affinity in format for comparison with typer */
1224         aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
1225             (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);
1226
1227         if (bootverbose) {
1228                 device_printf(sc->dev,
1229                     "Start searching for Re-Distributor\n");
1230         }
1231         /* Iterate through Re-Distributor regions */
1232         for (i = 0; i < sc->gic_redists.nregions; i++) {
1233                 /* Take a copy of the region's resource */
1234                 r_res = *sc->gic_redists.regions[i];
1235                 r_bsh = rman_get_bushandle(&r_res);
1236
1237                 pidr2 = bus_read_4(&r_res, GICR_PIDR2);
1238                 switch (pidr2 & GICR_PIDR2_ARCH_MASK) {
1239                 case GICR_PIDR2_ARCH_GICv3: /* fall through */
1240                 case GICR_PIDR2_ARCH_GICv4:
1241                         break;
1242                 default:
1243                         device_printf(sc->dev,
1244                             "No Re-Distributor found for CPU%u\n", cpuid);
1245                         return (ENODEV);
1246                 }
1247
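                /*
                 * Walk the Re-Distributor frames in this region: each one
                 * occupies an RD_base and an SGI_base frame, plus two more
                 * frames when GICR_TYPER.VLPIS is set. GICR_TYPER.Last marks
                 * the final Re-Distributor in the region.
                 */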
1248                 do {
1249                         typer = bus_read_8(&r_res, GICR_TYPER);
1250                         if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
1251                                 KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
1252                                     ("Invalid pointer to per-CPU redistributor"));
1253                                 /* Copy res contents to its final destination */
1254                                 *sc->gic_redists.pcpu[cpuid] = r_res;
1255                                 if (bootverbose) {
1256                                         device_printf(sc->dev,
1257                                             "CPU%u Re-Distributor has been found\n",
1258                                             cpuid);
1259                                 }
1260                                 return (0);
1261                         }
1262
1263                         r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
1264                         if ((typer & GICR_TYPER_VLPIS) != 0) {
1265                                 r_bsh +=
1266                                     (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
1267                         }
1268
1269                         rman_set_bushandle(&r_res, r_bsh);
1270                 } while ((typer & GICR_TYPER_LAST) == 0);
1271         }
1272
1273         device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
1274         return (ENXIO);
1275 }
1276
1277 static int
1278 gic_v3_redist_wake(struct gic_v3_softc *sc)
1279 {
1280         uint32_t waker;
1281         size_t us_left = 1000000;
1282
1283         waker = gic_r_read(sc, 4, GICR_WAKER);
1284         /* Wake up Re-Distributor for this CPU */
1285         waker &= ~GICR_WAKER_PS;
1286         gic_r_write(sc, 4, GICR_WAKER, waker);
1287         /*
1288          * When clearing ProcessorSleep bit it is required to wait for
1289          * ChildrenAsleep to become zero following the processor power-on.
1290          */
1291         while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1292                 DELAY(1);
1293                 if (us_left-- == 0) {
1294                         panic("Could not wake Re-Distributor for CPU%u",
1295                             PCPU_GET(cpuid));
1296                 }
1297         }
1298
1299         if (bootverbose) {
1300                 device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
1301                     PCPU_GET(cpuid));
1302         }
1303
1304         return (0);
1305 }
1306
1307 static int
1308 gic_v3_redist_init(struct gic_v3_softc *sc)
1309 {
1310         int err;
1311         size_t i;
1312
1313         err = gic_v3_redist_find(sc);
1314         if (err != 0)
1315                 return (err);
1316
1317         err = gic_v3_redist_wake(sc);
1318         if (err != 0)
1319                 return (err);
1320
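        /*
         * The ISENABLER0/ICENABLER0 registers in the Re-Distributor SGI
         * frame cover INTIDs 0-31, i.e. this CPU's SGIs and PPIs.
         */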
1321         /* Disable PPIs */
1322         gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
1323             GICR_I_ENABLER_PPI_MASK);
1324         /* Enable SGIs */
1325         gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
1326             GICR_I_ENABLER_SGI_MASK);
1327
1328         /* Set priority for SGIs and PPIs */
1329         for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
1330                 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
1331                     GIC_PRIORITY_MAX);
1332         }
1333
1334         gic_v3_wait_for_rwp(sc, REDIST);
1335
1336         return (0);
1337 }