1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  * All rights reserved.
4  *
5  * This software was developed by Andrew Turner under
6  * the sponsorship of the FreeBSD Foundation.
7  *
8  * This software was developed by Semihalf under
9  * the sponsorship of the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32
33 #include "opt_acpi.h"
34 #include "opt_platform.h"
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/cpuset.h>
43 #include <sys/endian.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/module.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/rman.h>
52 #include <sys/sbuf.h>
53 #include <sys/smp.h>
54 #include <sys/sysctl.h>
55 #include <sys/vmem.h>
56
57 #include <vm/vm.h>
58 #include <vm/pmap.h>
59
60 #include <machine/bus.h>
61 #include <machine/intr.h>
62
63 #include <arm/arm/gic_common.h>
64 #include <arm64/arm64/gic_v3_reg.h>
65 #include <arm64/arm64/gic_v3_var.h>
66
67 #ifdef FDT
68 #include <dev/ofw/openfirm.h>
69 #include <dev/ofw/ofw_bus.h>
70 #include <dev/ofw/ofw_bus_subr.h>
71 #endif
72 #include <dev/pci/pcireg.h>
73 #include <dev/pci/pcivar.h>
74
75 #include "pcib_if.h"
76 #include "pic_if.h"
77 #include "msi_if.h"
78
79 MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS",
80     "ARM GICv3 Interrupt Translation Service");
81
82 #define LPI_NIRQS               (64 * 1024)
83
84 /* The size and alignment of the command circular buffer */
85 #define ITS_CMDQ_SIZE           (64 * 1024)     /* Must be a multiple of 4K */
86 #define ITS_CMDQ_ALIGN          (64 * 1024)
87
88 #define LPI_CONFTAB_SIZE        LPI_NIRQS
89 #define LPI_CONFTAB_ALIGN       (64 * 1024)
90 #define LPI_CONFTAB_MAX_ADDR    ((1ul << 48) - 1) /* Keep the PA below 48 bits */
91
92 /* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */
93 #define LPI_PENDTAB_SIZE        ((LPI_NIRQS + GIC_FIRST_LPI) / 8)
94 #define LPI_PENDTAB_ALIGN       (64 * 1024)
95 #define LPI_PENDTAB_MAX_ADDR    ((1ul << 48) - 1) /* Keep the PA below 48 bits */
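/*
 * Per the GICv3 architecture the configuration table holds one byte per
 * LPI (its enable bit and priority), while the pending table holds one
 * bit per interrupt ID starting at ID 0, which is why the 8192 IDs below
 * GIC_FIRST_LPI are included in LPI_PENDTAB_SIZE.
 */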
96
97 #define LPI_INT_TRANS_TAB_ALIGN 256
98 #define LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)
99
100 /* ITS commands encoding */
101 #define ITS_CMD_MOVI            (0x01)
102 #define ITS_CMD_SYNC            (0x05)
103 #define ITS_CMD_MAPD            (0x08)
104 #define ITS_CMD_MAPC            (0x09)
105 #define ITS_CMD_MAPTI           (0x0a)
106 #define ITS_CMD_MAPI            (0x0b)
107 #define ITS_CMD_INV             (0x0c)
108 #define ITS_CMD_INVALL          (0x0d)
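/*
 * In brief: MAPD binds a DeviceID to an ITT, MAPC binds a collection to
 * a target redistributor, MAPTI binds a (DeviceID, EventID) pair to a
 * physical LPI and collection, MOVI moves an event to another collection,
 * INV and INVALL make the redistributor re-read the LPI configuration,
 * and SYNC waits for earlier commands to reach the given redistributor.
 */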
109 /* Command */
110 #define CMD_COMMAND_MASK        (0xFFUL)
111 /* PCI device ID */
112 #define CMD_DEVID_SHIFT         (32)
113 #define CMD_DEVID_MASK          (0xFFFFFFFFUL << CMD_DEVID_SHIFT)
114 /* Size of IRQ ID bitfield */
115 #define CMD_SIZE_MASK           (0xFFUL)
116 /* Virtual LPI ID */
117 #define CMD_ID_MASK             (0xFFFFFFFFUL)
118 /* Physical LPI ID */
119 #define CMD_PID_SHIFT           (32)
120 #define CMD_PID_MASK            (0xFFFFFFFFUL << CMD_PID_SHIFT)
121 /* Collection */
122 #define CMD_COL_MASK            (0xFFFFUL)
123 /* Target (CPU or Re-Distributor) */
124 #define CMD_TARGET_SHIFT        (16)
125 #define CMD_TARGET_MASK         (0xFFFFFFFFUL << CMD_TARGET_SHIFT)
126 /* Interrupt Translation Table address */
127 #define CMD_ITT_MASK            (0xFFFFFFFFFF00UL)
128 /* Valid command bit */
129 #define CMD_VALID_SHIFT         (63)
130 #define CMD_VALID_MASK          (1UL << CMD_VALID_SHIFT)
131
132 #define ITS_TARGET_NONE         0xFBADBEEF
133
134 /* LPI chunk owned by ITS device */
135 struct lpi_chunk {
136         u_int   lpi_base;
137         u_int   lpi_free;       /* First free LPI in set */
138         u_int   lpi_num;        /* Total number of LPIs in chunk */
139         u_int   lpi_busy;       /* Number of busy LPIs in chunk */
140 };
141
142 /* ITS device */
143 struct its_dev {
144         TAILQ_ENTRY(its_dev)    entry;
145         /* PCI device */
146         device_t                pci_dev;
147         /* Device ID (i.e. PCI device ID) */
148         uint32_t                devid;
149         /* List of assigned LPIs */
150         struct lpi_chunk        lpis;
151         /* Virtual address of ITT */
152         vm_offset_t             itt;
153         size_t                  itt_size;
154 };
155
156 /*
157  * ITS command descriptor.
158  * Idea for command description passing taken from Linux.
159  */
160 struct its_cmd_desc {
161         uint8_t cmd_type;
162
163         union {
164                 struct {
165                         struct its_dev *its_dev;
166                         struct its_col *col;
167                         uint32_t id;
168                 } cmd_desc_movi;
169
170                 struct {
171                         struct its_col *col;
172                 } cmd_desc_sync;
173
174                 struct {
175                         struct its_col *col;
176                         uint8_t valid;
177                 } cmd_desc_mapc;
178
179                 struct {
180                         struct its_dev *its_dev;
181                         struct its_col *col;
182                         uint32_t pid;
183                         uint32_t id;
184                 } cmd_desc_mapvi;
185
186                 struct {
187                         struct its_dev *its_dev;
188                         struct its_col *col;
189                         uint32_t pid;
190                 } cmd_desc_mapi;
191
192                 struct {
193                         struct its_dev *its_dev;
194                         uint8_t valid;
195                 } cmd_desc_mapd;
196
197                 struct {
198                         struct its_dev *its_dev;
199                         struct its_col *col;
200                         uint32_t pid;
201                 } cmd_desc_inv;
202
203                 struct {
204                         struct its_col *col;
205                 } cmd_desc_invall;
206         };
207 };
208
209 /* ITS command. Each command is 32 bytes long */
210 struct its_cmd {
211         uint64_t        cmd_dword[4];   /* ITS command double word */
212 };
213
214 /* An ITS private table */
215 struct its_ptable {
216         vm_offset_t     ptab_vaddr;
217         unsigned long   ptab_size;
218 };
219
220 /* ITS collection description. */
221 struct its_col {
222         uint64_t        col_target;     /* Target Re-Distributor */
223         uint64_t        col_id;         /* Collection ID */
224 };
225
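/*
 * Per-interrupt state: gi_id is the per-device EventID used as the MSI
 * data value, gi_lpi is the physical LPI number relative to GIC_FIRST_LPI.
 */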
226 struct gicv3_its_irqsrc {
227         struct intr_irqsrc      gi_isrc;
228         u_int                   gi_id;
229         u_int                   gi_lpi;
230         struct its_dev          *gi_its_dev;
231         TAILQ_ENTRY(gicv3_its_irqsrc) gi_link;
232 };
233
234 struct gicv3_its_softc {
235         device_t        dev;
236         struct intr_pic *sc_pic;
237         struct resource *sc_its_res;
238
239         cpuset_t        sc_cpus;
240         u_int           gic_irq_cpu;
241
242         struct its_ptable sc_its_ptab[GITS_BASER_NUM];
243         struct its_col *sc_its_cols[MAXCPU];    /* Per-CPU collections */
244
245         /*
246          * TODO: We should get these from the parent as we only want a
247          * single copy of each across the interrupt controller.
248          */
249         uint8_t         *sc_conf_base;
250         vm_offset_t sc_pend_base[MAXCPU];
251
252         /* Command handling */
253         struct mtx sc_its_cmd_lock;
254         struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */
255         size_t sc_its_cmd_next_idx;
256
257         vmem_t *sc_irq_alloc;
258         struct gicv3_its_irqsrc **sc_irqs;
259         u_int   sc_irq_base;
260         u_int   sc_irq_length;
261         u_int   sc_irq_count;
262
263         struct mtx sc_its_dev_lock;
264         TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
265         TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs;
266
267 #define ITS_FLAGS_CMDQ_FLUSH            0x00000001
268 #define ITS_FLAGS_LPI_CONF_FLUSH        0x00000002
269 #define ITS_FLAGS_ERRATA_CAVIUM_22375   0x00000004
270         u_int sc_its_flags;
271         bool    trace_enable;
272 };
273
274 static void *conf_base;
275
276 typedef void (its_quirk_func_t)(device_t);
277 static its_quirk_func_t its_quirk_cavium_22375;
278
279 static const struct {
280         const char *desc;
281         uint32_t iidr;
282         uint32_t iidr_mask;
283         its_quirk_func_t *func;
284 } its_quirks[] = {
285         {
286                 /* Cavium ThunderX Pass 1.x */
287                 .desc = "Cavium ThunderX errata: 22375, 24313",
288                 .iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
289                     GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
290                 .iidr_mask = ~GITS_IIDR_REVISION_MASK,
291                 .func = its_quirk_cavium_22375,
292         },
293 };
294
295 #define gic_its_read_4(sc, reg)                 \
296     bus_read_4((sc)->sc_its_res, (reg))
297 #define gic_its_read_8(sc, reg)                 \
298     bus_read_8((sc)->sc_its_res, (reg))
299
300 #define gic_its_write_4(sc, reg, val)           \
301     bus_write_4((sc)->sc_its_res, (reg), (val))
302 #define gic_its_write_8(sc, reg, val)           \
303     bus_write_8((sc)->sc_its_res, (reg), (val))
304
305 static device_attach_t gicv3_its_attach;
306 static device_detach_t gicv3_its_detach;
307
308 static pic_disable_intr_t gicv3_its_disable_intr;
309 static pic_enable_intr_t gicv3_its_enable_intr;
310 static pic_map_intr_t gicv3_its_map_intr;
311 static pic_setup_intr_t gicv3_its_setup_intr;
312 static pic_post_filter_t gicv3_its_post_filter;
313 static pic_post_ithread_t gicv3_its_post_ithread;
314 static pic_pre_ithread_t gicv3_its_pre_ithread;
315 static pic_bind_intr_t gicv3_its_bind_intr;
316 #ifdef SMP
317 static pic_init_secondary_t gicv3_its_init_secondary;
318 #endif
319 static msi_alloc_msi_t gicv3_its_alloc_msi;
320 static msi_release_msi_t gicv3_its_release_msi;
321 static msi_alloc_msix_t gicv3_its_alloc_msix;
322 static msi_release_msix_t gicv3_its_release_msix;
323 static msi_map_msi_t gicv3_its_map_msi;
324
325 static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
326 static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
327 static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
328 static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
329 static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
330 static void its_cmd_invall(device_t, struct its_col *);
331
332 static device_method_t gicv3_its_methods[] = {
333         /* Device interface */
334         DEVMETHOD(device_detach,        gicv3_its_detach),
335
336         /* Interrupt controller interface */
337         DEVMETHOD(pic_disable_intr,     gicv3_its_disable_intr),
338         DEVMETHOD(pic_enable_intr,      gicv3_its_enable_intr),
339         DEVMETHOD(pic_map_intr,         gicv3_its_map_intr),
340         DEVMETHOD(pic_setup_intr,       gicv3_its_setup_intr),
341         DEVMETHOD(pic_post_filter,      gicv3_its_post_filter),
342         DEVMETHOD(pic_post_ithread,     gicv3_its_post_ithread),
343         DEVMETHOD(pic_pre_ithread,      gicv3_its_pre_ithread),
344 #ifdef SMP
345         DEVMETHOD(pic_bind_intr,        gicv3_its_bind_intr),
346         DEVMETHOD(pic_init_secondary,   gicv3_its_init_secondary),
347 #endif
348
349         /* MSI/MSI-X */
350         DEVMETHOD(msi_alloc_msi,        gicv3_its_alloc_msi),
351         DEVMETHOD(msi_release_msi,      gicv3_its_release_msi),
352         DEVMETHOD(msi_alloc_msix,       gicv3_its_alloc_msix),
353         DEVMETHOD(msi_release_msix,     gicv3_its_release_msix),
354         DEVMETHOD(msi_map_msi,          gicv3_its_map_msi),
355
356         /* End */
357         DEVMETHOD_END
358 };
359
360 static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
361     sizeof(struct gicv3_its_softc));
362
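/*
 * Allocate the 64KB command queue and program GITS_CBASER with its
 * physical address, attributes and size (in 4KB pages, minus one).  If
 * the ITS will only accept non-shareable accesses fall back to a
 * non-cacheable, non-shareable setting and remember that each command
 * must be cleaned from the data cache before the ITS consumes it.
 */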
363 static void
364 gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
365 {
366         vm_paddr_t cmd_paddr;
367         uint64_t reg, tmp;
368
369         /* Set up the command circular buffer */
370         sc->sc_its_cmd_base = contigmalloc(ITS_CMDQ_SIZE, M_GICV3_ITS,
371             M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN, 0);
372         sc->sc_its_cmd_next_idx = 0;
373
374         cmd_paddr = vtophys(sc->sc_its_cmd_base);
375
376         /* Set the base of the command buffer */
377         reg = GITS_CBASER_VALID |
378             (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
379             cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
380             (ITS_CMDQ_SIZE / 4096 - 1);
381         gic_its_write_8(sc, GITS_CBASER, reg);
382
383         /* Read back to check for fixed value fields */
384         tmp = gic_its_read_8(sc, GITS_CBASER);
385
386         if ((tmp & GITS_CBASER_SHARE_MASK) !=
387             (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
388                 /* Check if the hardware reported non-shareable */
389                 if ((tmp & GITS_CBASER_SHARE_MASK) ==
390                     (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
391                         /* If so remove the cache attribute */
392                         reg &= ~GITS_CBASER_CACHE_MASK;
393                         reg &= ~GITS_CBASER_SHARE_MASK;
394                         /* Set to Non-cacheable, Non-shareable */
395                         reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
396                         reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;
397
398                         gic_its_write_8(sc, GITS_CBASER, reg);
399                 }
400
401                 /* The command queue has to be flushed after each command */
402                 sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
403         }
404
405         /* Get the next command from the start of the buffer */
406         gic_its_write_8(sc, GITS_CWRITER, 0x0);
407 }
408
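/*
 * Allocate and program the private tables advertised by GITS_BASERn
 * (e.g. the device and collection tables).  The page size and
 * shareability are negotiated by writing each register and reading it
 * back, retrying with smaller pages or downgraded attributes until the
 * ITS accepts the configuration.
 */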
409 static int
410 gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
411 {
412         vm_offset_t table;
413         vm_paddr_t paddr;
414         uint64_t cache, reg, share, tmp, type;
415         size_t esize, its_tbl_size, nidents, nitspages, npages;
416         int i, page_size;
417         int devbits;
418
419         if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
420                 /*
421                  * GITS_TYPER[17:13] of ThunderX reports that device IDs
422                  * are to be 21 bits in length. The entry size of the ITS
423                  * table can be read from GITS_BASERn[52:48] and on ThunderX
424                  * is supposed to be 8 bytes in length (for device table).
425                  * Finally the page size that is to be used by ITS to access
426                  * this table will be set to 64KB.
427                  *
428                  * This gives 0x200000 entries of size 0x8 bytes covered by
429                  * 256 pages each of which 64KB in size. The number of pages
430                  * (minus 1) should then be written to GITS_BASERn[7:0]. In
431                  * that case this value would be 0xFF but on ThunderX the
432                  * maximum value that HW accepts is 0xFD.
433                  *
434                  * Set an arbitrary number of device ID bits to 20 in order
435                  * to limit the number of entries in ITS device table to
436                  * 0x100000 and the table size to 8MB.
437                  */
438                 devbits = 20;
439                 cache = 0;
440         } else {
441                 devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
442                 cache = GITS_BASER_CACHE_WAWB;
443         }
444         share = GITS_BASER_SHARE_IS;
445         page_size = PAGE_SIZE_64K;
446
447         for (i = 0; i < GITS_BASER_NUM; i++) {
448                 reg = gic_its_read_8(sc, GITS_BASER(i));
449                 /* The type of table */
450                 type = GITS_BASER_TYPE(reg);
451                 /* The table entry size */
452                 esize = GITS_BASER_ESIZE(reg);
453
454                 switch (type) {
455                 case GITS_BASER_TYPE_DEV:
456                         nidents = (1 << devbits);
457                         its_tbl_size = esize * nidents;
458                         its_tbl_size = roundup2(its_tbl_size, PAGE_SIZE_64K);
459                         break;
460                 case GITS_BASER_TYPE_VP:
461                 case GITS_BASER_TYPE_PP: /* Undocumented? */
462                 case GITS_BASER_TYPE_IC:
463                         its_tbl_size = page_size;
464                         break;
465                 default:
466                         continue;
467                 }
468                 npages = howmany(its_tbl_size, PAGE_SIZE);
469
470                 /* Allocate the table */
471                 table = (vm_offset_t)contigmalloc(npages * PAGE_SIZE,
472                     M_GICV3_ITS, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1,
473                     PAGE_SIZE_64K, 0);
474
475                 sc->sc_its_ptab[i].ptab_vaddr = table;
476                 sc->sc_its_ptab[i].ptab_size = npages * PAGE_SIZE;
477
478                 paddr = vtophys(table);
479
480                 while (1) {
481                         nitspages = howmany(its_tbl_size, page_size);
482
483                         /* Clear the fields we will be setting */
484                         reg &= ~(GITS_BASER_VALID |
485                             GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
486                             GITS_BASER_ESIZE_MASK | GITS_BASER_PA_MASK |
487                             GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
488                             GITS_BASER_SIZE_MASK);
489                         /* Set the new values */
490                         reg |= GITS_BASER_VALID |
491                             (cache << GITS_BASER_CACHE_SHIFT) |
492                             (type << GITS_BASER_TYPE_SHIFT) |
493                             ((esize - 1) << GITS_BASER_ESIZE_SHIFT) |
494                             paddr | (share << GITS_BASER_SHARE_SHIFT) |
495                             (nitspages - 1);
496
497                         switch (page_size) {
498                         case PAGE_SIZE:         /* 4KB */
499                                 reg |=
500                                     GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
501                                 break;
502                         case PAGE_SIZE_16K:     /* 16KB */
503                                 reg |=
504                                     GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
505                                 break;
506                         case PAGE_SIZE_64K:     /* 64KB */
507                                 reg |=
508                                     GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
509                                 break;
510                         }
511
512                         gic_its_write_8(sc, GITS_BASER(i), reg);
513
514                         /* Read back to check */
515                         tmp = gic_its_read_8(sc, GITS_BASER(i));
516
517                         /* Do the shareability masks line up? */
518                         if ((tmp & GITS_BASER_SHARE_MASK) !=
519                             (reg & GITS_BASER_SHARE_MASK)) {
520                                 share = (tmp & GITS_BASER_SHARE_MASK) >>
521                                     GITS_BASER_SHARE_SHIFT;
522                                 continue;
523                         }
524
525                         if ((tmp & GITS_BASER_PSZ_MASK) !=
526                             (reg & GITS_BASER_PSZ_MASK)) {
527                                 switch (page_size) {
528                                 case PAGE_SIZE_16K:
529                                         page_size = PAGE_SIZE;
530                                         continue;
531                                 case PAGE_SIZE_64K:
532                                         page_size = PAGE_SIZE_16K;
533                                         continue;
534                                 }
535                         }
536
537                         if (tmp != reg) {
538                                 device_printf(dev, "GITS_BASER%d: "
539                                     "unable to be updated: %lx != %lx\n",
540                                     i, reg, tmp);
541                                 return (ENXIO);
542                         }
543
544                         /* We should have made all needed changes */
545                         break;
546                 }
547         }
548
549         return (0);
550 }
551
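/*
 * Set up the LPI configuration table.  A single table is shared by all
 * ITS instances (hence the global conf_base and the compare-and-set) and
 * every entry starts with the default priority and group set but the
 * enable bit clear.
 */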
552 static void
553 gicv3_its_conftable_init(struct gicv3_its_softc *sc)
554 {
555         void *conf_table;
556
557         conf_table = atomic_load_ptr(&conf_base);
558         if (conf_table == NULL) {
559                 conf_table = contigmalloc(LPI_CONFTAB_SIZE,
560                     M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR,
561                     LPI_CONFTAB_ALIGN, 0);
562
563                 if (atomic_cmpset_ptr((uintptr_t *)&conf_base,
564                     (uintptr_t)NULL, (uintptr_t)conf_table) == 0) {
565                         contigfree(conf_table, LPI_CONFTAB_SIZE, M_GICV3_ITS);
566                         conf_table = atomic_load_ptr(&conf_base);
567                 }
568         }
569         sc->sc_conf_base = conf_table;
570
571         /* Set the default configuration */
572         memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
573             LPI_CONFTAB_SIZE);
574
575         /* Flush the table to memory */
576         cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE);
577 }
578
579 static void
580 gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
581 {
582         int i;
583
584         for (i = 0; i <= mp_maxid; i++) {
585                 if (CPU_ISSET(i, &sc->sc_cpus) == 0)
586                         continue;
587
588                 sc->sc_pend_base[i] = (vm_offset_t)contigmalloc(
589                     LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
590                     0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);
591
592                 /* Flush so the ITS can see the memory */
593                 cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i],
594                     LPI_PENDTAB_SIZE);
595         }
596 }
597
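/*
 * Program this CPU's redistributor for LPIs: point GICR_PROPBASER at the
 * shared configuration table and GICR_PENDBASER at the per-CPU pending
 * table, downgrade the attributes if only non-shareable accesses are
 * supported, then set GICR_CTLR.EnableLPIs.
 */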
598 static void
599 its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
600 {
601         device_t gicv3;
602         uint64_t xbaser, tmp;
603         uint32_t ctlr;
604         u_int cpuid;
605
606         gicv3 = device_get_parent(dev);
607         cpuid = PCPU_GET(cpuid);
608
609         /* Disable LPIs */
610         ctlr = gic_r_read_4(gicv3, GICR_CTLR);
611         ctlr &= ~GICR_CTLR_LPI_ENABLE;
612         gic_r_write_4(gicv3, GICR_CTLR, ctlr);
613
614         /* Make sure changes are observable by the GIC */
615         dsb(sy);
616
617         /*
618          * Set the redistributor base
619          */
620         xbaser = vtophys(sc->sc_conf_base) |
621             (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
622             (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
623             (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
624         gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
625
626         /* Check the cache attributes we set */
627         tmp = gic_r_read_8(gicv3, GICR_PROPBASER);
628
629         if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
630             (xbaser & GICR_PROPBASER_SHARE_MASK)) {
631                 if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
632                     (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
633                         /* We need to mark as non-cacheable */
634                         xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
635                             GICR_PROPBASER_CACHE_MASK);
636                         /* Non-cacheable */
637                         xbaser |= GICR_PROPBASER_CACHE_NIN <<
638                             GICR_PROPBASER_CACHE_SHIFT;
639                         /* Non-shareable */
640                         xbaser |= GICR_PROPBASER_SHARE_NS <<
641                             GICR_PROPBASER_SHARE_SHIFT;
642                         gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
643                 }
644                 sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
645         }
646
647         /*
648          * Set the LPI pending table base
649          */
650         xbaser = vtophys(sc->sc_pend_base[cpuid]) |
651             (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
652             (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);
653
654         gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);
655
656         tmp = gic_r_read_8(gicv3, GICR_PENDBASER);
657
658         if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
659             (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
660                 /* Clear the cache and shareability bits */
661                 xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
662                     GICR_PENDBASER_SHARE_MASK);
663                 /* Mark as non-shareable */
664                 xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
665                 /* And non-cacheable */
666                 xbaser |= GICR_PENDBASER_CACHE_NIN <<
667                     GICR_PENDBASER_CACHE_SHIFT;
668         }
669
670         /* Enable LPIs */
671         ctlr = gic_r_read_4(gicv3, GICR_CTLR);
672         ctlr |= GICR_CTLR_LPI_ENABLE;
673         gic_r_write_4(gicv3, GICR_CTLR, ctlr);
674
675         /* Make sure the GIC has seen everything */
676         dsb(sy);
677 }
678
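/*
 * Per-CPU ITS setup: map this CPU's collection to its redistributor with
 * a MAPC command.  Depending on GITS_TYPER.PTA the target is either the
 * redistributor's physical address or its processor number; an INVALL
 * then makes the redistributor re-read the configuration for the new
 * collection.
 */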
679 static int
680 its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
681 {
682         device_t gicv3;
683         vm_paddr_t target;
684         u_int cpuid;
685         struct redist_pcpu *rpcpu;
686
687         gicv3 = device_get_parent(dev);
688         cpuid = PCPU_GET(cpuid);
689         if (!CPU_ISSET(cpuid, &sc->sc_cpus))
690                 return (0);
691
692         /* Check if the ITS is enabled on this CPU */
693         if ((gic_r_read_4(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0)
694                 return (ENXIO);
695
696         rpcpu = gicv3_get_redist(dev);
697
698         /* Do per-cpu LPI init once */
699         if (!rpcpu->lpi_enabled) {
700                 its_init_cpu_lpi(dev, sc);
701                 rpcpu->lpi_enabled = true;
702         }
703
704         if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
705                 /* This ITS wants the redistributor physical address */
706                 target = vtophys(rman_get_virtual(&rpcpu->res));
707         } else {
708                 /* This ITS wants the unique processor number */
709                 target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) <<
710                     CMD_TARGET_SHIFT;
711         }
712
713         sc->sc_its_cols[cpuid]->col_target = target;
714         sc->sc_its_cols[cpuid]->col_id = cpuid;
715
716         its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
717         its_cmd_invall(dev, sc->sc_its_cols[cpuid]);
718
719         return (0);
720 }
721
722 static int
723 gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS)
724 {
725         struct gicv3_its_softc *sc;
726         int rv;
727
728         sc = arg1;
729
730         rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req);
731         if (rv != 0 || req->newptr == NULL)
732                 return (rv);
733         if (sc->trace_enable)
734                 gic_its_write_8(sc, GITS_TRKCTLR, 3);
735         else
736                 gic_its_write_8(sc, GITS_TRKCTLR, 0);
737
738         return (0);
739 }
740
741 static int
742 gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS)
743 {
744         struct gicv3_its_softc *sc;
745         struct sbuf *sb;
746         int err;
747
748         sc = arg1;
749         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
750         if (sb == NULL) {
751                 device_printf(sc->dev, "Could not allocate sbuf for output.\n");
752                 return (ENOMEM);
753         }
754         sbuf_cat(sb, "\n");
755         sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n",
756             gic_its_read_4(sc, GITS_TRKCTLR));
757         sbuf_printf(sb, "GITS_TRKR:    0x%08X\n",
758             gic_its_read_4(sc, GITS_TRKR));
759         sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n",
760             gic_its_read_4(sc, GITS_TRKDIDR));
761         sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n",
762             gic_its_read_4(sc, GITS_TRKPIDR));
763         sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n",
764             gic_its_read_4(sc, GITS_TRKVIDR));
765         sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n",
766            gic_its_read_4(sc, GITS_TRKTGTR));
767
768         err = sbuf_finish(sb);
769         if (err)
770                 device_printf(sc->dev, "Error finishing sbuf: %d\n", err);
771         sbuf_delete(sb);
772         return (err);
773 }
774
775 static int
776 gicv3_its_init_sysctl(struct gicv3_its_softc *sc)
777 {
778         struct sysctl_oid *oid, *child;
779         struct sysctl_ctx_list *ctx_list;
780
781         ctx_list = device_get_sysctl_ctx(sc->dev);
782         child = device_get_sysctl_tree(sc->dev);
783         oid = SYSCTL_ADD_NODE(ctx_list,
784             SYSCTL_CHILDREN(child), OID_AUTO, "tracing",
785             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Message tracing");
786         if (oid == NULL)
787                 return (ENXIO);
788
789         /* Add registers */
790         SYSCTL_ADD_PROC(ctx_list,
791             SYSCTL_CHILDREN(oid), OID_AUTO, "enable",
792             CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
793             gicv3_its_sysctl_trace_enable, "CU", "Enable tracing");
794         SYSCTL_ADD_PROC(ctx_list,
795             SYSCTL_CHILDREN(oid), OID_AUTO, "capture",
796             CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
797             gicv3_its_sysctl_trace_regs, "", "Captured tracing registers.");
798
799         return (0);
800 }
801
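/*
 * Common attach path used by the FDT and ACPI front ends: map the ITS
 * registers, apply any IIDR-based quirks, allocate the private tables
 * and command queue, create the LPI configuration and pending tables,
 * enable LPIs on the boot CPU and create the vmem arena that MSI/MSI-X
 * IRQ numbers are allocated from.
 */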
802 static int
803 gicv3_its_attach(device_t dev)
804 {
805         struct gicv3_its_softc *sc;
806         uint32_t iidr;
807         int domain, err, i, rid;
808
809         sc = device_get_softc(dev);
810
811         sc->sc_irq_length = gicv3_get_nirqs(dev);
812         sc->sc_irq_base = GIC_FIRST_LPI;
813         sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length;
814
815         rid = 0;
816         sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
817             RF_ACTIVE);
818         if (sc->sc_its_res == NULL) {
819                 device_printf(dev, "Could not allocate memory\n");
820                 return (ENXIO);
821         }
822
823         iidr = gic_its_read_4(sc, GITS_IIDR);
824         for (i = 0; i < nitems(its_quirks); i++) {
825                 if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
826                         if (bootverbose) {
827                                 device_printf(dev, "Applying %s\n",
828                                     its_quirks[i].desc);
829                         }
830                         its_quirks[i].func(dev);
831                         break;
832                 }
833         }
834
835         /* Allocate the private tables */
836         err = gicv3_its_table_init(dev, sc);
837         if (err != 0)
838                 return (err);
839
840         /* Protects access to the device list */
841         mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);
842
843         /* Protects access to the ITS command circular buffer. */
844         mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);
845
846         CPU_ZERO(&sc->sc_cpus);
847         if (bus_get_domain(dev, &domain) == 0) {
848                 if (domain < MAXMEMDOM)
849                         CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
850         } else {
851                 CPU_COPY(&all_cpus, &sc->sc_cpus);
852         }
853
854         /* Allocate the command circular buffer */
855         gicv3_its_cmdq_init(sc);
856
857         /* Allocate the per-CPU collections */
858         for (int cpu = 0; cpu <= mp_maxid; cpu++)
859                 if (CPU_ISSET(cpu, &sc->sc_cpus) != 0)
860                         sc->sc_its_cols[cpu] = malloc(
861                             sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
862                             M_WAITOK | M_ZERO);
863                 else
864                         sc->sc_its_cols[cpu] = NULL;
865
866         /* Enable the ITS */
867         gic_its_write_4(sc, GITS_CTLR,
868             gic_its_read_4(sc, GITS_CTLR) | GITS_CTLR_EN);
869
870         /* Create the LPI configuration table */
871         gicv3_its_conftable_init(sc);
872
873         /* And the pending tables */
874         gicv3_its_pendtables_init(sc);
875
876         /* Enable LPIs on this CPU */
877         its_init_cpu(dev, sc);
878
879         TAILQ_INIT(&sc->sc_its_dev_list);
880         TAILQ_INIT(&sc->sc_free_irqs);
881
882         /*
883          * Create the vmem object to allocate INTRNG IRQs from. We try to
884          * use all IRQs not already used by the GICv3.
885          * XXX: This assumes there are no other interrupt controllers in the
886          * system.
887          */
888         sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0,
889             gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK);
890
891         sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length,
892             M_GICV3_ITS, M_WAITOK | M_ZERO);
893
894         /* For GIC-500 install tracking sysctls. */
895         if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) ==
896             GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0))
897                 gicv3_its_init_sysctl(sc);
898
899         return (0);
900 }
901
902 static int
903 gicv3_its_detach(device_t dev)
904 {
905
906         return (ENXIO);
907 }
908
909 static void
910 its_quirk_cavium_22375(device_t dev)
911 {
912         struct gicv3_its_softc *sc;
913
914         sc = device_get_softc(dev);
915         sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;
916 }
917
918 static void
919 gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
920 {
921         struct gicv3_its_softc *sc;
922         struct gicv3_its_irqsrc *girq;
923         uint8_t *conf;
924
925         sc = device_get_softc(dev);
926         girq = (struct gicv3_its_irqsrc *)isrc;
927         conf = sc->sc_conf_base;
928
929         conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE;
930
931         if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
932                 /* Clean D-cache under command. */
933                 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
934         } else {
935                 /* DSB inner shareable, store */
936                 dsb(ishst);
937         }
938
939         its_cmd_inv(dev, girq->gi_its_dev, girq);
940 }
941
942 static void
943 gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
944 {
945         struct gicv3_its_softc *sc;
946         struct gicv3_its_irqsrc *girq;
947         uint8_t *conf;
948
949         sc = device_get_softc(dev);
950         girq = (struct gicv3_its_irqsrc *)isrc;
951         conf = sc->sc_conf_base;
952
953         conf[girq->gi_lpi] |= LPI_CONF_ENABLE;
954
955         if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
956                 /* Clean D-cache under command. */
957                 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
958         } else {
959                 /* DSB inner shareable, store */
960                 dsb(ishst);
961         }
962
963         its_cmd_inv(dev, girq->gi_its_dev, girq);
964 }
965
966 static int
967 gicv3_its_intr(void *arg, uintptr_t irq)
968 {
969         struct gicv3_its_softc *sc = arg;
970         struct gicv3_its_irqsrc *girq;
971         struct trapframe *tf;
972
973         irq -= sc->sc_irq_base;
974         girq = sc->sc_irqs[irq];
975         if (girq == NULL)
976                 panic("gicv3_its_intr: Invalid interrupt %ld",
977                     irq + sc->sc_irq_base);
978
979         tf = curthread->td_intr_frame;
980         intr_isrc_dispatch(&girq->gi_isrc, tf);
981         return (FILTER_HANDLED);
982 }
983
984 static void
985 gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
986 {
987         struct gicv3_its_irqsrc *girq;
988         struct gicv3_its_softc *sc;
989
990         sc = device_get_softc(dev);
991         girq = (struct gicv3_its_irqsrc *)isrc;
992         gicv3_its_disable_intr(dev, isrc);
993         gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
994 }
995
996 static void
997 gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
998 {
999
1000         gicv3_its_enable_intr(dev, isrc);
1001 }
1002
1003 static void
1004 gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
1005 {
1006         struct gicv3_its_irqsrc *girq;
1007         struct gicv3_its_softc *sc;
1008
1009         sc = device_get_softc(dev);
1010         girq = (struct gicv3_its_irqsrc *)isrc;
1011         gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
1012 }
1013
1014 static int
1015 gicv3_its_select_cpu(device_t dev, struct intr_irqsrc *isrc)
1016 {
1017         struct gicv3_its_softc *sc;
1018
1019         sc = device_get_softc(dev);
1020         if (CPU_EMPTY(&isrc->isrc_cpu)) {
1021                 sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
1022                     &sc->sc_cpus);
1023                 CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
1024         }
1025
1026         return (0);
1027 }
1028
1029 static int
1030 gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
1031 {
1032         struct gicv3_its_irqsrc *girq;
1033
1034         gicv3_its_select_cpu(dev, isrc);
1035
1036         girq = (struct gicv3_its_irqsrc *)isrc;
1037         its_cmd_movi(dev, girq);
1038         return (0);
1039 }
1040
1041 static int
1042 gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
1043     struct intr_irqsrc **isrcp)
1044 {
1045
1046         /*
1047          * This should never happen; this function is only called to map
1048          * interrupts discovered before the controller driver is ready.
1049          */
1050         panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
1051 }
1052
1053 static int
1054 gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
1055     struct resource *res, struct intr_map_data *data)
1056 {
1057
1058         /* Bind the interrupt to a CPU */
1059         gicv3_its_bind_intr(dev, isrc);
1060
1061         return (0);
1062 }
1063
1064 #ifdef SMP
1065 static void
1066 gicv3_its_init_secondary(device_t dev)
1067 {
1068         struct gicv3_its_softc *sc;
1069
1070         sc = device_get_softc(dev);
1071
1072         /*
1073          * This is fatal as otherwise we may bind interrupts to this CPU.
1074          * We need a way to tell the interrupt framework to only bind to a
1075          * subset of given CPUs when it performs the shuffle.
1076          */
1077         if (its_init_cpu(dev, sc) != 0)
1078                 panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
1079                     PCPU_GET(cpuid));
1080 }
1081 #endif
1082
1083 static uint32_t
1084 its_get_devid(device_t pci_dev)
1085 {
1086         uintptr_t id;
1087
1088         if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
1089                 panic("its_get_devid: Unable to get the MSI DeviceID");
1090
1091         return (id);
1092 }
1093
1094 static struct its_dev *
1095 its_device_find(device_t dev, device_t child)
1096 {
1097         struct gicv3_its_softc *sc;
1098         struct its_dev *its_dev = NULL;
1099
1100         sc = device_get_softc(dev);
1101
1102         mtx_lock_spin(&sc->sc_its_dev_lock);
1103         TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
1104                 if (its_dev->pci_dev == child)
1105                         break;
1106         }
1107         mtx_unlock_spin(&sc->sc_its_dev_lock);
1108
1109         return (its_dev);
1110 }
1111
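/*
 * Find, or on first use create, the ITS device state for a child: a
 * block of LPIs is reserved from the vmem arena and an Interrupt
 * Translation Table (256-byte aligned, sized for at least two events)
 * is allocated and bound to the DeviceID with a MAPD command.
 */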
1112 static struct its_dev *
1113 its_device_get(device_t dev, device_t child, u_int nvecs)
1114 {
1115         struct gicv3_its_softc *sc;
1116         struct its_dev *its_dev;
1117         vmem_addr_t irq_base;
1118         size_t esize;
1119
1120         sc = device_get_softc(dev);
1121
1122         its_dev = its_device_find(dev, child);
1123         if (its_dev != NULL)
1124                 return (its_dev);
1125
1126         its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
1127         if (its_dev == NULL)
1128                 return (NULL);
1129
1130         its_dev->pci_dev = child;
1131         its_dev->devid = its_get_devid(child);
1132
1133         its_dev->lpis.lpi_busy = 0;
1134         its_dev->lpis.lpi_num = nvecs;
1135         its_dev->lpis.lpi_free = nvecs;
1136
1137         if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
1138             &irq_base) != 0) {
1139                 free(its_dev, M_GICV3_ITS);
1140                 return (NULL);
1141         }
1142         its_dev->lpis.lpi_base = irq_base;
1143
1144         /* Get ITT entry size */
1145         esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));
1146
1147         /*
1148          * Allocate ITT for this device.
1149          * The PA must be 256-byte aligned, with at least two entries.
1150          */
1151         its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
1152         its_dev->itt = (vm_offset_t)contigmalloc(its_dev->itt_size,
1153             M_GICV3_ITS, M_NOWAIT | M_ZERO, 0, LPI_INT_TRANS_TAB_MAX_ADDR,
1154             LPI_INT_TRANS_TAB_ALIGN, 0);
1155         if (its_dev->itt == 0) {
1156                 vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
1157                 free(its_dev, M_GICV3_ITS);
1158                 return (NULL);
1159         }
1160
1161         mtx_lock_spin(&sc->sc_its_dev_lock);
1162         TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
1163         mtx_unlock_spin(&sc->sc_its_dev_lock);
1164
1165         /* Map device to its ITT */
1166         its_cmd_mapd(dev, its_dev, 1);
1167
1168         return (its_dev);
1169 }
1170
1171 static void
1172 its_device_release(device_t dev, struct its_dev *its_dev)
1173 {
1174         struct gicv3_its_softc *sc;
1175
1176         KASSERT(its_dev->lpis.lpi_busy == 0,
1177             ("its_device_release: Trying to release an inuse ITS device"));
1178
1179         /* Unmap device in ITS */
1180         its_cmd_mapd(dev, its_dev, 0);
1181
1182         sc = device_get_softc(dev);
1183
1184         /* Remove the device from the list of devices */
1185         mtx_lock_spin(&sc->sc_its_dev_lock);
1186         TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
1187         mtx_unlock_spin(&sc->sc_its_dev_lock);
1188
1189         /* Free ITT */
1190         KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device"));
1191         contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS);
1192
1193         /* Free the IRQ allocation */
1194         vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
1195             its_dev->lpis.lpi_num);
1196
1197         free(its_dev, M_GICV3_ITS);
1198 }
1199
1200 static struct gicv3_its_irqsrc *
1201 gicv3_its_alloc_irqsrc(device_t dev, struct gicv3_its_softc *sc, u_int irq)
1202 {
1203         struct gicv3_its_irqsrc *girq = NULL;
1204
1205         KASSERT(sc->sc_irqs[irq] == NULL,
1206             ("%s: Interrupt %u already allocated", __func__, irq));
1207         mtx_lock_spin(&sc->sc_its_dev_lock);
1208         if (!TAILQ_EMPTY(&sc->sc_free_irqs)) {
1209                 girq = TAILQ_FIRST(&sc->sc_free_irqs);
1210                 TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link);
1211         }
1212         mtx_unlock_spin(&sc->sc_its_dev_lock);
1213         if (girq == NULL) {
1214                 girq = malloc(sizeof(*girq), M_GICV3_ITS,
1215                     M_NOWAIT | M_ZERO);
1216                 if (girq == NULL)
1217                         return (NULL);
1218                 girq->gi_id = -1;
1219                 if (intr_isrc_register(&girq->gi_isrc, dev, 0,
1220                     "%s,%u", device_get_nameunit(dev), irq) != 0) {
1221                         free(girq, M_GICV3_ITS);
1222                         return (NULL);
1223                 }
1224         }
1225         girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI;
1226         sc->sc_irqs[irq] = girq;
1227
1228         return (girq);
1229 }
1230
1231 static void
1232 gicv3_its_release_irqsrc(struct gicv3_its_softc *sc,
1233     struct gicv3_its_irqsrc *girq)
1234 {
1235         u_int irq;
1236
1237         mtx_assert(&sc->sc_its_dev_lock, MA_OWNED);
1238
1239         irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base;
1240         sc->sc_irqs[irq] = NULL;
1241
1242         girq->gi_id = -1;
1243         girq->gi_its_dev = NULL;
1244         TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link);
1245 }
1246
1247 static int
1248 gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1249     device_t *pic, struct intr_irqsrc **srcs)
1250 {
1251         struct gicv3_its_softc *sc;
1252         struct gicv3_its_irqsrc *girq;
1253         struct its_dev *its_dev;
1254         u_int irq;
1255         int i;
1256
1257         its_dev = its_device_get(dev, child, count);
1258         if (its_dev == NULL)
1259                 return (ENXIO);
1260
1261         KASSERT(its_dev->lpis.lpi_free >= count,
1262             ("gicv3_its_alloc_msi: No free LPIs"));
1263         sc = device_get_softc(dev);
1264         irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
1265             its_dev->lpis.lpi_free;
1266
1267         /* Allocate the irqsrc for each MSI */
1268         for (i = 0; i < count; i++, irq++) {
1269                 its_dev->lpis.lpi_free--;
1270                 srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev,
1271                     sc, irq);
1272                 if (srcs[i] == NULL)
1273                         break;
1274         }
1275
1276         /* The allocation failed, release them */
1277         if (i != count) {
1278                 mtx_lock_spin(&sc->sc_its_dev_lock);
1279                 for (i = 0; i < count; i++) {
1280                         girq = (struct gicv3_its_irqsrc *)srcs[i];
1281                         if (girq == NULL)
1282                                 break;
1283                         gicv3_its_release_irqsrc(sc, girq);
1284                         srcs[i] = NULL;
1285                 }
1286                 mtx_unlock_spin(&sc->sc_its_dev_lock);
1287                 return (ENXIO);
1288         }
1289
1290         /* Finish the allocation now we have all MSI irqsrcs */
1291         for (i = 0; i < count; i++) {
1292                 girq = (struct gicv3_its_irqsrc *)srcs[i];
1293                 girq->gi_id = i;
1294                 girq->gi_its_dev = its_dev;
1295
1296                 /* Map the message to the given IRQ */
1297                 gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
1298                 its_cmd_mapti(dev, girq);
1299         }
1300         its_dev->lpis.lpi_busy += count;
1301         *pic = dev;
1302
1303         return (0);
1304 }
1305
1306 static int
1307 gicv3_its_release_msi(device_t dev, device_t child, int count,
1308     struct intr_irqsrc **isrc)
1309 {
1310         struct gicv3_its_softc *sc;
1311         struct gicv3_its_irqsrc *girq;
1312         struct its_dev *its_dev;
1313         int i;
1314
1315         its_dev = its_device_find(dev, child);
1316
1317         KASSERT(its_dev != NULL,
1318             ("gicv3_its_release_msi: Releasing a MSI interrupt with "
1319              "no ITS device"));
1320         KASSERT(its_dev->lpis.lpi_busy >= count,
1321             ("gicv3_its_release_msi: Releasing more interrupts than "
1322              "were allocated: releasing %d, allocated %d", count,
1323              its_dev->lpis.lpi_busy));
1324
1325         sc = device_get_softc(dev);
1326         mtx_lock_spin(&sc->sc_its_dev_lock);
1327         for (i = 0; i < count; i++) {
1328                 girq = (struct gicv3_its_irqsrc *)isrc[i];
1329                 gicv3_its_release_irqsrc(sc, girq);
1330         }
1331         mtx_unlock_spin(&sc->sc_its_dev_lock);
1332         its_dev->lpis.lpi_busy -= count;
1333
1334         if (its_dev->lpis.lpi_busy == 0)
1335                 its_device_release(dev, its_dev);
1336
1337         return (0);
1338 }
1339
1340 static int
1341 gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
1342     struct intr_irqsrc **isrcp)
1343 {
1344         struct gicv3_its_softc *sc;
1345         struct gicv3_its_irqsrc *girq;
1346         struct its_dev *its_dev;
1347         u_int nvecs, irq;
1348
1349         nvecs = pci_msix_count(child);
1350         its_dev = its_device_get(dev, child, nvecs);
1351         if (its_dev == NULL)
1352                 return (ENXIO);
1353
1354         KASSERT(its_dev->lpis.lpi_free > 0,
1355             ("gicv3_its_alloc_msix: No free LPIs"));
1356         sc = device_get_softc(dev);
1357         irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
1358             its_dev->lpis.lpi_free;
1359
1360         girq = gicv3_its_alloc_irqsrc(dev, sc, irq);
1361         if (girq == NULL)
1362                 return (ENXIO);
1363         girq->gi_id = its_dev->lpis.lpi_busy;
1364         girq->gi_its_dev = its_dev;
1365
1366         its_dev->lpis.lpi_free--;
1367         its_dev->lpis.lpi_busy++;
1368
1369         /* Map the message to the given IRQ */
1370         gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
1371         its_cmd_mapti(dev, girq);
1372
1373         *pic = dev;
1374         *isrcp = (struct intr_irqsrc *)girq;
1375
1376         return (0);
1377 }
1378
1379 static int
1380 gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1381 {
1382         struct gicv3_its_softc *sc;
1383         struct gicv3_its_irqsrc *girq;
1384         struct its_dev *its_dev;
1385
1386         its_dev = its_device_find(dev, child);
1387
1388         KASSERT(its_dev != NULL,
1389             ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
1390              "no ITS device"));
1391         KASSERT(its_dev->lpis.lpi_busy > 0,
1392             ("gicv3_its_release_msix: Releasing more interrupts than "
1393              "were allocated: allocated %d", its_dev->lpis.lpi_busy));
1394
1395         sc = device_get_softc(dev);
1396         girq = (struct gicv3_its_irqsrc *)isrc;
1397         gicv3_its_release_irqsrc(sc, girq);
1398         its_dev->lpis.lpi_busy--;
1399
1400         if (its_dev->lpis.lpi_busy == 0)
1401                 its_device_release(dev, its_dev);
1402
1403         return (0);
1404 }
1405
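/*
 * Provide the MSI doorbell for a child: the address is this ITS's
 * GITS_TRANSLATER register and the data is the per-device EventID.  The
 * ITS combines the bus-supplied DeviceID with the EventID written to
 * GITS_TRANSLATER to look up the LPI established by MAPTI.
 */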
1406 static int
1407 gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1408     uint64_t *addr, uint32_t *data)
1409 {
1410         struct gicv3_its_softc *sc;
1411         struct gicv3_its_irqsrc *girq;
1412
1413         sc = device_get_softc(dev);
1414         girq = (struct gicv3_its_irqsrc *)isrc;
1415
1416         *addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
1417         *data = girq->gi_id;
1418
1419         return (0);
1420 }
1421
1422 /*
1423  * Commands handling.
1424  */
1425
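/*
 * Helpers that build the four doublewords of a command in place.  The
 * htole64() conversions are used because the ITS reads the command queue
 * as little-endian data.
 */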
1426 static __inline void
1427 cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type)
1428 {
1429         /* Command field: DW0 [7:0] */
1430         cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK);
1431         cmd->cmd_dword[0] |= htole64(cmd_type);
1432 }
1433
1434 static __inline void
1435 cmd_format_devid(struct its_cmd *cmd, uint32_t devid)
1436 {
1437         /* Device ID field: DW0 [63:32] */
1438         cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK);
1439         cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT);
1440 }
1441
1442 static __inline void
1443 cmd_format_size(struct its_cmd *cmd, uint16_t size)
1444 {
1445         /* Size field: DW1 [4:0] */
1446         cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK);
1447         cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK));
1448 }
1449
1450 static __inline void
1451 cmd_format_id(struct its_cmd *cmd, uint32_t id)
1452 {
1453         /* ID field: DW1 [31:0] */
1454         cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK);
1455         cmd->cmd_dword[1] |= htole64(id);
1456 }
1457
1458 static __inline void
1459 cmd_format_pid(struct its_cmd *cmd, uint32_t pid)
1460 {
1461         /* Physical ID field: DW1 [63:32] */
1462         cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK);
1463         cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT);
1464 }
1465
1466 static __inline void
1467 cmd_format_col(struct its_cmd *cmd, uint16_t col_id)
1468 {
1469         /* Collection field: DW2 [15:0] */
1470         cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK);
1471         cmd->cmd_dword[2] |= htole64(col_id);
1472 }
1473
1474 static __inline void
1475 cmd_format_target(struct its_cmd *cmd, uint64_t target)
1476 {
1477         /* Target Address field: DW2 [47:16] */
1478         cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK);
1479         cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK);
1480 }
1481
1482 static __inline void
1483 cmd_format_itt(struct its_cmd *cmd, uint64_t itt)
1484 {
1485         /* ITT Address field: DW2 [47:8] */
1486         cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK);
1487         cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK);
1488 }
1489
1490 static __inline void
1491 cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
1492 {
1493         /* Valid field: DW2 [63] */
1494         cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
1495         cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
1496 }
1497
1498 static inline bool
1499 its_cmd_queue_full(struct gicv3_its_softc *sc)
1500 {
1501         size_t read_idx, next_write_idx;
1502
1503         /* Get the index of the next command */
1504         next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
1505             (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
1506         /* And the index of the current command being read */
1507         read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);
1508
1509         /*
1510          * The queue is full when the write offset points
1511          * at the command before the current read offset.
1512          */
1513         return (next_write_idx == read_idx);
1514 }
1515
1516 static inline void
1517 its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
1518 {
1519
1520         if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
1521                 /* Clean D-cache under command. */
1522                 cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
1523         } else {
1524                 /* DSB inner shareable, store */
1525                 dsb(ishst);
1526         }
1527
1528 }
1529
1530 static inline uint64_t
1531 its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
1532 {
1533         uint64_t off;
1534
1535         off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);
1536
1537         return (off);
1538 }
1539
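/*
 * Poll GITS_CREADR until the ITS read pointer is no longer within
 * [cmd_first, cmd_last), i.e. the ITS has consumed the commands queued by
 * the caller, giving up with a diagnostic after roughly one second.
 */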
1540 static void
1541 its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
1542     struct its_cmd *cmd_last)
1543 {
1544         struct gicv3_its_softc *sc;
1545         uint64_t first, last, read;
1546         size_t us_left;
1547
1548         sc = device_get_softc(dev);
1549
1550         /*
1551          * XXX ARM64TODO: This is obviously a significant delay, but it
1552          * is needed because the time a command takes to complete is
1553          * currently not known.
1554          */
1555         us_left = 1000000;
1556
1557         first = its_cmd_cwriter_offset(sc, cmd_first);
1558         last = its_cmd_cwriter_offset(sc, cmd_last);
1559
1560         for (;;) {
1561                 read = gic_its_read_8(sc, GITS_CREADR);
1562                 if (first < last) {
1563                         if (read < first || read >= last)
1564                                 break;
1565                 } else if (read < first && read >= last)
1566                         break;
1567
1568                 if (us_left-- == 0) {
1569                         /* Timed out */
1570                         device_printf(dev,
1571                             "Timeout while waiting for CMD completion.\n");
1572                         return;
1573                 }
1574                 DELAY(1);
1575         }
1576 }
1577
1578
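/*
 * Claim the next slot in the command queue. The caller must hold
 * sc_its_cmd_lock; this spins for up to roughly one second while the queue
 * is full and returns NULL on timeout.
 */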
1579 static struct its_cmd *
1580 its_cmd_alloc_locked(device_t dev)
1581 {
1582         struct gicv3_its_softc *sc;
1583         struct its_cmd *cmd;
1584         size_t us_left;
1585
1586         sc = device_get_softc(dev);
1587
1588         /*
1589          * XXX ARM64TODO: This is obviously a significant delay, but it
1590          * is needed because the time a command takes to complete (and
1591          * therefore to free its descriptor) is currently not
1592          * known.
1593          */
1594         us_left = 1000000;
1595
1596         mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
1597         while (its_cmd_queue_full(sc)) {
1598                 if (us_left-- == 0) {
1599                         /* Timeout while waiting for free command */
1600                         device_printf(dev,
1601                             "Timeout while waiting for free command\n");
1602                         return (NULL);
1603                 }
1604                 DELAY(1);
1605         }
1606
1607         cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
1608         sc->sc_its_cmd_next_idx++;
1609         sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);
1610
1611         return (cmd);
1612 }
1613
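/*
 * Encode a command descriptor into the in-memory command format. Returns
 * the target redistributor address for commands that need to be followed
 * by a SYNC, or ITS_TARGET_NONE.
 */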
1614 static uint64_t
1615 its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
1616 {
1617         uint64_t target;
1618         uint8_t cmd_type;
1619         u_int size;
1620
1621         cmd_type = desc->cmd_type;
1622         target = ITS_TARGET_NONE;
1623
1624         switch (cmd_type) {
1625         case ITS_CMD_MOVI:      /* Move interrupt ID to another collection */
1626                 target = desc->cmd_desc_movi.col->col_target;
1627                 cmd_format_command(cmd, ITS_CMD_MOVI);
1628                 cmd_format_id(cmd, desc->cmd_desc_movi.id);
1629                 cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id);
1630                 cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
1631                 break;
1632         case ITS_CMD_SYNC:      /* Wait for previous commands' completion */
1633                 target = desc->cmd_desc_sync.col->col_target;
1634                 cmd_format_command(cmd, ITS_CMD_SYNC);
1635                 cmd_format_target(cmd, target);
1636                 break;
1637         case ITS_CMD_MAPD:      /* Assign ITT to device */
1638                 cmd_format_command(cmd, ITS_CMD_MAPD);
1639                 cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
1640                 /*
1641                  * Size is the number of bits needed to encode the
1642                  * interrupt IDs supported by the device, minus one.
1643                  * When the V (valid) bit is zero, this field should be
1644                  * written as zero.
1645                  */
1646                 if (desc->cmd_desc_mapd.valid != 0) {
1647                         size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
1648                         size = MAX(1, size) - 1;
1649                 } else
1650                         size = 0;
1651
1652                 cmd_format_size(cmd, size);
1653                 cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
1654                 cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
1655                 break;
1656         case ITS_CMD_MAPC:      /* Map collection to Re-Distributor */
1657                 target = desc->cmd_desc_mapc.col->col_target;
1658                 cmd_format_command(cmd, ITS_CMD_MAPC);
1659                 cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
1660                 cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
1661                 cmd_format_target(cmd, target);
1662                 break;
1663         case ITS_CMD_MAPTI:     /* Map interrupt to a physical LPI */
1664                 target = desc->cmd_desc_mapvi.col->col_target;
1665                 cmd_format_command(cmd, ITS_CMD_MAPTI);
1666                 cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
1667                 cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
1668                 cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
1669                 cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
1670                 break;
1671         case ITS_CMD_MAPI:      /* Map interrupt, EventID == physical ID */
1672                 target = desc->cmd_desc_mapi.col->col_target;
1673                 cmd_format_command(cmd, ITS_CMD_MAPI);
1674                 cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
1675                 cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
1676                 cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
1677                 break;
1678         case ITS_CMD_INV:       /* Reload the configuration of an interrupt */
1679                 target = desc->cmd_desc_inv.col->col_target;
1680                 cmd_format_command(cmd, ITS_CMD_INV);
1681                 cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
1682                 cmd_format_id(cmd, desc->cmd_desc_inv.pid);
1683                 break;
1684         case ITS_CMD_INVALL:    /* Reload the configuration of a collection's interrupts */
1685                 cmd_format_command(cmd, ITS_CMD_INVALL);
1686                 cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
1687                 break;
1688         default:
1689                 panic("its_cmd_prepare: Invalid command: %x", cmd_type);
1690         }
1691
1692         return (target);
1693 }
1694
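/*
 * Enqueue a single command, append a SYNC when the command targets a
 * redistributor, publish the new GITS_CWRITER value and wait for the ITS
 * to consume the queued commands.
 */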
1695 static int
1696 its_cmd_send(device_t dev, struct its_cmd_desc *desc)
1697 {
1698         struct gicv3_its_softc *sc;
1699         struct its_cmd *cmd, *cmd_sync, *cmd_write;
1700         struct its_col col_sync;
1701         struct its_cmd_desc desc_sync;
1702         uint64_t target, cwriter;
1703
1704         sc = device_get_softc(dev);
1705         mtx_lock_spin(&sc->sc_its_cmd_lock);
1706         cmd = its_cmd_alloc_locked(dev);
1707         if (cmd == NULL) {
1708                 device_printf(dev, "could not allocate ITS command\n");
1709                 mtx_unlock_spin(&sc->sc_its_cmd_lock);
1710                 return (EBUSY);
1711         }
1712
1713         target = its_cmd_prepare(cmd, desc);
1714         its_cmd_sync(sc, cmd);
1715
1716         if (target != ITS_TARGET_NONE) {
1717                 cmd_sync = its_cmd_alloc_locked(dev);
1718                 if (cmd_sync != NULL) {
1719                         desc_sync.cmd_type = ITS_CMD_SYNC;
1720                         col_sync.col_target = target;
1721                         desc_sync.cmd_desc_sync.col = &col_sync;
1722                         its_cmd_prepare(cmd_sync, &desc_sync);
1723                         its_cmd_sync(sc, cmd_sync);
1724                 }
1725         }
1726
1727         /* Update GITS_CWRITER */
1728         cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
1729         gic_its_write_8(sc, GITS_CWRITER, cwriter);
1730         cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
1731         mtx_unlock_spin(&sc->sc_its_cmd_lock);
1732
1733         its_cmd_wait_completion(dev, cmd, cmd_write);
1734
1735         return (0);
1736 }
1737
1738 /* Handlers to send commands */
1739 static void
1740 its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
1741 {
1742         struct gicv3_its_softc *sc;
1743         struct its_cmd_desc desc;
1744         struct its_col *col;
1745
1746         sc = device_get_softc(dev);
1747         col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
1748
1749         desc.cmd_type = ITS_CMD_MOVI;
1750         desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
1751         desc.cmd_desc_movi.col = col;
1752         desc.cmd_desc_movi.id = girq->gi_id;
1753
1754         its_cmd_send(dev, &desc);
1755 }
1756
1757 static void
1758 its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
1759 {
1760         struct its_cmd_desc desc;
1761
1762         desc.cmd_type = ITS_CMD_MAPC;
1763         desc.cmd_desc_mapc.col = col;
1764         /*
1765          * Valid bit set - map the collection.
1766          * Valid bit cleared - unmap the collection.
1767          */
1768         desc.cmd_desc_mapc.valid = valid;
1769
1770         its_cmd_send(dev, &desc);
1771 }
1772
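/*
 * Map this interrupt source's EventID to the LPI allocated for it, on the
 * collection of the CPU the interrupt is bound to.
 */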
1773 static void
1774 its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
1775 {
1776         struct gicv3_its_softc *sc;
1777         struct its_cmd_desc desc;
1778         struct its_col *col;
1779         u_int col_id;
1780
1781         sc = device_get_softc(dev);
1782
1783         col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
1784         col = sc->sc_its_cols[col_id];
1785
1786         desc.cmd_type = ITS_CMD_MAPTI;
1787         desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
1788         desc.cmd_desc_mapvi.col = col;
1789         /* The EventID sent to the device */
1790         desc.cmd_desc_mapvi.id = girq->gi_id;
1791         /* The physical interrupt presented to software */
1792         desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI;
1793
1794         its_cmd_send(dev, &desc);
1795 }
1796
1797 static void
1798 its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
1799 {
1800         struct its_cmd_desc desc;
1801
1802         desc.cmd_type = ITS_CMD_MAPD;
1803         desc.cmd_desc_mapd.its_dev = its_dev;
1804         desc.cmd_desc_mapd.valid = valid;
1805
1806         its_cmd_send(dev, &desc);
1807 }
1808
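/*
 * Ask the ITS to re-read the configuration (enable state and priority) of
 * the LPI associated with this interrupt source.
 */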
1809 static void
1810 its_cmd_inv(device_t dev, struct its_dev *its_dev,
1811     struct gicv3_its_irqsrc *girq)
1812 {
1813         struct gicv3_its_softc *sc;
1814         struct its_cmd_desc desc;
1815         struct its_col *col;
1816
1817         sc = device_get_softc(dev);
1818         col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
1819
1820         desc.cmd_type = ITS_CMD_INV;
1821         /* The EventID sent to the device */
1822         desc.cmd_desc_inv.pid = girq->gi_id;
1823         desc.cmd_desc_inv.its_dev = its_dev;
1824         desc.cmd_desc_inv.col = col;
1825
1826         its_cmd_send(dev, &desc);
1827 }
1828
1829 static void
1830 its_cmd_invall(device_t dev, struct its_col *col)
1831 {
1832         struct its_cmd_desc desc;
1833
1834         desc.cmd_type = ITS_CMD_INVALL;
1835         desc.cmd_desc_invall.col = col;
1836
1837         its_cmd_send(dev, &desc);
1838 }
1839
1840 #ifdef FDT
1841 static device_probe_t gicv3_its_fdt_probe;
1842 static device_attach_t gicv3_its_fdt_attach;
1843
1844 static device_method_t gicv3_its_fdt_methods[] = {
1845         /* Device interface */
1846         DEVMETHOD(device_probe,         gicv3_its_fdt_probe),
1847         DEVMETHOD(device_attach,        gicv3_its_fdt_attach),
1848
1849         /* End */
1850         DEVMETHOD_END
1851 };
1852
1853 #define its_baseclasses its_fdt_baseclasses
1854 DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
1855     sizeof(struct gicv3_its_softc), gicv3_its_driver);
1856 #undef its_baseclasses
1857 static devclass_t gicv3_its_fdt_devclass;
1858
1859 EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver,
1860     gicv3_its_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
1861
1862 static int
1863 gicv3_its_fdt_probe(device_t dev)
1864 {
1865
1866         if (!ofw_bus_status_okay(dev))
1867                 return (ENXIO);
1868
1869         if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
1870                 return (ENXIO);
1871
1872         device_set_desc(dev, "ARM GIC Interrupt Translation Service");
1873         return (BUS_PROBE_DEFAULT);
1874 }
1875
1876 static int
1877 gicv3_its_fdt_attach(device_t dev)
1878 {
1879         struct gicv3_its_softc *sc;
1880         phandle_t xref;
1881         int err;
1882
1883         sc = device_get_softc(dev);
1884         sc->dev = dev;
1885         err = gicv3_its_attach(dev);
1886         if (err != 0)
1887                 return (err);
1888
1889         /* Register this device as an interrupt controller */
1890         xref = OF_xref_from_node(ofw_bus_get_node(dev));
1891         sc->sc_pic = intr_pic_register(dev, xref);
1892         intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
1893             gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
1894
1895         /* Register this device to handle MSI interrupts */
1896         intr_msi_register(dev, xref);
1897
1898         return (0);
1899 }
1900 #endif
1901
1902 #ifdef DEV_ACPI
1903 static device_probe_t gicv3_its_acpi_probe;
1904 static device_attach_t gicv3_its_acpi_attach;
1905
1906 static device_method_t gicv3_its_acpi_methods[] = {
1907         /* Device interface */
1908         DEVMETHOD(device_probe,         gicv3_its_acpi_probe),
1909         DEVMETHOD(device_attach,        gicv3_its_acpi_attach),
1910
1911         /* End */
1912         DEVMETHOD_END
1913 };
1914
1915 #define its_baseclasses its_acpi_baseclasses
1916 DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
1917     sizeof(struct gicv3_its_softc), gicv3_its_driver);
1918 #undef its_baseclasses
1919 static devclass_t gicv3_its_acpi_devclass;
1920
1921 EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver,
1922     gicv3_its_acpi_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
1923
1924 static int
1925 gicv3_its_acpi_probe(device_t dev)
1926 {
1927
1928         if (gic_get_bus(dev) != GIC_BUS_ACPI)
1929                 return (EINVAL);
1930
1931         if (gic_get_hw_rev(dev) < 3)
1932                 return (EINVAL);
1933
1934         device_set_desc(dev, "ARM GIC Interrupt Translation Service");
1935         return (BUS_PROBE_DEFAULT);
1936 }
1937
1938 static int
1939 gicv3_its_acpi_attach(device_t dev)
1940 {
1941         struct gicv3_its_softc *sc;
1942         struct gic_v3_devinfo *di;
1943         int err;
1944
1945         sc = device_get_softc(dev);
1946         sc->dev = dev;
1947         err = gicv3_its_attach(dev);
1948         if (err != 0)
1949                 return (err);
1950
1951         di = device_get_ivars(dev);
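        /* Register this device as an interrupt controller */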
1952         sc->sc_pic = intr_pic_register(dev, di->msi_xref);
1953         intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
1954             gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
1955
1956         /* Register this device to handle MSI interrupts */
1957         intr_msi_register(dev, di->msi_xref);
1958
1959         return (0);
1960 }
1961 #endif