/*
 * sys/arm64/arm64/gicv3_its.c
 * (Snapshot note: check if iommu ctx is valid before passing it to
 * functions.)
 */
1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  *
4  * This software was developed by Andrew Turner under
5  * the sponsorship of the FreeBSD Foundation.
6  *
7  * This software was developed by Semihalf under
8  * the sponsorship of the FreeBSD Foundation.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32 #include "opt_acpi.h"
33 #include "opt_platform.h"
34 #include "opt_iommu.h"
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/cpuset.h>
43 #include <sys/endian.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/module.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/taskqueue.h>
51 #include <sys/tree.h>
52 #include <sys/queue.h>
53 #include <sys/rman.h>
54 #include <sys/sbuf.h>
55 #include <sys/smp.h>
56 #include <sys/sysctl.h>
57 #include <sys/vmem.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #include <vm/vm_page.h>
62
63 #include <machine/bus.h>
64 #include <machine/intr.h>
65
66 #include <arm/arm/gic_common.h>
67 #include <arm64/arm64/gic_v3_reg.h>
68 #include <arm64/arm64/gic_v3_var.h>
69
70 #ifdef FDT
71 #include <dev/ofw/openfirm.h>
72 #include <dev/ofw/ofw_bus.h>
73 #include <dev/ofw/ofw_bus_subr.h>
74 #endif
75 #include <dev/pci/pcireg.h>
76 #include <dev/pci/pcivar.h>
77
78 #ifdef IOMMU
79 #include <dev/iommu/iommu.h>
80 #include <dev/iommu/iommu_gas.h>
81 #endif
82
83 #include "pcib_if.h"
84 #include "pic_if.h"
85 #include "msi_if.h"
86
87 MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS",
88     "ARM GICv3 Interrupt Translation Service");
89
90 #define LPI_NIRQS               (64 * 1024)
91
92 /* The size and alignment of the command circular buffer */
93 #define ITS_CMDQ_SIZE           (64 * 1024)     /* Must be a multiple of 4K */
94 #define ITS_CMDQ_ALIGN          (64 * 1024)
95
96 #define LPI_CONFTAB_SIZE        LPI_NIRQS
97 #define LPI_CONFTAB_ALIGN       (64 * 1024)
98 #define LPI_CONFTAB_MAX_ADDR    ((1ul << 48) - 1) /* We need a 47 bit PA */
99
100 /* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */
101 #define LPI_PENDTAB_SIZE        ((LPI_NIRQS + GIC_FIRST_LPI) / 8)
102 #define LPI_PENDTAB_ALIGN       (64 * 1024)
103 #define LPI_PENDTAB_MAX_ADDR    ((1ul << 48) - 1) /* We need a 47 bit PA */
104
105 #define LPI_INT_TRANS_TAB_ALIGN 256
106 #define LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)
107
108 /* ITS commands encoding */
109 #define ITS_CMD_MOVI            (0x01)
110 #define ITS_CMD_SYNC            (0x05)
111 #define ITS_CMD_MAPD            (0x08)
112 #define ITS_CMD_MAPC            (0x09)
113 #define ITS_CMD_MAPTI           (0x0a)
114 #define ITS_CMD_MAPI            (0x0b)
115 #define ITS_CMD_INV             (0x0c)
116 #define ITS_CMD_INVALL          (0x0d)
117 /* Command */
118 #define CMD_COMMAND_MASK        (0xFFUL)
119 /* PCI device ID */
120 #define CMD_DEVID_SHIFT         (32)
121 #define CMD_DEVID_MASK          (0xFFFFFFFFUL << CMD_DEVID_SHIFT)
122 /* Size of IRQ ID bitfield */
123 #define CMD_SIZE_MASK           (0xFFUL)
124 /* Virtual LPI ID */
125 #define CMD_ID_MASK             (0xFFFFFFFFUL)
126 /* Physical LPI ID */
127 #define CMD_PID_SHIFT           (32)
128 #define CMD_PID_MASK            (0xFFFFFFFFUL << CMD_PID_SHIFT)
129 /* Collection */
130 #define CMD_COL_MASK            (0xFFFFUL)
131 /* Target (CPU or Re-Distributor) */
132 #define CMD_TARGET_SHIFT        (16)
133 #define CMD_TARGET_MASK         (0xFFFFFFFFUL << CMD_TARGET_SHIFT)
134 /* Interrupt Translation Table address */
135 #define CMD_ITT_MASK            (0xFFFFFFFFFF00UL)
136 /* Valid command bit */
137 #define CMD_VALID_SHIFT         (63)
138 #define CMD_VALID_MASK          (1UL << CMD_VALID_SHIFT)
139
140 #define ITS_TARGET_NONE         0xFBADBEEF
141
142 /* LPI chunk owned by ITS device */
/* LPI chunk owned by ITS device */
struct lpi_chunk {
	u_int	lpi_base;	/* First LPI number in this chunk */
	u_int	lpi_free;	/* First free LPI in set */
	u_int	lpi_num;	/* Total number of LPIs in chunk */
	u_int	lpi_busy;	/* Number of busy LPIs in chunk */
};
149
/*
 * ITS device: per-device state tracked by this driver, including the
 * LPI chunk assigned to the device and its Interrupt Translation Table.
 */
struct its_dev {
	TAILQ_ENTRY(its_dev)	entry;
	/* PCI device */
	device_t		pci_dev;
	/* Device ID (i.e. PCI device ID) */
	uint32_t		devid;
	/* List of assigned LPIs */
	struct lpi_chunk	lpis;
	/* Virtual address of ITT */
	vm_offset_t		itt;
	/* Size in bytes of the ITT allocation */
	size_t			itt_size;
};
163
164 /*
165  * ITS command descriptor.
166  * Idea for command description passing taken from Linux.
167  */
168 struct its_cmd_desc {
169         uint8_t cmd_type;
170
171         union {
172                 struct {
173                         struct its_dev *its_dev;
174                         struct its_col *col;
175                         uint32_t id;
176                 } cmd_desc_movi;
177
178                 struct {
179                         struct its_col *col;
180                 } cmd_desc_sync;
181
182                 struct {
183                         struct its_col *col;
184                         uint8_t valid;
185                 } cmd_desc_mapc;
186
187                 struct {
188                         struct its_dev *its_dev;
189                         struct its_col *col;
190                         uint32_t pid;
191                         uint32_t id;
192                 } cmd_desc_mapvi;
193
194                 struct {
195                         struct its_dev *its_dev;
196                         struct its_col *col;
197                         uint32_t pid;
198                 } cmd_desc_mapi;
199
200                 struct {
201                         struct its_dev *its_dev;
202                         uint8_t valid;
203                 } cmd_desc_mapd;
204
205                 struct {
206                         struct its_dev *its_dev;
207                         struct its_col *col;
208                         uint32_t pid;
209                 } cmd_desc_inv;
210
211                 struct {
212                         struct its_col *col;
213                 } cmd_desc_invall;
214         };
215 };
216
/*
 * ITS command. Each command is 32 bytes long, i.e. four 64-bit words
 * written into the command circular buffer.
 */
struct its_cmd {
	uint64_t	cmd_dword[4];	/* ITS command double word */
};
221
/* An ITS private table (backing memory for a GITS_BASERn table) */
struct its_ptable {
	vm_offset_t	ptab_vaddr;	/* KVA of the table allocation */
	unsigned long	ptab_size;	/* Allocation size in bytes */
};
227
/* ITS collection description. */
struct its_col {
	uint64_t	col_target;	/* Target Re-Distributor */
	uint64_t	col_id;		/* Collection ID */
};
233
/* Per-interrupt source state for one LPI. */
struct gicv3_its_irqsrc {
	struct intr_irqsrc	gi_isrc;	/* Common intr framework state */
	u_int			gi_id;		/* Driver interrupt id */
	u_int			gi_lpi;		/* Hardware LPI number */
	struct its_dev		*gi_its_dev;	/* Owning ITS device, if mapped */
	/* Linkage for the softc free list (sc_free_irqs) */
	TAILQ_ENTRY(gicv3_its_irqsrc) gi_link;
};
241
/* Per-ITS-instance driver state. */
struct gicv3_its_softc {
	device_t	dev;
	struct intr_pic *sc_pic;
	struct resource *sc_its_res;		/* ITS register window */

	cpuset_t	sc_cpus;		/* CPUs served by this ITS */
	u_int		gic_irq_cpu;

	/* Backing memory for the GITS_BASERn private tables */
	struct its_ptable sc_its_ptab[GITS_BASER_NUM];
	struct its_col *sc_its_cols[MAXCPU];	/* Per-CPU collections */

	/*
	 * TODO: We should get these from the parent as we only want a
	 * single copy of each across the interrupt controller.
	 */
	uint8_t		*sc_conf_base;		/* LPI configuration table */
	vm_offset_t sc_pend_base[MAXCPU];	/* Per-CPU LPI pending tables */

	/* Command handling */
	struct mtx sc_its_cmd_lock;
	struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */
	size_t sc_its_cmd_next_idx;	/* Next free slot in the buffer */

	vmem_t *sc_irq_alloc;		/* Allocator for LPI numbers */
	struct gicv3_its_irqsrc **sc_irqs;
	u_int	sc_irq_base;
	u_int	sc_irq_length;
	u_int	sc_irq_count;

	struct mtx sc_its_dev_lock;
	TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
	TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs;

#define	ITS_FLAGS_CMDQ_FLUSH		0x00000001
#define	ITS_FLAGS_LPI_CONF_FLUSH	0x00000002
#define	ITS_FLAGS_ERRATA_CAVIUM_22375	0x00000004
	u_int sc_its_flags;
	bool	trace_enable;		/* Hardware message tracing enabled */
	vm_page_t ma; /* fake msi page */
};
282
283 static void *conf_base;
284
/* Per-implementation errata workaround hook. */
typedef void (its_quirk_func_t)(device_t);
static its_quirk_func_t its_quirk_cavium_22375;

/*
 * Quirk table, matched against GITS_IIDR at attach time; the first
 * entry whose masked IIDR matches has its func applied.
 */
static const struct {
	const char *desc;		/* Human-readable description */
	uint32_t iidr;			/* Expected (masked) GITS_IIDR value */
	uint32_t iidr_mask;		/* Bits of GITS_IIDR to compare */
	its_quirk_func_t *func;		/* Workaround to apply on match */
} its_quirks[] = {
	{
		/* Cavium ThunderX Pass 1.x */
		.desc = "Cavium ThunderX errata: 22375, 24313",
		.iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
		    GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
		.iidr_mask = ~GITS_IIDR_REVISION_MASK,
		.func = its_quirk_cavium_22375,
	},
};
303
304 #define gic_its_read_4(sc, reg)                 \
305     bus_read_4((sc)->sc_its_res, (reg))
306 #define gic_its_read_8(sc, reg)                 \
307     bus_read_8((sc)->sc_its_res, (reg))
308
309 #define gic_its_write_4(sc, reg, val)           \
310     bus_write_4((sc)->sc_its_res, (reg), (val))
311 #define gic_its_write_8(sc, reg, val)           \
312     bus_write_8((sc)->sc_its_res, (reg), (val))
313
314 static device_attach_t gicv3_its_attach;
315 static device_detach_t gicv3_its_detach;
316
317 static pic_disable_intr_t gicv3_its_disable_intr;
318 static pic_enable_intr_t gicv3_its_enable_intr;
319 static pic_map_intr_t gicv3_its_map_intr;
320 static pic_setup_intr_t gicv3_its_setup_intr;
321 static pic_post_filter_t gicv3_its_post_filter;
322 static pic_post_ithread_t gicv3_its_post_ithread;
323 static pic_pre_ithread_t gicv3_its_pre_ithread;
324 static pic_bind_intr_t gicv3_its_bind_intr;
325 #ifdef SMP
326 static pic_init_secondary_t gicv3_its_init_secondary;
327 #endif
328 static msi_alloc_msi_t gicv3_its_alloc_msi;
329 static msi_release_msi_t gicv3_its_release_msi;
330 static msi_alloc_msix_t gicv3_its_alloc_msix;
331 static msi_release_msix_t gicv3_its_release_msix;
332 static msi_map_msi_t gicv3_its_map_msi;
333 #ifdef IOMMU
334 static msi_iommu_init_t gicv3_iommu_init;
335 static msi_iommu_deinit_t gicv3_iommu_deinit;
336 #endif
337
338 static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
339 static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
340 static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
341 static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
342 static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
343 static void its_cmd_invall(device_t, struct its_col *);
344
/*
 * Driver method dispatch table (device, PIC and MSI interfaces).
 * NOTE(review): device_attach is not listed here — presumably the
 * FDT/ACPI front-ends supply their own attach that calls
 * gicv3_its_attach; confirm against the subclassed drivers.
 */
static device_method_t gicv3_its_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gicv3_its_detach),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gicv3_its_disable_intr),
	DEVMETHOD(pic_enable_intr,	gicv3_its_enable_intr),
	DEVMETHOD(pic_map_intr,		gicv3_its_map_intr),
	DEVMETHOD(pic_setup_intr,	gicv3_its_setup_intr),
	DEVMETHOD(pic_post_filter,	gicv3_its_post_filter),
	DEVMETHOD(pic_post_ithread,	gicv3_its_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gicv3_its_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gicv3_its_bind_intr),
	DEVMETHOD(pic_init_secondary,	gicv3_its_init_secondary),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	gicv3_its_alloc_msi),
	DEVMETHOD(msi_release_msi,	gicv3_its_release_msi),
	DEVMETHOD(msi_alloc_msix,	gicv3_its_alloc_msix),
	DEVMETHOD(msi_release_msix,	gicv3_its_release_msix),
	DEVMETHOD(msi_map_msi,		gicv3_its_map_msi),
#ifdef IOMMU
	DEVMETHOD(msi_iommu_init,	gicv3_iommu_init),
	DEVMETHOD(msi_iommu_deinit,	gicv3_iommu_deinit),
#endif

	/* End */
	DEVMETHOD_END
};
376
377 static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
378     sizeof(struct gicv3_its_softc));
379
/*
 * Allocate the ITS command circular buffer and program its base
 * (GITS_CBASER).  If the hardware will not honour the requested
 * inner-shareable attribute, fall back to non-cacheable/non-shareable
 * and remember that every command must be cache-flushed before the
 * ITS reads it (ITS_FLAGS_CMDQ_FLUSH).
 */
static void
gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
{
	vm_paddr_t cmd_paddr;
	uint64_t reg, tmp;

	/* Set up the command circular buffer */
	sc->sc_its_cmd_base = contigmalloc(ITS_CMDQ_SIZE, M_GICV3_ITS,
	    M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN, 0);
	sc->sc_its_cmd_next_idx = 0;

	cmd_paddr = vtophys(sc->sc_its_cmd_base);

	/* Set the base of the command buffer (size field is in 4KB units) */
	reg = GITS_CBASER_VALID |
	    (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
	    cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
	    (ITS_CMDQ_SIZE / 4096 - 1);
	gic_its_write_8(sc, GITS_CBASER, reg);

	/* Read back to check for fixed value fields */
	tmp = gic_its_read_8(sc, GITS_CBASER);

	if ((tmp & GITS_CBASER_SHARE_MASK) !=
	    (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
		/* Check if the hardware reported non-shareable */
		if ((tmp & GITS_CBASER_SHARE_MASK) ==
		    (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
			/* If so remove the cache attribute */
			reg &= ~GITS_CBASER_CACHE_MASK;
			reg &= ~GITS_CBASER_SHARE_MASK;
			/* Set to Non-cacheable, Non-shareable */
			reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
			reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;

			gic_its_write_8(sc, GITS_CBASER, reg);
		}

		/* The command queue has to be flushed after each command */
		sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
	}

	/* Get the next command from the start of the buffer */
	gic_its_write_8(sc, GITS_CWRITER, 0x0);
}
425
/*
 * Allocate and program the ITS private tables (GITS_BASERn): the
 * device table, collection table, etc.  For each implemented table we
 * allocate physically-contiguous memory and then negotiate attributes
 * with the hardware: if a read-back of GITS_BASERn shows the
 * shareability or page size was not accepted, retry with the value
 * the hardware reported (shareability) or the next smaller page size.
 *
 * Returns 0 on success, or ENXIO if a GITS_BASERn register cannot be
 * programmed to an acceptable value.
 */
static int
gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
{
	vm_offset_t table;
	vm_paddr_t paddr;
	uint64_t cache, reg, share, tmp, type;
	size_t esize, its_tbl_size, nidents, nitspages, npages;
	int i, page_size;
	int devbits;

	if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
		/*
		 * GITS_TYPER[17:13] of ThunderX reports that device IDs
		 * are to be 21 bits in length. The entry size of the ITS
		 * table can be read from GITS_BASERn[52:48] and on ThunderX
		 * is supposed to be 8 bytes in length (for device table).
		 * Finally the page size that is to be used by ITS to access
		 * this table will be set to 64KB.
		 *
		 * This gives 0x200000 entries of size 0x8 bytes covered by
		 * 256 pages each of which 64KB in size. The number of pages
		 * (minus 1) should then be written to GITS_BASERn[7:0]. In
		 * that case this value would be 0xFF but on ThunderX the
		 * maximum value that HW accepts is 0xFD.
		 *
		 * Set an arbitrary number of device ID bits to 20 in order
		 * to limit the number of entries in ITS device table to
		 * 0x100000 and the table size to 8MB.
		 */
		devbits = 20;
		cache = 0;
	} else {
		devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
		cache = GITS_BASER_CACHE_WAWB;
	}
	/* Start optimistic: inner-shareable, 64KB pages */
	share = GITS_BASER_SHARE_IS;
	page_size = PAGE_SIZE_64K;

	for (i = 0; i < GITS_BASER_NUM; i++) {
		reg = gic_its_read_8(sc, GITS_BASER(i));
		/* The type of table */
		type = GITS_BASER_TYPE(reg);
		/* The table entry size */
		esize = GITS_BASER_ESIZE(reg);

		switch(type) {
		case GITS_BASER_TYPE_DEV:
			/* Device table: one entry per possible device ID */
			nidents = (1 << devbits);
			its_tbl_size = esize * nidents;
			its_tbl_size = roundup2(its_tbl_size, PAGE_SIZE_64K);
			break;
		case GITS_BASER_TYPE_VP:
		case GITS_BASER_TYPE_PP: /* Undocumented? */
		case GITS_BASER_TYPE_IC:
			its_tbl_size = page_size;
			break;
		default:
			/* Table not implemented or not handled; skip it */
			continue;
		}
		npages = howmany(its_tbl_size, PAGE_SIZE);

		/* Allocate the table */
		table = (vm_offset_t)contigmalloc(npages * PAGE_SIZE,
		    M_GICV3_ITS, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1,
		    PAGE_SIZE_64K, 0);

		sc->sc_its_ptab[i].ptab_vaddr = table;
		sc->sc_its_ptab[i].ptab_size = npages * PAGE_SIZE;

		paddr = vtophys(table);

		/* Retry loop: re-program until the hardware accepts */
		while (1) {
			nitspages = howmany(its_tbl_size, page_size);

			/* Clear the fields we will be setting */
			reg &= ~(GITS_BASER_VALID |
			    GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
			    GITS_BASER_ESIZE_MASK | GITS_BASER_PA_MASK |
			    GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
			    GITS_BASER_SIZE_MASK);
			/* Set the new values */
			reg |= GITS_BASER_VALID |
			    (cache << GITS_BASER_CACHE_SHIFT) |
			    (type << GITS_BASER_TYPE_SHIFT) |
			    ((esize - 1) << GITS_BASER_ESIZE_SHIFT) |
			    paddr | (share << GITS_BASER_SHARE_SHIFT) |
			    (nitspages - 1);

			switch (page_size) {
			case PAGE_SIZE_4K:	/* 4KB */
				reg |=
				    GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_16K:	/* 16KB */
				reg |=
				    GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_64K:	/* 64KB */
				reg |=
				    GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
				break;
			}

			gic_its_write_8(sc, GITS_BASER(i), reg);

			/* Read back to check */
			tmp = gic_its_read_8(sc, GITS_BASER(i));

			/* Do the shareability masks line up? */
			if ((tmp & GITS_BASER_SHARE_MASK) !=
			    (reg & GITS_BASER_SHARE_MASK)) {
				/* Adopt whatever the hardware reported */
				share = (tmp & GITS_BASER_SHARE_MASK) >>
				    GITS_BASER_SHARE_SHIFT;
				continue;
			}

			if ((tmp & GITS_BASER_PSZ_MASK) !=
			    (reg & GITS_BASER_PSZ_MASK)) {
				/* Page size rejected: try the next smaller */
				switch (page_size) {
				case PAGE_SIZE_16K:
					page_size = PAGE_SIZE_4K;
					continue;
				case PAGE_SIZE_64K:
					page_size = PAGE_SIZE_16K;
					continue;
				}
			}

			if (tmp != reg) {
				device_printf(dev, "GITS_BASER%d: "
				    "unable to be updated: %lx != %lx\n",
				    i, reg, tmp);
				return (ENXIO);
			}

			/* We should have made all needed changes */
			break;
		}
	}

	return (0);
}
568
/*
 * Initialise the LPI configuration table.  A single table is shared by
 * all ITS instances, so the allocation is published through the
 * file-global conf_base pointer with a lock-free compare-and-swap: the
 * loser of a concurrent race frees its copy and adopts the winner's.
 */
static void
gicv3_its_conftable_init(struct gicv3_its_softc *sc)
{
	void *conf_table;

	conf_table = atomic_load_ptr(&conf_base);
	if (conf_table == NULL) {
		conf_table = contigmalloc(LPI_CONFTAB_SIZE,
		    M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR,
		    LPI_CONFTAB_ALIGN, 0);

		if (atomic_cmpset_ptr((uintptr_t *)&conf_base,
		    (uintptr_t)NULL, (uintptr_t)conf_table) == 0) {
			/* Lost the race: free ours, use the published table */
			contigfree(conf_table, LPI_CONFTAB_SIZE, M_GICV3_ITS);
			conf_table = atomic_load_ptr(&conf_base);
		}
	}
	sc->sc_conf_base = conf_table;

	/* Set the default configuration */
	memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
	    LPI_CONFTAB_SIZE);

	/* Flush the table to memory */
	cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE);
}
595
596 static void
597 gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
598 {
599         int i;
600
601         for (i = 0; i <= mp_maxid; i++) {
602                 if (CPU_ISSET(i, &sc->sc_cpus) == 0)
603                         continue;
604
605                 sc->sc_pend_base[i] = (vm_offset_t)contigmalloc(
606                     LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
607                     0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);
608
609                 /* Flush so the ITS can see the memory */
610                 cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i],
611                     LPI_PENDTAB_SIZE);
612         }
613 }
614
/*
 * Per-CPU LPI setup: program the redistributor's LPI configuration
 * table base (GICR_PROPBASER) and pending table base (GICR_PENDBASER)
 * for the current CPU, downgrading cache/shareability attributes if
 * the hardware rejects them, then enable LPIs in GICR_CTLR.
 */
static void
its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	uint64_t xbaser, tmp;
	uint32_t ctlr;
	u_int cpuid;

	gicv3 = device_get_parent(dev);
	cpuid = PCPU_GET(cpuid);

	/* Disable LPIs */
	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
	ctlr &= ~GICR_CTLR_LPI_ENABLE;
	gic_r_write_4(gicv3, GICR_CTLR, ctlr);

	/* Make sure changes are observable by the GIC */
	dsb(sy);

	/*
	 * Set the redistributor base
	 */
	xbaser = vtophys(sc->sc_conf_base) |
	    (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
	    (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
	    (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
	gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);

	/* Check the cache attributes we set */
	tmp = gic_r_read_8(gicv3, GICR_PROPBASER);

	if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
	    (xbaser & GICR_PROPBASER_SHARE_MASK)) {
		if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
		    (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
			/* We need to mark as non-cacheable */
			xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
			    GICR_PROPBASER_CACHE_MASK);
			/* Non-cacheable */
			xbaser |= GICR_PROPBASER_CACHE_NIN <<
			    GICR_PROPBASER_CACHE_SHIFT;
			/* Non-shareable */
			xbaser |= GICR_PROPBASER_SHARE_NS <<
			    GICR_PROPBASER_SHARE_SHIFT;
			gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
		}
		/* Config table writes must be flushed to memory from now on */
		sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
	}

	/*
	 * Set the LPI pending table base
	 */
	xbaser = vtophys(sc->sc_pend_base[cpuid]) |
	    (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
	    (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);

	gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);

	tmp = gic_r_read_8(gicv3, GICR_PENDBASER);

	if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
	    (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
		/* Clear the cache and shareability bits */
		xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
		    GICR_PENDBASER_SHARE_MASK);
		/* Mark as non-shareable */
		xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
		/* And non-cacheable */
		xbaser |= GICR_PENDBASER_CACHE_NIN <<
		    GICR_PENDBASER_CACHE_SHIFT;
	}

	/* Enable LPIs */
	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
	ctlr |= GICR_CTLR_LPI_ENABLE;
	gic_r_write_4(gicv3, GICR_CTLR, ctlr);

	/* Make sure the GIC has seen everything */
	dsb(sy);
}
695
/*
 * Per-CPU ITS initialisation: run once per CPU that this ITS serves.
 * Performs the one-time LPI setup for the CPU's redistributor, then
 * maps the CPU's collection to its target redistributor (by physical
 * address or CPU number, depending on GITS_TYPER.PTA) and invalidates
 * any cached configuration for it.
 *
 * Returns 0 on success (or if this CPU is not served by this ITS),
 * ENXIO if the redistributor lacks physical LPI support.
 */
static int
its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	vm_paddr_t target;
	u_int cpuid;
	struct redist_pcpu *rpcpu;

	gicv3 = device_get_parent(dev);
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sc->sc_cpus))
		return (0);

	/* Check if the ITS is enabled on this CPU */
	if ((gic_r_read_8(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0)
		return (ENXIO);

	rpcpu = gicv3_get_redist(dev);

	/* Do per-cpu LPI init once */
	if (!rpcpu->lpi_enabled) {
		its_init_cpu_lpi(dev, sc);
		rpcpu->lpi_enabled = true;
	}

	if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
		/* This ITS wants the redistributor physical address */
		target = vtophys(rman_get_virtual(&rpcpu->res));
	} else {
		/* This ITS wants the unique processor number */
		target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) <<
		    CMD_TARGET_SHIFT;
	}

	sc->sc_its_cols[cpuid]->col_target = target;
	sc->sc_its_cols[cpuid]->col_id = cpuid;

	/* Map the collection and flush any stale cached state for it */
	its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
	its_cmd_invall(dev, sc->sc_its_cols[cpuid]);

	return (0);
}
738
739 static int
740 gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS)
741 {
742         struct gicv3_its_softc *sc;
743         int rv;
744
745         sc = arg1;
746
747         rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req);
748         if (rv != 0 || req->newptr == NULL)
749                 return (rv);
750         if (sc->trace_enable)
751                 gic_its_write_8(sc, GITS_TRKCTLR, 3);
752         else
753                 gic_its_write_8(sc, GITS_TRKCTLR, 0);
754
755         return (0);
756 }
757
/*
 * Sysctl handler for "tracing.capture": dumps the hardware message
 * tracing registers (GITS_TRK*) as a formatted string.
 *
 * Returns 0 on success, ENOMEM if the sbuf cannot be allocated, or
 * the sbuf_finish() error.
 */
static int
gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	struct sbuf *sb;
	int err;

	sc = arg1;
	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL) {
		device_printf(sc->dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}
	sbuf_cat(sb, "\n");
	sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKCTLR));
	sbuf_printf(sb, "GITS_TRKR:    0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKR));
	sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKDIDR));
	sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKPIDR));
	sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKVIDR));
	sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n",
	   gic_its_read_4(sc, GITS_TRKTGTR));

	err = sbuf_finish(sb);
	if (err)
		device_printf(sc->dev, "Error finishing sbuf: %d\n", err);
	sbuf_delete(sb);
	return(err);
}
791
792 static int
793 gicv3_its_init_sysctl(struct gicv3_its_softc *sc)
794 {
795         struct sysctl_oid *oid, *child;
796         struct sysctl_ctx_list *ctx_list;
797
798         ctx_list = device_get_sysctl_ctx(sc->dev);
799         child = device_get_sysctl_tree(sc->dev);
800         oid = SYSCTL_ADD_NODE(ctx_list,
801             SYSCTL_CHILDREN(child), OID_AUTO, "tracing",
802             CTLFLAG_RD| CTLFLAG_MPSAFE, NULL, "Messages tracing");
803         if (oid == NULL)
804                 return (ENXIO);
805
806         /* Add registers */
807         SYSCTL_ADD_PROC(ctx_list,
808             SYSCTL_CHILDREN(oid), OID_AUTO, "enable",
809             CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
810             gicv3_its_sysctl_trace_enable, "CU", "Enable tracing");
811         SYSCTL_ADD_PROC(ctx_list,
812             SYSCTL_CHILDREN(oid), OID_AUTO, "capture",
813             CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
814             gicv3_its_sysctl_trace_regs, "", "Captured tracing registers.");
815
816         return (0);
817 }
818
819 static int
820 gicv3_its_attach(device_t dev)
821 {
822         struct gicv3_its_softc *sc;
823         int domain, err, i, rid;
824         uint64_t phys;
825         uint32_t iidr;
826
827         sc = device_get_softc(dev);
828
829         sc->sc_irq_length = gicv3_get_nirqs(dev);
830         sc->sc_irq_base = GIC_FIRST_LPI;
831         sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length;
832
833         rid = 0;
834         sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
835             RF_ACTIVE);
836         if (sc->sc_its_res == NULL) {
837                 device_printf(dev, "Could not allocate memory\n");
838                 return (ENXIO);
839         }
840
841         phys = rounddown2(vtophys(rman_get_virtual(sc->sc_its_res)) +
842             GITS_TRANSLATER, PAGE_SIZE);
843         sc->ma = malloc(sizeof(struct vm_page), M_DEVBUF, M_WAITOK | M_ZERO);
844         vm_page_initfake(sc->ma, phys, VM_MEMATTR_DEFAULT);
845
846         iidr = gic_its_read_4(sc, GITS_IIDR);
847         for (i = 0; i < nitems(its_quirks); i++) {
848                 if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
849                         if (bootverbose) {
850                                 device_printf(dev, "Applying %s\n",
851                                     its_quirks[i].desc);
852                         }
853                         its_quirks[i].func(dev);
854                         break;
855                 }
856         }
857
858         /* Allocate the private tables */
859         err = gicv3_its_table_init(dev, sc);
860         if (err != 0)
861                 return (err);
862
863         /* Protects access to the device list */
864         mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);
865
866         /* Protects access to the ITS command circular buffer. */
867         mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);
868
869         CPU_ZERO(&sc->sc_cpus);
870         if (bus_get_domain(dev, &domain) == 0) {
871                 if (domain < MAXMEMDOM)
872                         CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
873         } else {
874                 CPU_COPY(&all_cpus, &sc->sc_cpus);
875         }
876
877         /* Allocate the command circular buffer */
878         gicv3_its_cmdq_init(sc);
879
880         /* Allocate the per-CPU collections */
881         for (int cpu = 0; cpu <= mp_maxid; cpu++)
882                 if (CPU_ISSET(cpu, &sc->sc_cpus) != 0)
883                         sc->sc_its_cols[cpu] = malloc(
884                             sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
885                             M_WAITOK | M_ZERO);
886                 else
887                         sc->sc_its_cols[cpu] = NULL;
888
889         /* Enable the ITS */
890         gic_its_write_4(sc, GITS_CTLR,
891             gic_its_read_4(sc, GITS_CTLR) | GITS_CTLR_EN);
892
893         /* Create the LPI configuration table */
894         gicv3_its_conftable_init(sc);
895
896         /* And the pending tebles */
897         gicv3_its_pendtables_init(sc);
898
899         /* Enable LPIs on this CPU */
900         its_init_cpu(dev, sc);
901
902         TAILQ_INIT(&sc->sc_its_dev_list);
903         TAILQ_INIT(&sc->sc_free_irqs);
904
905         /*
906          * Create the vmem object to allocate INTRNG IRQs from. We try to
907          * use all IRQs not already used by the GICv3.
908          * XXX: This assumes there are no other interrupt controllers in the
909          * system.
910          */
911         sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0,
912             gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK);
913
914         sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length,
915             M_GICV3_ITS, M_WAITOK | M_ZERO);
916
917         /* For GIC-500 install tracking sysctls. */
918         if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) ==
919             GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0))
920                 gicv3_its_init_sysctl(sc);
921
922         return (0);
923 }
924
static int
gicv3_its_detach(device_t dev)
{

	/* Detach is not supported; the ITS stays attached for system life. */
	return (ENXIO);
}
931
932 static void
933 its_quirk_cavium_22375(device_t dev)
934 {
935         struct gicv3_its_softc *sc;
936
937         sc = device_get_softc(dev);
938         sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;
939 }
940
941 static void
942 gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
943 {
944         struct gicv3_its_softc *sc;
945         struct gicv3_its_irqsrc *girq;
946         uint8_t *conf;
947
948         sc = device_get_softc(dev);
949         girq = (struct gicv3_its_irqsrc *)isrc;
950         conf = sc->sc_conf_base;
951
952         conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE;
953
954         if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
955                 /* Clean D-cache under command. */
956                 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
957         } else {
958                 /* DSB inner shareable, store */
959                 dsb(ishst);
960         }
961
962         its_cmd_inv(dev, girq->gi_its_dev, girq);
963 }
964
965 static void
966 gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
967 {
968         struct gicv3_its_softc *sc;
969         struct gicv3_its_irqsrc *girq;
970         uint8_t *conf;
971
972         sc = device_get_softc(dev);
973         girq = (struct gicv3_its_irqsrc *)isrc;
974         conf = sc->sc_conf_base;
975
976         conf[girq->gi_lpi] |= LPI_CONF_ENABLE;
977
978         if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
979                 /* Clean D-cache under command. */
980                 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
981         } else {
982                 /* DSB inner shareable, store */
983                 dsb(ishst);
984         }
985
986         its_cmd_inv(dev, girq->gi_its_dev, girq);
987 }
988
989 static int
990 gicv3_its_intr(void *arg, uintptr_t irq)
991 {
992         struct gicv3_its_softc *sc = arg;
993         struct gicv3_its_irqsrc *girq;
994         struct trapframe *tf;
995
996         irq -= sc->sc_irq_base;
997         girq = sc->sc_irqs[irq];
998         if (girq == NULL)
999                 panic("gicv3_its_intr: Invalid interrupt %ld",
1000                     irq + sc->sc_irq_base);
1001
1002         tf = curthread->td_intr_frame;
1003         intr_isrc_dispatch(&girq->gi_isrc, tf);
1004         return (FILTER_HANDLED);
1005 }
1006
1007 static void
1008 gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
1009 {
1010         struct gicv3_its_irqsrc *girq;
1011         struct gicv3_its_softc *sc;
1012
1013         sc = device_get_softc(dev);
1014         girq = (struct gicv3_its_irqsrc *)isrc;
1015         gicv3_its_disable_intr(dev, isrc);
1016         gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
1017 }
1018
static void
gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	/* Re-enable the LPI now that the ithread has finished. */
	gicv3_its_enable_intr(dev, isrc);
}
1025
1026 static void
1027 gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
1028 {
1029         struct gicv3_its_irqsrc *girq;
1030         struct gicv3_its_softc *sc;
1031
1032         sc = device_get_softc(dev);
1033         girq = (struct gicv3_its_irqsrc *)isrc;
1034         gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
1035 }
1036
1037 static int
1038 gicv3_its_select_cpu(device_t dev, struct intr_irqsrc *isrc)
1039 {
1040         struct gicv3_its_softc *sc;
1041
1042         sc = device_get_softc(dev);
1043         if (CPU_EMPTY(&isrc->isrc_cpu)) {
1044                 sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
1045                     &sc->sc_cpus);
1046                 CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
1047         }
1048
1049         return (0);
1050 }
1051
1052 static int
1053 gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
1054 {
1055         struct gicv3_its_irqsrc *girq;
1056
1057         gicv3_its_select_cpu(dev, isrc);
1058
1059         girq = (struct gicv3_its_irqsrc *)isrc;
1060         its_cmd_movi(dev, girq);
1061         return (0);
1062 }
1063
static int
gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{

	/*
	 * This should never happen, we only call this function to map
	 * interrupts found before the controller driver is ready.
	 * MSI/MSI-X interrupts are mapped via the alloc_msi/alloc_msix
	 * paths instead.
	 */
	panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
}
1075
1076 static int
1077 gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
1078     struct resource *res, struct intr_map_data *data)
1079 {
1080
1081         /* Bind the interrupt to a CPU */
1082         gicv3_its_bind_intr(dev, isrc);
1083
1084         return (0);
1085 }
1086
1087 #ifdef SMP
1088 static void
1089 gicv3_its_init_secondary(device_t dev)
1090 {
1091         struct gicv3_its_softc *sc;
1092
1093         sc = device_get_softc(dev);
1094
1095         /*
1096          * This is fatal as otherwise we may bind interrupts to this CPU.
1097          * We need a way to tell the interrupt framework to only bind to a
1098          * subset of given CPUs when it performs the shuffle.
1099          */
1100         if (its_init_cpu(dev, sc) != 0)
1101                 panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
1102                     PCPU_GET(cpuid));
1103 }
1104 #endif
1105
1106 static uint32_t
1107 its_get_devid(device_t pci_dev)
1108 {
1109         uintptr_t id;
1110
1111         if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
1112                 panic("%s: %s: Unable to get the MSI DeviceID", __func__,
1113                     device_get_nameunit(pci_dev));
1114
1115         return (id);
1116 }
1117
1118 static struct its_dev *
1119 its_device_find(device_t dev, device_t child)
1120 {
1121         struct gicv3_its_softc *sc;
1122         struct its_dev *its_dev = NULL;
1123
1124         sc = device_get_softc(dev);
1125
1126         mtx_lock_spin(&sc->sc_its_dev_lock);
1127         TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
1128                 if (its_dev->pci_dev == child)
1129                         break;
1130         }
1131         mtx_unlock_spin(&sc->sc_its_dev_lock);
1132
1133         return (its_dev);
1134 }
1135
/*
 * Find or create the per-device ITS state for @child: a contiguous range
 * of @nvecs IRQ numbers and an Interrupt Translation Table (ITT) mapped
 * into the ITS.  Returns NULL on allocation failure.
 */
static struct its_dev *
its_device_get(device_t dev, device_t child, u_int nvecs)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev;
	vmem_addr_t irq_base;
	size_t esize;

	sc = device_get_softc(dev);

	/*
	 * NOTE(review): no lock is held between this lookup and the list
	 * insertion below; concurrent callers for the same child could
	 * create duplicate entries — confirm callers serialize per device.
	 */
	its_dev = its_device_find(dev, child);
	if (its_dev != NULL)
		return (its_dev);

	its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
	if (its_dev == NULL)
		return (NULL);

	its_dev->pci_dev = child;
	its_dev->devid = its_get_devid(child);

	its_dev->lpis.lpi_busy = 0;
	its_dev->lpis.lpi_num = nvecs;
	its_dev->lpis.lpi_free = nvecs;

	/* Reserve a contiguous range of nvecs IRQ numbers for this device. */
	if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
	    &irq_base) != 0) {
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}
	its_dev->lpis.lpi_base = irq_base;

	/* Get ITT entry size */
	esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));

	/*
	 * Allocate ITT for this device.
	 * PA has to be 256 B aligned. At least two entries for device.
	 */
	its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
	its_dev->itt = (vm_offset_t)contigmalloc(its_dev->itt_size,
	    M_GICV3_ITS, M_NOWAIT | M_ZERO, 0, LPI_INT_TRANS_TAB_MAX_ADDR,
	    LPI_INT_TRANS_TAB_ALIGN, 0);
	if (its_dev->itt == 0) {
		/* Roll back the IRQ range reservation. */
		vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Map device to its ITT */
	its_cmd_mapd(dev, its_dev, 1);

	return (its_dev);
}
1194
/*
 * Tear down the per-device ITS state created by its_device_get().
 * The device must be unmapped in the ITS before its ITT is freed.
 */
static void
its_device_release(device_t dev, struct its_dev *its_dev)
{
	struct gicv3_its_softc *sc;

	KASSERT(its_dev->lpis.lpi_busy == 0,
	    ("its_device_release: Trying to release an inuse ITS device"));

	/* Unmap device in ITS */
	its_cmd_mapd(dev, its_dev, 0);

	sc = device_get_softc(dev);

	/* Remove the device from the list of devices */
	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Free ITT */
	KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device"));
	contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS);

	/* Free the IRQ allocation */
	vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
	    its_dev->lpis.lpi_num);

	free(its_dev, M_GICV3_ITS);
}
1223
1224 static struct gicv3_its_irqsrc *
1225 gicv3_its_alloc_irqsrc(device_t dev, struct gicv3_its_softc *sc, u_int irq)
1226 {
1227         struct gicv3_its_irqsrc *girq = NULL;
1228
1229         KASSERT(sc->sc_irqs[irq] == NULL,
1230             ("%s: Interrupt %u already allocated", __func__, irq));
1231         mtx_lock_spin(&sc->sc_its_dev_lock);
1232         if (!TAILQ_EMPTY(&sc->sc_free_irqs)) {
1233                 girq = TAILQ_FIRST(&sc->sc_free_irqs);
1234                 TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link);
1235         }
1236         mtx_unlock_spin(&sc->sc_its_dev_lock);
1237         if (girq == NULL) {
1238                 girq = malloc(sizeof(*girq), M_GICV3_ITS,
1239                     M_NOWAIT | M_ZERO);
1240                 if (girq == NULL)
1241                         return (NULL);
1242                 girq->gi_id = -1;
1243                 if (intr_isrc_register(&girq->gi_isrc, dev, 0,
1244                     "%s,%u", device_get_nameunit(dev), irq) != 0) {
1245                         free(girq, M_GICV3_ITS);
1246                         return (NULL);
1247                 }
1248         }
1249         girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI;
1250         sc->sc_irqs[irq] = girq;
1251
1252         return (girq);
1253 }
1254
1255 static void
1256 gicv3_its_release_irqsrc(struct gicv3_its_softc *sc,
1257     struct gicv3_its_irqsrc *girq)
1258 {
1259         u_int irq;
1260
1261         mtx_assert(&sc->sc_its_dev_lock, MA_OWNED);
1262
1263         irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base;
1264         sc->sc_irqs[irq] = NULL;
1265
1266         girq->gi_id = -1;
1267         girq->gi_its_dev = NULL;
1268         TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link);
1269 }
1270
1271 static int
1272 gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1273     device_t *pic, struct intr_irqsrc **srcs)
1274 {
1275         struct gicv3_its_softc *sc;
1276         struct gicv3_its_irqsrc *girq;
1277         struct its_dev *its_dev;
1278         u_int irq;
1279         int i;
1280
1281         its_dev = its_device_get(dev, child, count);
1282         if (its_dev == NULL)
1283                 return (ENXIO);
1284
1285         KASSERT(its_dev->lpis.lpi_free >= count,
1286             ("gicv3_its_alloc_msi: No free LPIs"));
1287         sc = device_get_softc(dev);
1288         irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
1289             its_dev->lpis.lpi_free;
1290
1291         /* Allocate the irqsrc for each MSI */
1292         for (i = 0; i < count; i++, irq++) {
1293                 its_dev->lpis.lpi_free--;
1294                 srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev,
1295                     sc, irq);
1296                 if (srcs[i] == NULL)
1297                         break;
1298         }
1299
1300         /* The allocation failed, release them */
1301         if (i != count) {
1302                 mtx_lock_spin(&sc->sc_its_dev_lock);
1303                 for (i = 0; i < count; i++) {
1304                         girq = (struct gicv3_its_irqsrc *)srcs[i];
1305                         if (girq == NULL)
1306                                 break;
1307                         gicv3_its_release_irqsrc(sc, girq);
1308                         srcs[i] = NULL;
1309                 }
1310                 mtx_unlock_spin(&sc->sc_its_dev_lock);
1311                 return (ENXIO);
1312         }
1313
1314         /* Finish the allocation now we have all MSI irqsrcs */
1315         for (i = 0; i < count; i++) {
1316                 girq = (struct gicv3_its_irqsrc *)srcs[i];
1317                 girq->gi_id = i;
1318                 girq->gi_its_dev = its_dev;
1319
1320                 /* Map the message to the given IRQ */
1321                 gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
1322                 its_cmd_mapti(dev, girq);
1323         }
1324         its_dev->lpis.lpi_busy += count;
1325         *pic = dev;
1326
1327         return (0);
1328 }
1329
/*
 * Release @count MSI vectors previously allocated to @child; the
 * per-device ITS state is torn down once its last vector is freed.
 */
static int
gicv3_its_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	int i;

	its_dev = its_device_find(dev, child);

	KASSERT(its_dev != NULL,
	    ("gicv3_its_release_msi: Releasing a MSI interrupt with "
	     "no ITS device"));
	KASSERT(its_dev->lpis.lpi_busy >= count,
	    ("gicv3_its_release_msi: Releasing more interrupts than "
	     "were allocated: releasing %d, allocated %d", count,
	     its_dev->lpis.lpi_busy));

	/* Return each irqsrc to the free list under the device lock. */
	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_dev_lock);
	for (i = 0; i < count; i++) {
		girq = (struct gicv3_its_irqsrc *)isrc[i];
		gicv3_its_release_irqsrc(sc, girq);
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	its_dev->lpis.lpi_busy -= count;

	/* Release the ITS device once nothing is using it. */
	if (its_dev->lpis.lpi_busy == 0)
		its_device_release(dev, its_dev);

	return (0);
}
1363
1364 static int
1365 gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
1366     struct intr_irqsrc **isrcp)
1367 {
1368         struct gicv3_its_softc *sc;
1369         struct gicv3_its_irqsrc *girq;
1370         struct its_dev *its_dev;
1371         u_int nvecs, irq;
1372
1373         nvecs = pci_msix_count(child);
1374         its_dev = its_device_get(dev, child, nvecs);
1375         if (its_dev == NULL)
1376                 return (ENXIO);
1377
1378         KASSERT(its_dev->lpis.lpi_free > 0,
1379             ("gicv3_its_alloc_msix: No free LPIs"));
1380         sc = device_get_softc(dev);
1381         irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
1382             its_dev->lpis.lpi_free;
1383
1384         girq = gicv3_its_alloc_irqsrc(dev, sc, irq);
1385         if (girq == NULL)
1386                 return (ENXIO);
1387         girq->gi_id = its_dev->lpis.lpi_busy;
1388         girq->gi_its_dev = its_dev;
1389
1390         its_dev->lpis.lpi_free--;
1391         its_dev->lpis.lpi_busy++;
1392
1393         /* Map the message to the given IRQ */
1394         gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
1395         its_cmd_mapti(dev, girq);
1396
1397         *pic = dev;
1398         *isrcp = (struct intr_irqsrc *)girq;
1399
1400         return (0);
1401 }
1402
1403 static int
1404 gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1405 {
1406         struct gicv3_its_softc *sc;
1407         struct gicv3_its_irqsrc *girq;
1408         struct its_dev *its_dev;
1409
1410         its_dev = its_device_find(dev, child);
1411
1412         KASSERT(its_dev != NULL,
1413             ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
1414              "no ITS device"));
1415         KASSERT(its_dev->lpis.lpi_busy > 0,
1416             ("gicv3_its_release_msix: Releasing more interrupts than "
1417              "were allocated: allocated %d", its_dev->lpis.lpi_busy));
1418
1419         sc = device_get_softc(dev);
1420         girq = (struct gicv3_its_irqsrc *)isrc;
1421         mtx_lock_spin(&sc->sc_its_dev_lock);
1422         gicv3_its_release_irqsrc(sc, girq);
1423         mtx_unlock_spin(&sc->sc_its_dev_lock);
1424         its_dev->lpis.lpi_busy--;
1425
1426         if (its_dev->lpis.lpi_busy == 0)
1427                 its_device_release(dev, its_dev);
1428
1429         return (0);
1430 }
1431
1432 static int
1433 gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1434     uint64_t *addr, uint32_t *data)
1435 {
1436         struct gicv3_its_softc *sc;
1437         struct gicv3_its_irqsrc *girq;
1438
1439         sc = device_get_softc(dev);
1440         girq = (struct gicv3_its_irqsrc *)isrc;
1441
1442         *addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
1443         *data = girq->gi_id;
1444
1445         return (0);
1446 }
1447
1448 #ifdef IOMMU
static int
gicv3_iommu_init(device_t dev, device_t child, struct iommu_domain **domain)
{
	struct gicv3_its_softc *sc;
	struct iommu_ctx *ctx;
	int error;

	/* Map the GITS_TRANSLATER doorbell page into the child's IOMMU ctx. */
	sc = device_get_softc(dev);
	ctx = iommu_get_dev_ctx(child);
	if (ctx == NULL)
		return (ENXIO);
	error = iommu_map_msi(ctx, PAGE_SIZE, GITS_TRANSLATER,
	    IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT, &sc->ma);
	/*
	 * NOTE(review): *domain is set even when iommu_map_msi() failed;
	 * callers should only rely on it when the return value is 0.
	 */
	*domain = iommu_get_ctx_domain(ctx);

	return (error);
}
1466
static void
gicv3_iommu_deinit(device_t dev, device_t child)
{
	struct iommu_ctx *ctx;

	/* Nothing to undo if the child never had an IOMMU context. */
	ctx = iommu_get_dev_ctx(child);
	if (ctx == NULL)
		return;

	iommu_unmap_msi(ctx);
}
1478 #endif
1479
1480 /*
1481  * Commands handling.
1482  */
1483
1484 static __inline void
1485 cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type)
1486 {
1487         /* Command field: DW0 [7:0] */
1488         cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK);
1489         cmd->cmd_dword[0] |= htole64(cmd_type);
1490 }
1491
1492 static __inline void
1493 cmd_format_devid(struct its_cmd *cmd, uint32_t devid)
1494 {
1495         /* Device ID field: DW0 [63:32] */
1496         cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK);
1497         cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT);
1498 }
1499
1500 static __inline void
1501 cmd_format_size(struct its_cmd *cmd, uint16_t size)
1502 {
1503         /* Size field: DW1 [4:0] */
1504         cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK);
1505         cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK));
1506 }
1507
1508 static __inline void
1509 cmd_format_id(struct its_cmd *cmd, uint32_t id)
1510 {
1511         /* ID field: DW1 [31:0] */
1512         cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK);
1513         cmd->cmd_dword[1] |= htole64(id);
1514 }
1515
1516 static __inline void
1517 cmd_format_pid(struct its_cmd *cmd, uint32_t pid)
1518 {
1519         /* Physical ID field: DW1 [63:32] */
1520         cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK);
1521         cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT);
1522 }
1523
1524 static __inline void
1525 cmd_format_col(struct its_cmd *cmd, uint16_t col_id)
1526 {
1527         /* Collection field: DW2 [16:0] */
1528         cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK);
1529         cmd->cmd_dword[2] |= htole64(col_id);
1530 }
1531
1532 static __inline void
1533 cmd_format_target(struct its_cmd *cmd, uint64_t target)
1534 {
1535         /* Target Address field: DW2 [47:16] */
1536         cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK);
1537         cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK);
1538 }
1539
1540 static __inline void
1541 cmd_format_itt(struct its_cmd *cmd, uint64_t itt)
1542 {
1543         /* ITT Address field: DW2 [47:8] */
1544         cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK);
1545         cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK);
1546 }
1547
1548 static __inline void
1549 cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
1550 {
1551         /* Valid field: DW2 [63] */
1552         cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
1553         cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
1554 }
1555
1556 static inline bool
1557 its_cmd_queue_full(struct gicv3_its_softc *sc)
1558 {
1559         size_t read_idx, next_write_idx;
1560
1561         /* Get the index of the next command */
1562         next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
1563             (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
1564         /* And the index of the current command being read */
1565         read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);
1566
1567         /*
1568          * The queue is full when the write offset points
1569          * at the command before the current read offset.
1570          */
1571         return (next_write_idx == read_idx);
1572 }
1573
1574 static inline void
1575 its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
1576 {
1577
1578         if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
1579                 /* Clean D-cache under command. */
1580                 cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
1581         } else {
1582                 /* DSB inner shareable, store */
1583                 dsb(ishst);
1584         }
1585
1586 }
1587
1588 static inline uint64_t
1589 its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
1590 {
1591         uint64_t off;
1592
1593         off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);
1594
1595         return (off);
1596 }
1597
/*
 * Busy-wait until the ITS has consumed all commands in the queue range
 * [cmd_first, cmd_last), or give up after roughly one second.
 */
static void
its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
    struct its_cmd *cmd_last)
{
	struct gicv3_its_softc *sc;
	uint64_t first, last, read;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This is obviously a significant delay.
	 * The reason for that is that currently the time frames for
	 * the command to complete are not known.
	 */
	us_left = 1000000;

	/* Byte offsets bounding the outstanding command range. */
	first = its_cmd_cwriter_offset(sc, cmd_first);
	last = its_cmd_cwriter_offset(sc, cmd_last);

	/*
	 * Poll GITS_CREADR until the ITS read pointer has left
	 * [first, last).  The two comparisons cover the cases where the
	 * range does and does not wrap around the end of the circular
	 * queue (first >= last means it wraps).
	 */
	for (;;) {
		read = gic_its_read_8(sc, GITS_CREADR);
		if (first < last) {
			if (read < first || read >= last)
				break;
		} else if (read < first && read >= last)
			break;

		if (us_left-- == 0) {
			/* This means timeout */
			device_printf(dev,
			    "Timeout while waiting for CMD completion.\n");
			return;
		}
		DELAY(1);
	}
}
1635
1636 static struct its_cmd *
1637 its_cmd_alloc_locked(device_t dev)
1638 {
1639         struct gicv3_its_softc *sc;
1640         struct its_cmd *cmd;
1641         size_t us_left;
1642
1643         sc = device_get_softc(dev);
1644
1645         /*
1646          * XXX ARM64TODO: This is obviously a significant delay.
1647          * The reason for that is that currently the time frames for
1648          * the command to complete (and therefore free the descriptor)
1649          * are not known.
1650          */
1651         us_left = 1000000;
1652
1653         mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
1654         while (its_cmd_queue_full(sc)) {
1655                 if (us_left-- == 0) {
1656                         /* Timeout while waiting for free command */
1657                         device_printf(dev,
1658                             "Timeout while waiting for free command\n");
1659                         return (NULL);
1660                 }
1661                 DELAY(1);
1662         }
1663
1664         cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
1665         sc->sc_its_cmd_next_idx++;
1666         sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);
1667
1668         return (cmd);
1669 }
1670
/*
 * Encode the command described by @desc into the raw queue entry @cmd.
 * Returns the target Re-Distributor address for commands that have one,
 * or ITS_TARGET_NONE for those that do not.
 */
static uint64_t
its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
{
	uint64_t target;
	uint8_t cmd_type;
	u_int size;

	cmd_type = desc->cmd_type;
	target = ITS_TARGET_NONE;

	switch (cmd_type) {
	case ITS_CMD_MOVI:	/* Move interrupt ID to another collection */
		target = desc->cmd_desc_movi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MOVI);
		cmd_format_id(cmd, desc->cmd_desc_movi.id);
		cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id);
		cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
		break;
	case ITS_CMD_SYNC:	/* Wait for previous commands completion */
		target = desc->cmd_desc_sync.col->col_target;
		cmd_format_command(cmd, ITS_CMD_SYNC);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPD:	/* Assign ITT to device */
		cmd_format_command(cmd, ITS_CMD_MAPD);
		cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
		/*
		 * Size describes number of bits to encode interrupt IDs
		 * supported by the device minus one.
		 * When V (valid) bit is zero, this field should be written
		 * as zero.
		 */
		if (desc->cmd_desc_mapd.valid != 0) {
			size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
			size = MAX(1, size) - 1;
		} else
			size = 0;

		cmd_format_size(cmd, size);
		cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
		cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
		break;
	case ITS_CMD_MAPC:	/* Map collection to Re-Distributor */
		target = desc->cmd_desc_mapc.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPC);
		cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
		cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPTI:	/* Map event ID to a physical LPI */
		target = desc->cmd_desc_mapvi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPTI);
		cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
		cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
		break;
	case ITS_CMD_MAPI:	/* Map event ID where event ID == LPI ID */
		target = desc->cmd_desc_mapi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPI);
		cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
		break;
	case ITS_CMD_INV:	/* Invalidate cached LPI configuration */
		target = desc->cmd_desc_inv.col->col_target;
		cmd_format_command(cmd, ITS_CMD_INV);
		cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_inv.pid);
		break;
	case ITS_CMD_INVALL:	/* Invalidate all cached config for a col. */
		cmd_format_command(cmd, ITS_CMD_INVALL);
		cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
		break;
	default:
		panic("its_cmd_prepare: Invalid command: %x", cmd_type);
	}

	return (target);
}
1751
/*
 * Enqueue a single ITS command built from the supplied descriptor,
 * optionally follow it with a SYNC command aimed at the same
 * Re-Distributor, ring the doorbell by updating GITS_CWRITER, and
 * wait for the ITS to consume the command.
 *
 * Returns 0 on success, or EBUSY if no command-queue slot was free.
 */
static int
its_cmd_send(device_t dev, struct its_cmd_desc *desc)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd, *cmd_sync, *cmd_write;
	struct its_col col_sync;
	struct its_cmd_desc desc_sync;
	uint64_t target, cwriter;

	sc = device_get_softc(dev);
	/* Spin lock: callers may be in interrupt-adjacent contexts. */
	mtx_lock_spin(&sc->sc_its_cmd_lock);
	cmd = its_cmd_alloc_locked(dev);
	if (cmd == NULL) {
		device_printf(dev, "could not allocate ITS command\n");
		mtx_unlock_spin(&sc->sc_its_cmd_lock);
		return (EBUSY);
	}

	/*
	 * Fill in the queue slot, then sync it (presumably cache
	 * maintenance so the ITS observes the write) before advancing
	 * GITS_CWRITER below.
	 */
	target = its_cmd_prepare(cmd, desc);
	its_cmd_sync(sc, cmd);

	if (target != ITS_TARGET_NONE) {
		/*
		 * Best effort: if no slot is free for the trailing SYNC
		 * we still issue the primary command without it.
		 */
		cmd_sync = its_cmd_alloc_locked(dev);
		if (cmd_sync != NULL) {
			desc_sync.cmd_type = ITS_CMD_SYNC;
			col_sync.col_target = target;
			desc_sync.cmd_desc_sync.col = &col_sync;
			its_cmd_prepare(cmd_sync, &desc_sync);
			its_cmd_sync(sc, cmd_sync);
		}
	}

	/* Update GITS_CWRITER */
	cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
	gic_its_write_8(sc, GITS_CWRITER, cwriter);
	cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	mtx_unlock_spin(&sc->sc_its_cmd_lock);

	/* Wait (outside the lock) for the ITS to consume the command. */
	its_cmd_wait_completion(dev, cmd, cmd_write);

	return (0);
}
1794
1795 /* Handlers to send commands */
1796 static void
1797 its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
1798 {
1799         struct gicv3_its_softc *sc;
1800         struct its_cmd_desc desc;
1801         struct its_col *col;
1802
1803         sc = device_get_softc(dev);
1804         col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
1805
1806         desc.cmd_type = ITS_CMD_MOVI;
1807         desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
1808         desc.cmd_desc_movi.col = col;
1809         desc.cmd_desc_movi.id = girq->gi_id;
1810
1811         its_cmd_send(dev, &desc);
1812 }
1813
1814 static void
1815 its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
1816 {
1817         struct its_cmd_desc desc;
1818
1819         desc.cmd_type = ITS_CMD_MAPC;
1820         desc.cmd_desc_mapc.col = col;
1821         /*
1822          * Valid bit set - map the collection.
1823          * Valid bit cleared - unmap the collection.
1824          */
1825         desc.cmd_desc_mapc.valid = valid;
1826
1827         its_cmd_send(dev, &desc);
1828 }
1829
/*
 * Issue a MAPTI command: bind the (DeviceID, EventID) pair for girq
 * to a physical LPI and to the collection of the CPU the interrupt
 * is currently bound to.
 */
static void
its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;
	u_int col_id;

	sc = device_get_softc(dev);

	/* Collection of the first CPU in the interrupt's affinity set. */
	col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
	col = sc->sc_its_cols[col_id];

	desc.cmd_type = ITS_CMD_MAPTI;
	desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_mapvi.col = col;
	/* The EventID sent to the device */
	desc.cmd_desc_mapvi.id = girq->gi_id;
	/* The physical interrupt presented to software */
	desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI;

	its_cmd_send(dev, &desc);
}
1853
1854 static void
1855 its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
1856 {
1857         struct its_cmd_desc desc;
1858
1859         desc.cmd_type = ITS_CMD_MAPD;
1860         desc.cmd_desc_mapd.its_dev = its_dev;
1861         desc.cmd_desc_mapd.valid = valid;
1862
1863         its_cmd_send(dev, &desc);
1864 }
1865
1866 static void
1867 its_cmd_inv(device_t dev, struct its_dev *its_dev,
1868     struct gicv3_its_irqsrc *girq)
1869 {
1870         struct gicv3_its_softc *sc;
1871         struct its_cmd_desc desc;
1872         struct its_col *col;
1873
1874         sc = device_get_softc(dev);
1875         col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
1876
1877         desc.cmd_type = ITS_CMD_INV;
1878         /* The EventID sent to the device */
1879         desc.cmd_desc_inv.pid = girq->gi_id;
1880         desc.cmd_desc_inv.its_dev = its_dev;
1881         desc.cmd_desc_inv.col = col;
1882
1883         its_cmd_send(dev, &desc);
1884 }
1885
1886 static void
1887 its_cmd_invall(device_t dev, struct its_col *col)
1888 {
1889         struct its_cmd_desc desc;
1890
1891         desc.cmd_type = ITS_CMD_INVALL;
1892         desc.cmd_desc_invall.col = col;
1893
1894         its_cmd_send(dev, &desc);
1895 }
1896
#ifdef FDT
static device_probe_t gicv3_its_fdt_probe;
static device_attach_t gicv3_its_fdt_attach;

/* newbus glue for the FDT-enumerated flavour of the ITS driver. */
static device_method_t gicv3_its_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_fdt_probe),
	DEVMETHOD(device_attach,	gicv3_its_fdt_attach),

	/* End */
	DEVMETHOD_END
};

/* Inherit all remaining methods from the common gicv3_its_driver. */
#define its_baseclasses its_fdt_baseclasses
DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses
static devclass_t gicv3_its_fdt_devclass;

/* Attach during the interrupt pass, as a child of the GICv3 driver. */
EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver,
    gicv3_its_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
1919 static int
1920 gicv3_its_fdt_probe(device_t dev)
1921 {
1922
1923         if (!ofw_bus_status_okay(dev))
1924                 return (ENXIO);
1925
1926         if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
1927                 return (ENXIO);
1928
1929         device_set_desc(dev, "ARM GIC Interrupt Translation Service");
1930         return (BUS_PROBE_DEFAULT);
1931 }
1932
/*
 * Attach the FDT-enumerated ITS: run the common attach code, then
 * register the device as both an interrupt controller (PIC) and an
 * MSI provider, keyed by the node's xref phandle.
 *
 * NOTE(review): the error paths after gicv3_its_attach() succeed do
 * not unwind the partial registration (PIC registered but MSI
 * registration failed, etc.) — confirm whether teardown is needed.
 */
static int
gicv3_its_fdt_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	phandle_t xref;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	/* Register this device as a interrupt controller */
	xref = OF_xref_from_node(ofw_bus_get_node(dev));
	sc->sc_pic = intr_pic_register(dev, xref);
	err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
	if (err != 0) {
		device_printf(dev, "Failed to add PIC handler: %d\n", err);
		return (err);
	}

	/* Register this device to handle MSI interrupts */
	err = intr_msi_register(dev, xref);
	if (err != 0) {
		device_printf(dev, "Failed to register for MSIs: %d\n", err);
		return (err);
	}

	return (0);
}
#endif
1965 #endif
1966
#ifdef DEV_ACPI
static device_probe_t gicv3_its_acpi_probe;
static device_attach_t gicv3_its_acpi_attach;

/* newbus glue for the ACPI (MADT)-enumerated flavour of the ITS driver. */
static device_method_t gicv3_its_acpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_acpi_probe),
	DEVMETHOD(device_attach,	gicv3_its_acpi_attach),

	/* End */
	DEVMETHOD_END
};

/* Inherit all remaining methods from the common gicv3_its_driver. */
#define its_baseclasses its_acpi_baseclasses
DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses
static devclass_t gicv3_its_acpi_devclass;

/* Attach during the interrupt pass, as a child of the GICv3 driver. */
EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver,
    gicv3_its_acpi_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
1989 static int
1990 gicv3_its_acpi_probe(device_t dev)
1991 {
1992
1993         if (gic_get_bus(dev) != GIC_BUS_ACPI)
1994                 return (EINVAL);
1995
1996         if (gic_get_hw_rev(dev) < 3)
1997                 return (EINVAL);
1998
1999         device_set_desc(dev, "ARM GIC Interrupt Translation Service");
2000         return (BUS_PROBE_DEFAULT);
2001 }
2002
/*
 * Attach the ACPI-enumerated ITS: run the common attach code, then
 * register the device as both an interrupt controller (PIC) and an
 * MSI provider, keyed by the MSI xref from the parent's ivars.
 *
 * NOTE(review): like the FDT attach path, failures after a successful
 * gicv3_its_attach() leave partial registration in place — confirm
 * whether teardown is needed.
 */
static int
gicv3_its_acpi_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct gic_v3_devinfo *di;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	/* Register this device as a interrupt controller */
	di = device_get_ivars(dev);
	sc->sc_pic = intr_pic_register(dev, di->msi_xref);
	err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
	if (err != 0) {
		device_printf(dev, "Failed to add PIC handler: %d\n", err);
		return (err);
	}

	/* Register this device to handle MSI interrupts */
	err = intr_msi_register(dev, di->msi_xref);
	if (err != 0) {
		device_printf(dev, "Failed to register for MSIs: %d\n", err);
		return (err);
	}

	return (0);
}
2034 #endif