1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  * All rights reserved.
4  *
5  * This software was developed by Andrew Turner under
6  * the sponsorship of the FreeBSD Foundation.
7  *
8  * This software was developed by Semihalf under
9  * the sponsorship of the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32
33 #include "opt_acpi.h"
34 #include "opt_platform.h"
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/cpuset.h>
43 #include <sys/endian.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/module.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/rman.h>
52 #include <sys/smp.h>
53 #include <sys/vmem.h>
54
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57
58 #include <machine/bus.h>
59 #include <machine/intr.h>
60
61 #include <arm/arm/gic_common.h>
62 #include <arm64/arm64/gic_v3_reg.h>
63 #include <arm64/arm64/gic_v3_var.h>
64
65 #ifdef FDT
66 #include <dev/ofw/openfirm.h>
67 #include <dev/ofw/ofw_bus.h>
68 #include <dev/ofw/ofw_bus_subr.h>
69 #endif
70 #include <dev/pci/pcireg.h>
71 #include <dev/pci/pcivar.h>
72
73 #include "pcib_if.h"
74 #include "pic_if.h"
75 #include "msi_if.h"
76
77 MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS",
78     "ARM GICv3 Interrupt Translation Service");
79
80 #define LPI_NIRQS               (64 * 1024)
81
82 /* The size and alignment of the command circular buffer */
83 #define ITS_CMDQ_SIZE           (64 * 1024)     /* Must be a multiple of 4K */
84 #define ITS_CMDQ_ALIGN          (64 * 1024)
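/* 64KB of 32-byte commands gives a 2048-entry circular buffer */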
85
86 #define LPI_CONFTAB_SIZE        LPI_NIRQS
87 #define LPI_CONFTAB_ALIGN       (64 * 1024)
88 #define LPI_CONFTAB_MAX_ADDR    ((1ul << 48) - 1) /* We need a 47 bit PA */
89
90 /* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */
91 #define LPI_PENDTAB_SIZE        ((LPI_NIRQS + GIC_FIRST_LPI) / 8)
92 #define LPI_PENDTAB_ALIGN       (64 * 1024)
93 #define LPI_PENDTAB_MAX_ADDR    ((1ul << 48) - 1) /* We need a 47 bit PA */
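/* LPI_PENDTAB_SIZE works out to (64K + 8K) / 8 = 9KB per CPU */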
94
95 #define LPI_INT_TRANS_TAB_ALIGN 256
96 #define LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)
97
98 /* ITS commands encoding */
99 #define ITS_CMD_MOVI            (0x01)
100 #define ITS_CMD_SYNC            (0x05)
101 #define ITS_CMD_MAPD            (0x08)
102 #define ITS_CMD_MAPC            (0x09)
103 #define ITS_CMD_MAPTI           (0x0a)
104 #define ITS_CMD_MAPI            (0x0b)
105 #define ITS_CMD_INV             (0x0c)
106 #define ITS_CMD_INVALL          (0x0d)
107 /* Command */
108 #define CMD_COMMAND_MASK        (0xFFUL)
109 /* PCI device ID */
110 #define CMD_DEVID_SHIFT         (32)
111 #define CMD_DEVID_MASK          (0xFFFFFFFFUL << CMD_DEVID_SHIFT)
112 /* Size of IRQ ID bitfield */
113 #define CMD_SIZE_MASK           (0xFFUL)
114 /* Virtual LPI ID */
115 #define CMD_ID_MASK             (0xFFFFFFFFUL)
116 /* Physical LPI ID */
117 #define CMD_PID_SHIFT           (32)
118 #define CMD_PID_MASK            (0xFFFFFFFFUL << CMD_PID_SHIFT)
119 /* Collection */
120 #define CMD_COL_MASK            (0xFFFFUL)
121 /* Target (CPU or Re-Distributor) */
122 #define CMD_TARGET_SHIFT        (16)
123 #define CMD_TARGET_MASK         (0xFFFFFFFFUL << CMD_TARGET_SHIFT)
124 /* Interrupt Translation Table address */
125 #define CMD_ITT_MASK            (0xFFFFFFFFFF00UL)
126 /* Valid command bit */
127 #define CMD_VALID_SHIFT         (63)
128 #define CMD_VALID_MASK          (1UL << CMD_VALID_SHIFT)
129
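/*
 * Sentinel target returned by its_cmd_prepare() for commands that do not
 * address a particular redistributor; its_cmd_send() only appends a SYNC
 * when the returned target differs from this value.
 */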
130 #define ITS_TARGET_NONE         0xFBADBEEF
131
132 /* LPI chunk owned by ITS device */
133 struct lpi_chunk {
134         u_int   lpi_base;
135         u_int   lpi_free;       /* First free LPI in set */
136         u_int   lpi_num;        /* Total number of LPIs in chunk */
137         u_int   lpi_busy;       /* Number of busy LPIs in chunk */
138 };
139
140 /* ITS device */
141 struct its_dev {
142         TAILQ_ENTRY(its_dev)    entry;
143         /* PCI device */
144         device_t                pci_dev;
145         /* Device ID (i.e. PCI device ID) */
146         uint32_t                devid;
147         /* List of assigned LPIs */
148         struct lpi_chunk        lpis;
149         /* Virtual address of ITT */
150         vm_offset_t             itt;
151         size_t                  itt_size;
152 };
153
154 /*
155  * ITS command descriptor.
156  * Idea for command description passing taken from Linux.
157  */
158 struct its_cmd_desc {
159         uint8_t cmd_type;
160
161         union {
162                 struct {
163                         struct its_dev *its_dev;
164                         struct its_col *col;
165                         uint32_t id;
166                 } cmd_desc_movi;
167
168                 struct {
169                         struct its_col *col;
170                 } cmd_desc_sync;
171
172                 struct {
173                         struct its_col *col;
174                         uint8_t valid;
175                 } cmd_desc_mapc;
176
177                 struct {
178                         struct its_dev *its_dev;
179                         struct its_col *col;
180                         uint32_t pid;
181                         uint32_t id;
182                 } cmd_desc_mapvi;
183
184                 struct {
185                         struct its_dev *its_dev;
186                         struct its_col *col;
187                         uint32_t pid;
188                 } cmd_desc_mapi;
189
190                 struct {
191                         struct its_dev *its_dev;
192                         uint8_t valid;
193                 } cmd_desc_mapd;
194
195                 struct {
196                         struct its_dev *its_dev;
197                         struct its_col *col;
198                         uint32_t pid;
199                 } cmd_desc_inv;
200
201                 struct {
202                         struct its_col *col;
203                 } cmd_desc_invall;
204         };
205 };
206
207 /* ITS command. Each command is 32 bytes long */
208 struct its_cmd {
209         uint64_t        cmd_dword[4];   /* ITS command double word */
210 };
211
212 /* An ITS private table */
213 struct its_ptable {
214         vm_offset_t     ptab_vaddr;
215         unsigned long   ptab_size;
216 };
217
218 /* ITS collection description. */
219 struct its_col {
220         uint64_t        col_target;     /* Target Re-Distributor */
221         uint64_t        col_id;         /* Collection ID */
222 };
223
224 struct gicv3_its_irqsrc {
225         struct intr_irqsrc      gi_isrc;
226         u_int                   gi_irq;
227         struct its_dev          *gi_its_dev;
228 };
229
230 struct gicv3_its_softc {
231         struct intr_pic *sc_pic;
232         struct resource *sc_its_res;
233
234         cpuset_t        sc_cpus;
235         u_int           gic_irq_cpu;
236
237         struct its_ptable sc_its_ptab[GITS_BASER_NUM];
238         struct its_col *sc_its_cols[MAXCPU];    /* Per-CPU collections */
239
240         /*
241          * TODO: We should get these from the parent as we only want a
242          * single copy of each across the interrupt controller.
243          */
244         vm_offset_t sc_conf_base;
245         vm_offset_t sc_pend_base[MAXCPU];
246
247         /* Command handling */
248         struct mtx sc_its_cmd_lock;
249         struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */
250         size_t sc_its_cmd_next_idx;
251
252         vmem_t *sc_irq_alloc;
253         struct gicv3_its_irqsrc *sc_irqs;
254         u_int   sc_irq_base;
255         u_int   sc_irq_length;
256
257         struct mtx sc_its_dev_lock;
258         TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
259
260 #define ITS_FLAGS_CMDQ_FLUSH            0x00000001
261 #define ITS_FLAGS_LPI_CONF_FLUSH        0x00000002
262 #define ITS_FLAGS_ERRATA_CAVIUM_22375   0x00000004
263         u_int sc_its_flags;
264 };
265
266 typedef void (its_quirk_func_t)(device_t);
267 static its_quirk_func_t its_quirk_cavium_22375;
268
269 static const struct {
270         const char *desc;
271         uint32_t iidr;
272         uint32_t iidr_mask;
273         its_quirk_func_t *func;
274 } its_quirks[] = {
275         {
276                 /* Cavium ThunderX Pass 1.x */
277                 .desc = "Cavium ThunderX errata: 22375, 24313",
278                 .iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
279                     GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
280                 .iidr_mask = ~GITS_IIDR_REVISION_MASK,
281                 .func = its_quirk_cavium_22375,
282         },
283 };
284
285 #define gic_its_read_4(sc, reg)                 \
286     bus_read_4((sc)->sc_its_res, (reg))
287 #define gic_its_read_8(sc, reg)                 \
288     bus_read_8((sc)->sc_its_res, (reg))
289
290 #define gic_its_write_4(sc, reg, val)           \
291     bus_write_4((sc)->sc_its_res, (reg), (val))
292 #define gic_its_write_8(sc, reg, val)           \
293     bus_write_8((sc)->sc_its_res, (reg), (val))
294
295 static device_attach_t gicv3_its_attach;
296 static device_detach_t gicv3_its_detach;
297
298 static pic_disable_intr_t gicv3_its_disable_intr;
299 static pic_enable_intr_t gicv3_its_enable_intr;
300 static pic_map_intr_t gicv3_its_map_intr;
301 static pic_setup_intr_t gicv3_its_setup_intr;
302 static pic_post_filter_t gicv3_its_post_filter;
303 static pic_post_ithread_t gicv3_its_post_ithread;
304 static pic_pre_ithread_t gicv3_its_pre_ithread;
305 static pic_bind_intr_t gicv3_its_bind_intr;
306 #ifdef SMP
307 static pic_init_secondary_t gicv3_its_init_secondary;
308 #endif
309 static msi_alloc_msi_t gicv3_its_alloc_msi;
310 static msi_release_msi_t gicv3_its_release_msi;
311 static msi_alloc_msix_t gicv3_its_alloc_msix;
312 static msi_release_msix_t gicv3_its_release_msix;
313 static msi_map_msi_t gicv3_its_map_msi;
314
315 static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
316 static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
317 static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
318 static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
319 static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
320 static void its_cmd_invall(device_t, struct its_col *);
321
322 static device_method_t gicv3_its_methods[] = {
323         /* Device interface */
324         DEVMETHOD(device_detach,        gicv3_its_detach),
325
326         /* Interrupt controller interface */
327         DEVMETHOD(pic_disable_intr,     gicv3_its_disable_intr),
328         DEVMETHOD(pic_enable_intr,      gicv3_its_enable_intr),
329         DEVMETHOD(pic_map_intr,         gicv3_its_map_intr),
330         DEVMETHOD(pic_setup_intr,       gicv3_its_setup_intr),
331         DEVMETHOD(pic_post_filter,      gicv3_its_post_filter),
332         DEVMETHOD(pic_post_ithread,     gicv3_its_post_ithread),
333         DEVMETHOD(pic_pre_ithread,      gicv3_its_pre_ithread),
334 #ifdef SMP
335         DEVMETHOD(pic_bind_intr,        gicv3_its_bind_intr),
336         DEVMETHOD(pic_init_secondary,   gicv3_its_init_secondary),
337 #endif
338
339         /* MSI/MSI-X */
340         DEVMETHOD(msi_alloc_msi,        gicv3_its_alloc_msi),
341         DEVMETHOD(msi_release_msi,      gicv3_its_release_msi),
342         DEVMETHOD(msi_alloc_msix,       gicv3_its_alloc_msix),
343         DEVMETHOD(msi_release_msix,     gicv3_its_release_msix),
344         DEVMETHOD(msi_map_msi,          gicv3_its_map_msi),
345
346         /* End */
347         DEVMETHOD_END
348 };
349
350 static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
351     sizeof(struct gicv3_its_softc));
352
353 static void
354 gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
355 {
356         vm_paddr_t cmd_paddr;
357         uint64_t reg, tmp;
358
359         /* Set up the command circular buffer */
360         sc->sc_its_cmd_base = contigmalloc(ITS_CMDQ_SIZE, M_GICV3_ITS,
361             M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN, 0);
362         sc->sc_its_cmd_next_idx = 0;
363
364         cmd_paddr = vtophys(sc->sc_its_cmd_base);
365
366         /* Set the base of the command buffer */
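        /* Bits [7:0] of GITS_CBASER hold the number of 4KB pages, minus one */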
367         reg = GITS_CBASER_VALID |
368             (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
369             cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
370             (ITS_CMDQ_SIZE / 4096 - 1);
371         gic_its_write_8(sc, GITS_CBASER, reg);
372
373         /* Read back to check for fixed value fields */
374         tmp = gic_its_read_8(sc, GITS_CBASER);
375
376         if ((tmp & GITS_CBASER_SHARE_MASK) !=
377             (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
378                 /* Check if the hardware reported non-shareable */
379                 if ((tmp & GITS_CBASER_SHARE_MASK) ==
380                     (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
381                         /* If so remove the cache attribute */
382                         reg &= ~GITS_CBASER_CACHE_MASK;
383                         reg &= ~GITS_CBASER_SHARE_MASK;
384                         /* Set to Non-cacheable, Non-shareable */
385                         reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
386                         reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;
387
388                         gic_its_write_8(sc, GITS_CBASER, reg);
389                 }
390
391                 /* The command queue has to be flushed after each command */
392                 sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
393         }
394
395         /* Get the next command from the start of the buffer */
396         gic_its_write_8(sc, GITS_CWRITER, 0x0);
397 }
398
399 static int
400 gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
401 {
402         vm_offset_t table;
403         vm_paddr_t paddr;
404         uint64_t cache, reg, share, tmp, type;
405         size_t esize, its_tbl_size, nidents, nitspages, npages;
406         int i, page_size;
407         int devbits;
408
409         if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
410                 /*
411                  * GITS_TYPER[17:13] of ThunderX reports that device IDs
412                  * are to be 21 bits in length. The entry size of the ITS
413                  * table can be read from GITS_BASERn[52:48] and on ThunderX
414                  * is supposed to be 8 bytes in length (for device table).
415                  * Finally the page size that is to be used by ITS to access
416                  * this table will be set to 64KB.
417                  *
418                  * This gives 0x200000 entries of size 0x8 bytes covered by
419                  * 256 pages each of which 64KB in size. The number of pages
420                  * (minus 1) should then be written to GITS_BASERn[7:0]. In
421                  * that case this value would be 0xFF but on ThunderX the
422                  * maximum value that HW accepts is 0xFD.
423                  *
424                  * Set an arbitrary number of device ID bits to 20 in order
425                  * to limit the number of entries in ITS device table to
426                  * 0x100000 and the table size to 8MB.
427                  */
428                 devbits = 20;
429                 cache = 0;
430         } else {
431                 devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
432                 cache = GITS_BASER_CACHE_WAWB;
433         }
434         share = GITS_BASER_SHARE_IS;
435         page_size = PAGE_SIZE_64K;
436
437         for (i = 0; i < GITS_BASER_NUM; i++) {
438                 reg = gic_its_read_8(sc, GITS_BASER(i));
439                 /* The type of table */
440                 type = GITS_BASER_TYPE(reg);
441                 /* The table entry size */
442                 esize = GITS_BASER_ESIZE(reg);
443
444                 switch(type) {
445                 case GITS_BASER_TYPE_DEV:
446                         nidents = (1 << devbits);
447                         its_tbl_size = esize * nidents;
448                         its_tbl_size = roundup2(its_tbl_size, PAGE_SIZE_64K);
449                         break;
450                 case GITS_BASER_TYPE_VP:
451                 case GITS_BASER_TYPE_PP: /* Undocumented? */
452                 case GITS_BASER_TYPE_IC:
453                         its_tbl_size = page_size;
454                         break;
455                 default:
456                         continue;
457                 }
458                 npages = howmany(its_tbl_size, PAGE_SIZE);
459
460                 /* Allocate the table */
461                 table = (vm_offset_t)contigmalloc(npages * PAGE_SIZE,
462                     M_GICV3_ITS, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1,
463                     PAGE_SIZE_64K, 0);
464
465                 sc->sc_its_ptab[i].ptab_vaddr = table;
466                 sc->sc_its_ptab[i].ptab_size = npages * PAGE_SIZE;
467
468                 paddr = vtophys(table);
469
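                /*
                 * Write GITS_BASERn and read it back; if the ITS downgraded
                 * the shareability or rejected the page size, retry with a
                 * value the hardware will accept.
                 */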
470                 while (1) {
471                         nitspages = howmany(its_tbl_size, page_size);
472
473                         /* Clear the fields we will be setting */
474                         reg &= ~(GITS_BASER_VALID |
475                             GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
476                             GITS_BASER_ESIZE_MASK | GITS_BASER_PA_MASK |
477                             GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
478                             GITS_BASER_SIZE_MASK);
479                         /* Set the new values */
480                         reg |= GITS_BASER_VALID |
481                             (cache << GITS_BASER_CACHE_SHIFT) |
482                             (type << GITS_BASER_TYPE_SHIFT) |
483                             ((esize - 1) << GITS_BASER_ESIZE_SHIFT) |
484                             paddr | (share << GITS_BASER_SHARE_SHIFT) |
485                             (nitspages - 1);
486
487                         switch (page_size) {
488                         case PAGE_SIZE:         /* 4KB */
489                                 reg |=
490                                     GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
491                                 break;
492                         case PAGE_SIZE_16K:     /* 16KB */
493                                 reg |=
494                                     GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
495                                 break;
496                         case PAGE_SIZE_64K:     /* 64KB */
497                                 reg |=
498                                     GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
499                                 break;
500                         }
501
502                         gic_its_write_8(sc, GITS_BASER(i), reg);
503
504                         /* Read back to check */
505                         tmp = gic_its_read_8(sc, GITS_BASER(i));
506
507                         /* Do the shareability masks line up? */
508                         if ((tmp & GITS_BASER_SHARE_MASK) !=
509                             (reg & GITS_BASER_SHARE_MASK)) {
510                                 share = (tmp & GITS_BASER_SHARE_MASK) >>
511                                     GITS_BASER_SHARE_SHIFT;
512                                 continue;
513                         }
514
515                         if ((tmp & GITS_BASER_PSZ_MASK) !=
516                             (reg & GITS_BASER_PSZ_MASK)) {
517                                 switch (page_size) {
518                                 case PAGE_SIZE_16K:
519                                         page_size = PAGE_SIZE;
520                                         continue;
521                                 case PAGE_SIZE_64K:
522                                         page_size = PAGE_SIZE_16K;
523                                         continue;
524                                 }
525                         }
526
527                         if (tmp != reg) {
528                                 device_printf(dev, "GITS_BASER%d: "
529                                     "unable to be updated: %lx != %lx\n",
530                                     i, reg, tmp);
531                                 return (ENXIO);
532                         }
533
534                         /* We should have made all needed changes */
535                         break;
536                 }
537         }
538
539         return (0);
540 }
541
542 static void
543 gicv3_its_conftable_init(struct gicv3_its_softc *sc)
544 {
545
546         sc->sc_conf_base = (vm_offset_t)contigmalloc(LPI_CONFTAB_SIZE,
547             M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR, LPI_CONFTAB_ALIGN,
548             0);
549
550         /* Set the default configuration */
551         memset((void *)sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
552             LPI_CONFTAB_SIZE);
553
554         /* Flush the table to memory */
555         cpu_dcache_wb_range(sc->sc_conf_base, LPI_CONFTAB_SIZE);
556 }
557
558 static void
559 gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
560 {
561         int i;
562
563         for (i = 0; i <= mp_maxid; i++) {
564                 if (CPU_ISSET(i, &sc->sc_cpus) == 0)
565                         continue;
566
567                 sc->sc_pend_base[i] = (vm_offset_t)contigmalloc(
568                     LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
569                     0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);
570
571                 /* Flush so the ITS can see the memory */
572                 cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i],
573                     LPI_PENDTAB_SIZE);
574         }
575 }
576
577 static void
578 its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
579 {
580         device_t gicv3;
581         uint64_t xbaser, tmp;
582         uint32_t ctlr;
583         u_int cpuid;
584
585         gicv3 = device_get_parent(dev);
586         cpuid = PCPU_GET(cpuid);
587
588         /* Disable LPIs */
589         ctlr = gic_r_read_4(gicv3, GICR_CTLR);
590         ctlr &= ~GICR_CTLR_LPI_ENABLE;
591         gic_r_write_4(gicv3, GICR_CTLR, ctlr);
592
593         /* Make sure changes are observable by the GIC */
594         dsb(sy);
595
596         /*
597          * Set the redistributor base
598          */
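        /* The low bits of GICR_PROPBASER take the number of LPI ID bits, minus one */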
599         xbaser = vtophys(sc->sc_conf_base) |
600             (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
601             (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
602             (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
603         gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
604
605         /* Check the cache attributes we set */
606         tmp = gic_r_read_8(gicv3, GICR_PROPBASER);
607
608         if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
609             (xbaser & GICR_PROPBASER_SHARE_MASK)) {
610                 if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
611                     (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
612                         /* We need to mark as non-cacheable */
613                         xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
614                             GICR_PROPBASER_CACHE_MASK);
615                         /* Non-cacheable */
616                         xbaser |= GICR_PROPBASER_CACHE_NIN <<
617                             GICR_PROPBASER_CACHE_SHIFT;
618                         /* Non-shareable */
619                         xbaser |= GICR_PROPBASER_SHARE_NS <<
620                             GICR_PROPBASER_SHARE_SHIFT;
621                         gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
622                 }
623                 sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
624         }
625
626         /*
627          * Set the LPI pending table base
628          */
629         xbaser = vtophys(sc->sc_pend_base[cpuid]) |
630             (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
631             (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);
632
633         gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);
634
635         tmp = gic_r_read_8(gicv3, GICR_PENDBASER);
636
637         if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
638             (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
639                 /* Clear the cache and shareability bits */
640                 xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
641                     GICR_PENDBASER_SHARE_MASK);
642                 /* Mark as non-shareable */
643                 xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
644                 /* And non-cacheable */
645                 xbaser |= GICR_PENDBASER_CACHE_NIN <<
646                     GICR_PENDBASER_CACHE_SHIFT;
647         }
648
649         /* Enable LPIs */
650         ctlr = gic_r_read_4(gicv3, GICR_CTLR);
651         ctlr |= GICR_CTLR_LPI_ENABLE;
652         gic_r_write_4(gicv3, GICR_CTLR, ctlr);
653
654         /* Make sure the GIC has seen everything */
655         dsb(sy);
656 }
657
658 static int
659 its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
660 {
661         device_t gicv3;
662         vm_paddr_t target;
663         u_int cpuid;
664         struct redist_pcpu *rpcpu;
665
666         gicv3 = device_get_parent(dev);
667         cpuid = PCPU_GET(cpuid);
668         if (!CPU_ISSET(cpuid, &sc->sc_cpus))
669                 return (0);
670
671         /* Check that the redistributor on this CPU supports physical LPIs */
672         if ((gic_r_read_4(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0)
673                 return (ENXIO);
674
675         rpcpu = gicv3_get_redist(dev);
676
677         /* Do per-cpu LPI init once */
678         if (!rpcpu->lpi_enabled) {
679                 its_init_cpu_lpi(dev, sc);
680                 rpcpu->lpi_enabled = true;
681         }
682
683         if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
684                 /* This ITS wants the redistributor physical address */
685                 target = vtophys(rman_get_virtual(&rpcpu->res));
686         } else {
687                 /* This ITS wants the unique processor number */
688                 target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER));
689         }
690
691         sc->sc_its_cols[cpuid]->col_target = target;
692         sc->sc_its_cols[cpuid]->col_id = cpuid;
693
694         its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
695         its_cmd_invall(dev, sc->sc_its_cols[cpuid]);
696
697         return (0);
698 }
699
700 static int
701 gicv3_its_attach(device_t dev)
702 {
703         struct gicv3_its_softc *sc;
704         const char *name;
705         uint32_t iidr;
706         int domain, err, i, rid;
707
708         sc = device_get_softc(dev);
709
710         sc->sc_irq_length = gicv3_get_nirqs(dev);
711         sc->sc_irq_base = GIC_FIRST_LPI;
712         sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length;
713
714         rid = 0;
715         sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
716             RF_ACTIVE);
717         if (sc->sc_its_res == NULL) {
718                 device_printf(dev, "Could not allocate memory\n");
719                 return (ENXIO);
720         }
721
722         iidr = gic_its_read_4(sc, GITS_IIDR);
723         for (i = 0; i < nitems(its_quirks); i++) {
724                 if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
725                         if (bootverbose) {
726                                 device_printf(dev, "Applying %s\n",
727                                     its_quirks[i].desc);
728                         }
729                         its_quirks[i].func(dev);
730                         break;
731                 }
732         }
733
734         /* Allocate the private tables */
735         err = gicv3_its_table_init(dev, sc);
736         if (err != 0)
737                 return (err);
738
739         /* Protects access to the device list */
740         mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);
741
742         /* Protects access to the ITS command circular buffer. */
743         mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);
744
745         CPU_ZERO(&sc->sc_cpus);
746         if (bus_get_domain(dev, &domain) == 0) {
747                 if (domain < MAXMEMDOM)
748                         CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
749         } else {
750                 /* XXX : cannot handle more than one ITS per cpu */
751                 if (device_get_unit(dev) == 0)
752                         CPU_COPY(&all_cpus, &sc->sc_cpus);
753         }
754
755         /* Allocate the command circular buffer */
756         gicv3_its_cmdq_init(sc);
757
758         /* Allocate the per-CPU collections */
759         for (int cpu = 0; cpu <= mp_maxid; cpu++)
760                 if (CPU_ISSET(cpu, &sc->sc_cpus) != 0)
761                         sc->sc_its_cols[cpu] = malloc(
762                             sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
763                             M_WAITOK | M_ZERO);
764                 else
765                         sc->sc_its_cols[cpu] = NULL;
766
767         /* Enable the ITS */
768         gic_its_write_4(sc, GITS_CTLR,
769             gic_its_read_4(sc, GITS_CTLR) | GITS_CTLR_EN);
770
771         /* Create the LPI configuration table */
772         gicv3_its_conftable_init(sc);
773
774         /* And the pending tables */
775         gicv3_its_pendtables_init(sc);
776
777         /* Enable LPIs on this CPU */
778         its_init_cpu(dev, sc);
779
780         TAILQ_INIT(&sc->sc_its_dev_list);
781
782         /*
783          * Create the vmem object to allocate INTRNG IRQs from. We try to
784          * use all IRQs not already used by the GICv3.
785          * XXX: This assumes there are no other interrupt controllers in the
786          * system.
787          */
788         sc->sc_irq_alloc = vmem_create("GICv3 ITS IRQs", 0,
789             gicv3_get_nirqs(dev), 1, 1, M_FIRSTFIT | M_WAITOK);
790
791         sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length,
792             M_GICV3_ITS, M_WAITOK | M_ZERO);
793         name = device_get_nameunit(dev);
794         for (i = 0; i < sc->sc_irq_length; i++) {
795                 sc->sc_irqs[i].gi_irq = i;
796                 err = intr_isrc_register(&sc->sc_irqs[i].gi_isrc, dev, 0,
797                     "%s,%u", name, i);
798         }
799
800         return (0);
801 }
802
803 static int
804 gicv3_its_detach(device_t dev)
805 {
806
807         return (ENXIO);
808 }
809
810 static void
811 its_quirk_cavium_22375(device_t dev)
812 {
813         struct gicv3_its_softc *sc;
814
815         sc = device_get_softc(dev);
816         sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;
817 }
818
819 static void
820 gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
821 {
822         struct gicv3_its_softc *sc;
823         struct gicv3_its_irqsrc *girq;
824         uint8_t *conf;
825
826         sc = device_get_softc(dev);
827         girq = (struct gicv3_its_irqsrc *)isrc;
828         conf = (uint8_t *)sc->sc_conf_base;
829
830         conf[girq->gi_irq] &= ~LPI_CONF_ENABLE;
831
832         if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
833                 /* Clean D-cache under command. */
834                 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_irq], 1);
835         } else {
836                 /* DSB inner shareable, store */
837                 dsb(ishst);
838         }
839
840         its_cmd_inv(dev, girq->gi_its_dev, girq);
841 }
842
843 static void
844 gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
845 {
846         struct gicv3_its_softc *sc;
847         struct gicv3_its_irqsrc *girq;
848         uint8_t *conf;
849
850         sc = device_get_softc(dev);
851         girq = (struct gicv3_its_irqsrc *)isrc;
852         conf = (uint8_t *)sc->sc_conf_base;
853
854         conf[girq->gi_irq] |= LPI_CONF_ENABLE;
855
856         if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
857                 /* Clean D-cache under command. */
858                 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_irq], 1);
859         } else {
860                 /* DSB inner shareable, store */
861                 dsb(ishst);
862         }
863
864         its_cmd_inv(dev, girq->gi_its_dev, girq);
865 }
866
867 static int
868 gicv3_its_intr(void *arg, uintptr_t irq)
869 {
870         struct gicv3_its_softc *sc = arg;
871         struct gicv3_its_irqsrc *girq;
872         struct trapframe *tf;
873
874         irq -= sc->sc_irq_base;
875         girq = &sc->sc_irqs[irq];
876         if (girq == NULL)
877                 panic("gicv3_its_intr: Invalid interrupt %ld",
878                     irq + sc->sc_irq_base);
879
880         tf = curthread->td_intr_frame;
881         intr_isrc_dispatch(&girq->gi_isrc, tf);
882         return (FILTER_HANDLED);
883 }
884
885 static void
886 gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
887 {
888         struct gicv3_its_irqsrc *girq;
889         struct gicv3_its_softc *sc;
890
891         sc = device_get_softc(dev);
892         girq = (struct gicv3_its_irqsrc *)isrc;
893         gicv3_its_disable_intr(dev, isrc);
894         gic_icc_write(EOIR1, girq->gi_irq + sc->sc_irq_base);
895 }
896
897 static void
898 gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
899 {
900
901         gicv3_its_enable_intr(dev, isrc);
902 }
903
904 static void
905 gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
906 {
907         struct gicv3_its_irqsrc *girq;
908         struct gicv3_its_softc *sc;
909
910         sc = device_get_softc(dev);
911         girq = (struct gicv3_its_irqsrc *)isrc;
912         gic_icc_write(EOIR1, girq->gi_irq + sc->sc_irq_base);
913 }
914
915 static int
916 gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
917 {
918         struct gicv3_its_irqsrc *girq;
919         struct gicv3_its_softc *sc;
920
921         sc = device_get_softc(dev);
922         girq = (struct gicv3_its_irqsrc *)isrc;
923         if (CPU_EMPTY(&isrc->isrc_cpu)) {
924                 sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
925                     &sc->sc_cpus);
926                 CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
927         }
928
929         its_cmd_movi(dev, girq);
930
931         return (0);
932 }
933
934 static int
935 gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
936     struct intr_irqsrc **isrcp)
937 {
938
939         /*
940          * This should never happen, we only call this function to map
941          * interrupts found before the controller driver is ready.
942          */
943         panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
944 }
945
946 static int
947 gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
948     struct resource *res, struct intr_map_data *data)
949 {
950
951         /* Bind the interrupt to a CPU */
952         gicv3_its_bind_intr(dev, isrc);
953
954         return (0);
955 }
956
957 #ifdef SMP
958 static void
959 gicv3_its_init_secondary(device_t dev)
960 {
961         struct gicv3_its_softc *sc;
962
963         sc = device_get_softc(dev);
964
965         /*
966          * This is fatal as otherwise we may bind interrupts to this CPU.
967          * We need a way to tell the interrupt framework to only bind to a
968          * subset of given CPUs when it performs the shuffle.
969          */
970         if (its_init_cpu(dev, sc) != 0)
971                 panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
972                     PCPU_GET(cpuid));
973 }
974 #endif
975
976 static uint32_t
977 its_get_devid(device_t pci_dev)
978 {
979         uintptr_t id;
980
981         if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
982                 panic("its_get_devid: Unable to get the MSI DeviceID");
983
984         return (id);
985 }
986
987 static struct its_dev *
988 its_device_find(device_t dev, device_t child)
989 {
990         struct gicv3_its_softc *sc;
991         struct its_dev *its_dev = NULL;
992
993         sc = device_get_softc(dev);
994
995         mtx_lock_spin(&sc->sc_its_dev_lock);
996         TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
997                 if (its_dev->pci_dev == child)
998                         break;
999         }
1000         mtx_unlock_spin(&sc->sc_its_dev_lock);
1001
1002         return (its_dev);
1003 }
1004
1005 static struct its_dev *
1006 its_device_get(device_t dev, device_t child, u_int nvecs)
1007 {
1008         struct gicv3_its_softc *sc;
1009         struct its_dev *its_dev;
1010         vmem_addr_t irq_base;
1011         size_t esize;
1012
1013         sc = device_get_softc(dev);
1014
1015         its_dev = its_device_find(dev, child);
1016         if (its_dev != NULL)
1017                 return (its_dev);
1018
1019         its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
1020         if (its_dev == NULL)
1021                 return (NULL);
1022
1023         its_dev->pci_dev = child;
1024         its_dev->devid = its_get_devid(child);
1025
1026         its_dev->lpis.lpi_busy = 0;
1027         its_dev->lpis.lpi_num = nvecs;
1028         its_dev->lpis.lpi_free = nvecs;
1029
1030         if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
1031             &irq_base) != 0) {
1032                 free(its_dev, M_GICV3_ITS);
1033                 return (NULL);
1034         }
1035         its_dev->lpis.lpi_base = irq_base;
1036
1037         /* Get ITT entry size */
1038         esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));
1039
1040         /*
1041          * Allocate ITT for this device.
1042          * The PA has to be 256 B aligned, with at least two entries per device.
1043          */
1044         its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
1045         its_dev->itt = (vm_offset_t)contigmalloc(its_dev->itt_size,
1046             M_GICV3_ITS, M_NOWAIT | M_ZERO, 0, LPI_INT_TRANS_TAB_MAX_ADDR,
1047             LPI_INT_TRANS_TAB_ALIGN, 0);
1048         if (its_dev->itt == 0) {
1049                 vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
1050                 free(its_dev, M_GICV3_ITS);
1051                 return (NULL);
1052         }
1053
1054         mtx_lock_spin(&sc->sc_its_dev_lock);
1055         TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
1056         mtx_unlock_spin(&sc->sc_its_dev_lock);
1057
1058         /* Map device to its ITT */
1059         its_cmd_mapd(dev, its_dev, 1);
1060
1061         return (its_dev);
1062 }
1063
1064 static void
1065 its_device_release(device_t dev, struct its_dev *its_dev)
1066 {
1067         struct gicv3_its_softc *sc;
1068
1069         KASSERT(its_dev->lpis.lpi_busy == 0,
1070             ("its_device_release: Trying to release an inuse ITS device"));
1071
1072         /* Unmap device in ITS */
1073         its_cmd_mapd(dev, its_dev, 0);
1074
1075         sc = device_get_softc(dev);
1076
1077         /* Remove the device from the list of devices */
1078         mtx_lock_spin(&sc->sc_its_dev_lock);
1079         TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
1080         mtx_unlock_spin(&sc->sc_its_dev_lock);
1081
1082         /* Free ITT */
1083         KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device"));
1084         contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS);
1085
1086         /* Free the IRQ allocation */
1087         vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
1088             its_dev->lpis.lpi_num);
1089
1090         free(its_dev, M_GICV3_ITS);
1091 }
1092
1093 static int
1094 gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1095     device_t *pic, struct intr_irqsrc **srcs)
1096 {
1097         struct gicv3_its_softc *sc;
1098         struct gicv3_its_irqsrc *girq;
1099         struct its_dev *its_dev;
1100         u_int irq;
1101         int i;
1102
1103         its_dev = its_device_get(dev, child, count);
1104         if (its_dev == NULL)
1105                 return (ENXIO);
1106
1107         KASSERT(its_dev->lpis.lpi_free >= count,
1108             ("gicv3_its_alloc_msi: No free LPIs"));
1109         sc = device_get_softc(dev);
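        /* Hand out LPIs starting at the first unused entry in this device's chunk */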
1110         irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
1111             its_dev->lpis.lpi_free;
1112         for (i = 0; i < count; i++, irq++) {
1113                 its_dev->lpis.lpi_free--;
1114                 girq = &sc->sc_irqs[irq];
1115                 girq->gi_its_dev = its_dev;
1116                 srcs[i] = (struct intr_irqsrc *)girq;
1117         }
1118         its_dev->lpis.lpi_busy += count;
1119         *pic = dev;
1120
1121         return (0);
1122 }
1123
1124 static int
1125 gicv3_its_release_msi(device_t dev, device_t child, int count,
1126     struct intr_irqsrc **isrc)
1127 {
1128         struct gicv3_its_irqsrc *girq;
1129         struct its_dev *its_dev;
1130         int i;
1131
1132         its_dev = its_device_find(dev, child);
1133
1134         KASSERT(its_dev != NULL,
1135             ("gicv3_its_release_msi: Releasing a MSI interrupt with "
1136              "no ITS device"));
1137         KASSERT(its_dev->lpis.lpi_busy >= count,
1138             ("gicv3_its_release_msi: Releasing more interrupts than "
1139              "were allocated: releasing %d, allocated %d", count,
1140              its_dev->lpis.lpi_busy));
1141         for (i = 0; i < count; i++) {
1142                 girq = (struct gicv3_its_irqsrc *)isrc[i];
1143                 girq->gi_its_dev = NULL;
1144         }
1145         its_dev->lpis.lpi_busy -= count;
1146
1147         if (its_dev->lpis.lpi_busy == 0)
1148                 its_device_release(dev, its_dev);
1149
1150         return (0);
1151 }
1152
1153 static int
1154 gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
1155     struct intr_irqsrc **isrcp)
1156 {
1157         struct gicv3_its_softc *sc;
1158         struct gicv3_its_irqsrc *girq;
1159         struct its_dev *its_dev;
1160         u_int nvecs, irq;
1161
1162         nvecs = pci_msix_count(child);
1163         its_dev = its_device_get(dev, child, nvecs);
1164         if (its_dev == NULL)
1165                 return (ENXIO);
1166
1167         KASSERT(its_dev->lpis.lpi_free > 0,
1168             ("gicv3_its_alloc_msix: No free LPIs"));
1169         sc = device_get_softc(dev);
1170         irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
1171             its_dev->lpis.lpi_free;
1172         its_dev->lpis.lpi_free--;
1173         its_dev->lpis.lpi_busy++;
1174         girq = &sc->sc_irqs[irq];
1175         girq->gi_its_dev = its_dev;
1176
1177         *pic = dev;
1178         *isrcp = (struct intr_irqsrc *)girq;
1179
1180         return (0);
1181 }
1182
1183 static int
1184 gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1185 {
1186         struct gicv3_its_irqsrc *girq;
1187         struct its_dev *its_dev;
1188
1189         its_dev = its_device_find(dev, child);
1190
1191         KASSERT(its_dev != NULL,
1192             ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
1193              "no ITS device"));
1194         KASSERT(its_dev->lpis.lpi_busy > 0,
1195             ("gicv3_its_release_msix: Releasing more interrupts than "
1196              "were allocated: allocated %d", its_dev->lpis.lpi_busy));
1197         girq = (struct gicv3_its_irqsrc *)isrc;
1198         girq->gi_its_dev = NULL;
1199         its_dev->lpis.lpi_busy--;
1200
1201         if (its_dev->lpis.lpi_busy == 0)
1202                 its_device_release(dev, its_dev);
1203
1204         return (0);
1205 }
1206
1207 static int
1208 gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1209     uint64_t *addr, uint32_t *data)
1210 {
1211         struct gicv3_its_softc *sc;
1212         struct gicv3_its_irqsrc *girq;
1213
1214         sc = device_get_softc(dev);
1215         girq = (struct gicv3_its_irqsrc *)isrc;
1216
1217         /* Map the message to the given IRQ */
1218         its_cmd_mapti(dev, girq);
1219
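        /*
         * The MSI doorbell address is the ITS translation register; the data
         * payload is the per-device EventID rather than the global IRQ number.
         */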
1220         *addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
1221         *data = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base;
1222
1223         return (0);
1224 }
1225
1226 /*
1227  * Commands handling.
1228  */
1229
1230 static __inline void
1231 cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type)
1232 {
1233         /* Command field: DW0 [7:0] */
1234         cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK);
1235         cmd->cmd_dword[0] |= htole64(cmd_type);
1236 }
1237
1238 static __inline void
1239 cmd_format_devid(struct its_cmd *cmd, uint32_t devid)
1240 {
1241         /* Device ID field: DW0 [63:32] */
1242         cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK);
1243         cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT);
1244 }
1245
1246 static __inline void
1247 cmd_format_size(struct its_cmd *cmd, uint16_t size)
1248 {
1249         /* Size field: DW1 [4:0] */
1250         cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK);
1251         cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK));
1252 }
1253
1254 static __inline void
1255 cmd_format_id(struct its_cmd *cmd, uint32_t id)
1256 {
1257         /* ID field: DW1 [31:0] */
1258         cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK);
1259         cmd->cmd_dword[1] |= htole64(id);
1260 }
1261
1262 static __inline void
1263 cmd_format_pid(struct its_cmd *cmd, uint32_t pid)
1264 {
1265         /* Physical ID field: DW1 [63:32] */
1266         cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK);
1267         cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT);
1268 }
1269
1270 static __inline void
1271 cmd_format_col(struct its_cmd *cmd, uint16_t col_id)
1272 {
1273         /* Collection field: DW2 [15:0] */
1274         cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK);
1275         cmd->cmd_dword[2] |= htole64(col_id);
1276 }
1277
1278 static __inline void
1279 cmd_format_target(struct its_cmd *cmd, uint64_t target)
1280 {
1281         /* Target Address field: DW2 [47:16] */
1282         cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK);
1283         cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK);
1284 }
1285
1286 static __inline void
1287 cmd_format_itt(struct its_cmd *cmd, uint64_t itt)
1288 {
1289         /* ITT Address field: DW2 [47:8] */
1290         cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK);
1291         cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK);
1292 }
1293
1294 static __inline void
1295 cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
1296 {
1297         /* Valid field: DW2 [63] */
1298         cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
1299         cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
1300 }
1301
1302 static inline bool
1303 its_cmd_queue_full(struct gicv3_its_softc *sc)
1304 {
1305         size_t read_idx, next_write_idx;
1306
1307         /* Get the index of the next command */
1308         next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
1309             (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
1310         /* And the index of the current command being read */
1311         read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);
1312
1313         /*
1314          * The queue is full when the write offset points
1315          * at the command before the current read offset.
1316          */
1317         return (next_write_idx == read_idx);
1318 }
1319
1320 static inline void
1321 its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
1322 {
1323
1324         if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
1325                 /* Clean D-cache under command. */
1326                 cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
1327         } else {
1328                 /* DSB inner shareable, store */
1329                 dsb(ishst);
1330         }
1331
1332 }
1333
1334 static inline uint64_t
1335 its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
1336 {
1337         uint64_t off;
1338
1339         off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);
1340
1341         return (off);
1342 }
1343
1344 static void
1345 its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
1346     struct its_cmd *cmd_last)
1347 {
1348         struct gicv3_its_softc *sc;
1349         uint64_t first, last, read;
1350         size_t us_left;
1351
1352         sc = device_get_softc(dev);
1353
1354         /*
1355          * XXX ARM64TODO: This is obviously a significant delay.
1356          * The reason for that is that currently the time frames for
1357          * the command to complete are not known.
1358          */
1359         us_left = 1000000;
1360
1361         first = its_cmd_cwriter_offset(sc, cmd_first);
1362         last = its_cmd_cwriter_offset(sc, cmd_last);
1363
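        /*
         * GITS_CREADR chases GITS_CWRITER around the ring; wait until the
         * read pointer has left the range of commands we queued (i.e. the
         * ITS has consumed them), allowing for wrap-around.
         */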
1364         for (;;) {
1365                 read = gic_its_read_8(sc, GITS_CREADR);
1366                 if (first < last) {
1367                         if (read < first || read >= last)
1368                                 break;
1369                 } else if (read < first && read >= last)
1370                         break;
1371
1372                 if (us_left-- == 0) {
1373                         /* This means timeout */
1374                         device_printf(dev,
1375                             "Timeout while waiting for CMD completion.\n");
1376                         return;
1377                 }
1378                 DELAY(1);
1379         }
1380 }
1381
1382
1383 static struct its_cmd *
1384 its_cmd_alloc_locked(device_t dev)
1385 {
1386         struct gicv3_its_softc *sc;
1387         struct its_cmd *cmd;
1388         size_t us_left;
1389
1390         sc = device_get_softc(dev);
1391
1392         /*
1393          * XXX ARM64TODO: This is obviously a significant delay.
1394          * The reason for that is that currently the time frames for
1395          * the command to complete (and therefore free the descriptor)
1396          * are not known.
1397          */
1398         us_left = 1000000;
1399
1400         mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
1401         while (its_cmd_queue_full(sc)) {
1402                 if (us_left-- == 0) {
1403                         /* Timeout while waiting for free command */
1404                         device_printf(dev,
1405                             "Timeout while waiting for free command\n");
1406                         return (NULL);
1407                 }
1408                 DELAY(1);
1409         }
1410
1411         cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
1412         sc->sc_its_cmd_next_idx++;
1413         sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);
1414
1415         return (cmd);
1416 }
1417
1418 static uint64_t
1419 its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
1420 {
1421         uint64_t target;
1422         uint8_t cmd_type;
1423         u_int size;
1424
1425         cmd_type = desc->cmd_type;
1426         target = ITS_TARGET_NONE;
1427
1428         switch (cmd_type) {
1429         case ITS_CMD_MOVI:      /* Move interrupt ID to another collection */
1430                 target = desc->cmd_desc_movi.col->col_target;
1431                 cmd_format_command(cmd, ITS_CMD_MOVI);
1432                 cmd_format_id(cmd, desc->cmd_desc_movi.id);
1433                 cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id);
1434                 cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
1435                 break;
1436         case ITS_CMD_SYNC:      /* Wait for previous commands completion */
1437                 target = desc->cmd_desc_sync.col->col_target;
1438                 cmd_format_command(cmd, ITS_CMD_SYNC);
1439                 cmd_format_target(cmd, target);
1440                 break;
1441         case ITS_CMD_MAPD:      /* Assign ITT to device */
1442                 cmd_format_command(cmd, ITS_CMD_MAPD);
1443                 cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
1444                 /*
1445                  * Size describes number of bits to encode interrupt IDs
1446                  * supported by the device minus one.
1447                  * When V (valid) bit is zero, this field should be written
1448                  * as zero.
1449                  */
1450                 if (desc->cmd_desc_mapd.valid != 0) {
1451                         size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
1452                         size = MAX(1, size) - 1;
1453                 } else
1454                         size = 0;
1455
1456                 cmd_format_size(cmd, size);
1457                 cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
1458                 cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
1459                 break;
1460         case ITS_CMD_MAPC:      /* Map collection to Re-Distributor */
1461                 target = desc->cmd_desc_mapc.col->col_target;
1462                 cmd_format_command(cmd, ITS_CMD_MAPC);
1463                 cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
1464                 cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
1465                 cmd_format_target(cmd, target);
1466                 break;
1467         case ITS_CMD_MAPTI:
1468                 target = desc->cmd_desc_mapvi.col->col_target;
1469                 cmd_format_command(cmd, ITS_CMD_MAPTI);
1470                 cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
1471                 cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
1472                 cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
1473                 cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
1474                 break;
1475         case ITS_CMD_MAPI:
1476                 target = desc->cmd_desc_mapi.col->col_target;
1477                 cmd_format_command(cmd, ITS_CMD_MAPI);
1478                 cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
1479                 cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
1480                 cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
1481                 break;
1482         case ITS_CMD_INV:
1483                 target = desc->cmd_desc_inv.col->col_target;
1484                 cmd_format_command(cmd, ITS_CMD_INV);
1485                 cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
1486                 cmd_format_id(cmd, desc->cmd_desc_inv.pid);
1487                 break;
1488         case ITS_CMD_INVALL:
1489                 cmd_format_command(cmd, ITS_CMD_INVALL);
1490                 cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
1491                 break;
1492         default:
1493                 panic("its_cmd_prepare: Invalid command: %x", cmd_type);
1494         }
1495
1496         return (target);
1497 }
1498
1499 static int
1500 its_cmd_send(device_t dev, struct its_cmd_desc *desc)
1501 {
1502         struct gicv3_its_softc *sc;
1503         struct its_cmd *cmd, *cmd_sync, *cmd_write;
1504         struct its_col col_sync;
1505         struct its_cmd_desc desc_sync;
1506         uint64_t target, cwriter;
1507
1508         sc = device_get_softc(dev);
1509         mtx_lock_spin(&sc->sc_its_cmd_lock);
1510         cmd = its_cmd_alloc_locked(dev);
1511         if (cmd == NULL) {
1512                 device_printf(dev, "could not allocate ITS command\n");
1513                 mtx_unlock_spin(&sc->sc_its_cmd_lock);
1514                 return (EBUSY);
1515         }
1516
1517         target = its_cmd_prepare(cmd, desc);
1518         its_cmd_sync(sc, cmd);
1519
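        /*
         * If the command targets a redistributor, chase it with a SYNC
         * command for the same target so the command's effects are
         * observable there once the queue has been processed.
         */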
1520         if (target != ITS_TARGET_NONE) {
1521                 cmd_sync = its_cmd_alloc_locked(dev);
1522                 if (cmd_sync != NULL) {
1523                         desc_sync.cmd_type = ITS_CMD_SYNC;
1524                         col_sync.col_target = target;
1525                         desc_sync.cmd_desc_sync.col = &col_sync;
1526                         its_cmd_prepare(cmd_sync, &desc_sync);
1527                         its_cmd_sync(sc, cmd_sync);
1528                 }
1529         }
1530
1531         /* Update GITS_CWRITER */
1532         cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
1533         gic_its_write_8(sc, GITS_CWRITER, cwriter);
1534         cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
1535         mtx_unlock_spin(&sc->sc_its_cmd_lock);
1536
1537         its_cmd_wait_completion(dev, cmd, cmd_write);
1538
1539         return (0);
1540 }
1541
1542 /* Handlers to send commands */
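/*
 * MOVI: re-target an already-mapped event to the collection of the CPU
 * the interrupt source is now bound to.
 */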
1543 static void
1544 its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
1545 {
1546         struct gicv3_its_softc *sc;
1547         struct its_cmd_desc desc;
1548         struct its_col *col;
1549
1550         sc = device_get_softc(dev);
1551         col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
1552
1553         desc.cmd_type = ITS_CMD_MOVI;
1554         desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
1555         desc.cmd_desc_movi.col = col;
1556         desc.cmd_desc_movi.id = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base;
1557
1558         its_cmd_send(dev, &desc);
1559 }
1560
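/*
 * MAPC: bind a collection ID to its target redistributor.
 */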
1561 static void
1562 its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
1563 {
1564         struct its_cmd_desc desc;
1565
1566         desc.cmd_type = ITS_CMD_MAPC;
1567         desc.cmd_desc_mapc.col = col;
1568         /*
1569          * Valid bit set - map the collection.
1570          * Valid bit cleared - unmap the collection.
1571          */
1572         desc.cmd_desc_mapc.valid = valid;
1573
1574         its_cmd_send(dev, &desc);
1575 }
1576
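/*
 * MAPTI: map a (DeviceID, EventID) pair to a physical LPI and route it to
 * the collection of the CPU the interrupt source is bound to.
 */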
1577 static void
1578 its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
1579 {
1580         struct gicv3_its_softc *sc;
1581         struct its_cmd_desc desc;
1582         struct its_col *col;
1583         u_int col_id;
1584
1585         sc = device_get_softc(dev);
1586
1587         col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
1588         col = sc->sc_its_cols[col_id];
1589
1590         desc.cmd_type = ITS_CMD_MAPTI;
1591         desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
1592         desc.cmd_desc_mapvi.col = col;
1593         /* The EventID the device presents to the ITS */
1594         desc.cmd_desc_mapvi.id = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base;
1595         /* The physical interrupt presented to software */
1596         desc.cmd_desc_mapvi.pid = girq->gi_irq + sc->sc_irq_base;
1597
1598         its_cmd_send(dev, &desc);
1599 }
1600
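/*
 * MAPD: map (or unmap) a DeviceID to the interrupt translation table
 * allocated for the device.
 */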
1601 static void
1602 its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
1603 {
1604         struct its_cmd_desc desc;
1605
1606         desc.cmd_type = ITS_CMD_MAPD;
1607         desc.cmd_desc_mapd.its_dev = its_dev;
1608         desc.cmd_desc_mapd.valid = valid;
1609
1610         its_cmd_send(dev, &desc);
1611 }
1612
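/*
 * INV: have the ITS re-read the LPI configuration table entry for a
 * single event of a device.
 */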
1613 static void
1614 its_cmd_inv(device_t dev, struct its_dev *its_dev,
1615     struct gicv3_its_irqsrc *girq)
1616 {
1617         struct gicv3_its_softc *sc;
1618         struct its_cmd_desc desc;
1619         struct its_col *col;
1620
1621         sc = device_get_softc(dev);
1622         col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
1623
1624         desc.cmd_type = ITS_CMD_INV;
1625         /* The EventID the device presents to the ITS */
1626         desc.cmd_desc_inv.pid = girq->gi_irq - its_dev->lpis.lpi_base;
1627         desc.cmd_desc_inv.its_dev = its_dev;
1628         desc.cmd_desc_inv.col = col;
1629
1630         its_cmd_send(dev, &desc);
1631 }
1632
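/*
 * INVALL: have the ITS re-read the LPI configuration for every interrupt
 * routed to the given collection.
 */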
1633 static void
1634 its_cmd_invall(device_t dev, struct its_col *col)
1635 {
1636         struct its_cmd_desc desc;
1637
1638         desc.cmd_type = ITS_CMD_INVALL;
1639         desc.cmd_desc_invall.col = col;
1640
1641         its_cmd_send(dev, &desc);
1642 }
1643
1644 #ifdef FDT
1645 static device_probe_t gicv3_its_fdt_probe;
1646 static device_attach_t gicv3_its_fdt_attach;
1647
1648 static device_method_t gicv3_its_fdt_methods[] = {
1649         /* Device interface */
1650         DEVMETHOD(device_probe,         gicv3_its_fdt_probe),
1651         DEVMETHOD(device_attach,        gicv3_its_fdt_attach),
1652
1653         /* End */
1654         DEVMETHOD_END
1655 };
1656
1657 #define its_baseclasses its_fdt_baseclasses
1658 DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
1659     sizeof(struct gicv3_its_softc), gicv3_its_driver);
1660 #undef its_baseclasses
1661 static devclass_t gicv3_its_fdt_devclass;
1662
1663 EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver,
1664     gicv3_its_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
1665
1666 static int
1667 gicv3_its_fdt_probe(device_t dev)
1668 {
1669
1670         if (!ofw_bus_status_okay(dev))
1671                 return (ENXIO);
1672
1673         if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
1674                 return (ENXIO);
1675
1676         device_set_desc(dev, "ARM GIC Interrupt Translation Service");
1677         return (BUS_PROBE_DEFAULT);
1678 }
1679
1680 static int
1681 gicv3_its_fdt_attach(device_t dev)
1682 {
1683         struct gicv3_its_softc *sc;
1684         phandle_t xref;
1685         int err;
1686
1687         sc = device_get_softc(dev);
1688         err = gicv3_its_attach(dev);
1689         if (err != 0)
1690                 return (err);
1691
1692         /* Register this device as an interrupt controller */
1693         xref = OF_xref_from_node(ofw_bus_get_node(dev));
1694         sc->sc_pic = intr_pic_register(dev, xref);
1695         intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
1696             gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
1697
1698         /* Register this device to handle MSI interrupts */
1699         intr_msi_register(dev, xref);
1700
1701         return (0);
1702 }
1703 #endif
1704
1705 #ifdef DEV_ACPI
1706 static device_probe_t gicv3_its_acpi_probe;
1707 static device_attach_t gicv3_its_acpi_attach;
1708
1709 static device_method_t gicv3_its_acpi_methods[] = {
1710         /* Device interface */
1711         DEVMETHOD(device_probe,         gicv3_its_acpi_probe),
1712         DEVMETHOD(device_attach,        gicv3_its_acpi_attach),
1713
1714         /* End */
1715         DEVMETHOD_END
1716 };
1717
1718 #define its_baseclasses its_acpi_baseclasses
1719 DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
1720     sizeof(struct gicv3_its_softc), gicv3_its_driver);
1721 #undef its_baseclasses
1722 static devclass_t gicv3_its_acpi_devclass;
1723
1724 EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver,
1725     gicv3_its_acpi_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
1726
1727 static int
1728 gicv3_its_acpi_probe(device_t dev)
1729 {
1730
1731         if (gic_get_bus(dev) != GIC_BUS_ACPI)
1732                 return (EINVAL);
1733
1734         if (gic_get_hw_rev(dev) < 3)
1735                 return (EINVAL);
1736
1737         device_set_desc(dev, "ARM GIC Interrupt Translation Service");
1738         return (BUS_PROBE_DEFAULT);
1739 }
1740
1741 static int
1742 gicv3_its_acpi_attach(device_t dev)
1743 {
1744         struct gicv3_its_softc *sc;
1745         struct gic_v3_devinfo *di;
1746         int err;
1747
1748         sc = device_get_softc(dev);
1749         err = gicv3_its_attach(dev);
1750         if (err != 0)
1751                 return (err);
1752
1753         di = device_get_ivars(dev);
1754         sc->sc_pic = intr_pic_register(dev, di->msi_xref);
1755         intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
1756             gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
1757
1758         /* Register this device to handle MSI interrupts */
1759         intr_msi_register(dev, di->msi_xref);
1760
1761         return (0);
1762 }
1763 #endif