1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  * All rights reserved.
4  *
5  * This software was developed by Andrew Turner under
6  * the sponsorship of the FreeBSD Foundation.
7  *
8  * This software was developed by Semihalf under
9  * the sponsorship of the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32
33 #include "opt_acpi.h"
34 #include "opt_platform.h"
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/cpuset.h>
43 #include <sys/endian.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/module.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/rman.h>
52 #include <sys/smp.h>
53 #include <sys/vmem.h>
54
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57
58 #include <machine/bus.h>
59 #include <machine/intr.h>
60
61 #include <arm/arm/gic_common.h>
62 #include <arm64/arm64/gic_v3_reg.h>
63 #include <arm64/arm64/gic_v3_var.h>
64
65 #ifdef FDT
66 #include <dev/ofw/openfirm.h>
67 #include <dev/ofw/ofw_bus.h>
68 #include <dev/ofw/ofw_bus_subr.h>
69 #endif
70 #include <dev/pci/pcireg.h>
71 #include <dev/pci/pcivar.h>
72
73 #include "pcib_if.h"
74 #include "pic_if.h"
75 #include "msi_if.h"
76
77 MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS",
78     "ARM GICv3 Interrupt Translation Service");
79
80 #define LPI_NIRQS               (64 * 1024)
81
82 /* The size and alignment of the command circular buffer */
83 #define ITS_CMDQ_SIZE           (64 * 1024)     /* Must be a multiple of 4K */
84 #define ITS_CMDQ_ALIGN          (64 * 1024)
85
86 #define LPI_CONFTAB_SIZE        LPI_NIRQS
87 #define LPI_CONFTAB_ALIGN       (64 * 1024)
88 #define LPI_CONFTAB_MAX_ADDR    ((1ul << 48) - 1) /* We need a 48 bit PA */
89
90 /* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */
91 #define LPI_PENDTAB_SIZE        ((LPI_NIRQS + GIC_FIRST_LPI) / 8)
92 #define LPI_PENDTAB_ALIGN       (64 * 1024)
93 #define LPI_PENDTAB_MAX_ADDR    ((1ul << 48) - 1) /* We need a 48 bit PA */
94
95 #define LPI_INT_TRANS_TAB_ALIGN 256
96 #define LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)
97
98 /* ITS commands encoding */
99 #define ITS_CMD_MOVI            (0x01)
100 #define ITS_CMD_SYNC            (0x05)
101 #define ITS_CMD_MAPD            (0x08)
102 #define ITS_CMD_MAPC            (0x09)
103 #define ITS_CMD_MAPTI           (0x0a)
104 #define ITS_CMD_MAPI            (0x0b)
105 #define ITS_CMD_INV             (0x0c)
106 #define ITS_CMD_INVALL          (0x0d)
107 /* Command */
108 #define CMD_COMMAND_MASK        (0xFFUL)
109 /* PCI device ID */
110 #define CMD_DEVID_SHIFT         (32)
111 #define CMD_DEVID_MASK          (0xFFFFFFFFUL << CMD_DEVID_SHIFT)
112 /* Size of IRQ ID bitfield */
113 #define CMD_SIZE_MASK           (0xFFUL)
114 /* Virtual LPI ID */
115 #define CMD_ID_MASK             (0xFFFFFFFFUL)
116 /* Physical LPI ID */
117 #define CMD_PID_SHIFT           (32)
118 #define CMD_PID_MASK            (0xFFFFFFFFUL << CMD_PID_SHIFT)
119 /* Collection */
120 #define CMD_COL_MASK            (0xFFFFUL)
121 /* Target (CPU or Re-Distributor) */
122 #define CMD_TARGET_SHIFT        (16)
123 #define CMD_TARGET_MASK         (0xFFFFFFFFUL << CMD_TARGET_SHIFT)
124 /* Interrupt Translation Table address */
125 #define CMD_ITT_MASK            (0xFFFFFFFFFF00UL)
126 /* Valid command bit */
127 #define CMD_VALID_SHIFT         (63)
128 #define CMD_VALID_MASK          (1UL << CMD_VALID_SHIFT)
129
130 #define ITS_TARGET_NONE         0xFBADBEEF
131
132 /* LPI chunk owned by ITS device */
133 struct lpi_chunk {
134         u_int   lpi_base;
135         u_int   lpi_free;       /* First free LPI in set */
136         u_int   lpi_num;        /* Total number of LPIs in chunk */
137         u_int   lpi_busy;       /* Number of busy LPIs in chunk */
138 };
139
140 /* ITS device */
141 struct its_dev {
142         TAILQ_ENTRY(its_dev)    entry;
143         /* PCI device */
144         device_t                pci_dev;
145         /* Device ID (i.e. PCI device ID) */
146         uint32_t                devid;
147         /* List of assigned LPIs */
148         struct lpi_chunk        lpis;
149         /* Virtual address of ITT */
150         vm_offset_t             itt;
151         size_t                  itt_size;
152 };
153
154 /*
155  * ITS command descriptor.
156  * Idea for command description passing taken from Linux.
157  */
158 struct its_cmd_desc {
159         uint8_t cmd_type;
160
161         union {
162                 struct {
163                         struct its_dev *its_dev;
164                         struct its_col *col;
165                         uint32_t id;
166                 } cmd_desc_movi;
167
168                 struct {
169                         struct its_col *col;
170                 } cmd_desc_sync;
171
172                 struct {
173                         struct its_col *col;
174                         uint8_t valid;
175                 } cmd_desc_mapc;
176
177                 struct {
178                         struct its_dev *its_dev;
179                         struct its_col *col;
180                         uint32_t pid;
181                         uint32_t id;
182                 } cmd_desc_mapvi;
183
184                 struct {
185                         struct its_dev *its_dev;
186                         struct its_col *col;
187                         uint32_t pid;
188                 } cmd_desc_mapi;
189
190                 struct {
191                         struct its_dev *its_dev;
192                         uint8_t valid;
193                 } cmd_desc_mapd;
194
195                 struct {
196                         struct its_dev *its_dev;
197                         struct its_col *col;
198                         uint32_t pid;
199                 } cmd_desc_inv;
200
201                 struct {
202                         struct its_col *col;
203                 } cmd_desc_invall;
204         };
205 };
206
207 /* ITS command. Each command is 32 bytes long */
208 struct its_cmd {
209         uint64_t        cmd_dword[4];   /* ITS command double word */
210 };
211
212 /* An ITS private table */
213 struct its_ptable {
214         vm_offset_t     ptab_vaddr;
215         unsigned long   ptab_size;
216 };
217
218 /* ITS collection description. */
219 struct its_col {
220         uint64_t        col_target;     /* Target Re-Distributor */
221         uint64_t        col_id;         /* Collection ID */
222 };
223
224 struct gicv3_its_irqsrc {
225         struct intr_irqsrc      gi_isrc;
226         u_int                   gi_irq;
227         u_int                   gi_lpi;
228         struct its_dev          *gi_its_dev;
229 };
230
231 struct gicv3_its_softc {
232         struct intr_pic *sc_pic;
233         struct resource *sc_its_res;
234
235         cpuset_t        sc_cpus;
236         u_int           gic_irq_cpu;
237
238         struct its_ptable sc_its_ptab[GITS_BASER_NUM];
239         struct its_col *sc_its_cols[MAXCPU];    /* Per-CPU collections */
240
241         /*
242          * TODO: We should get these from the parent as we only want a
243          * single copy of each across the interrupt controller.
244          */
245         uint8_t         *sc_conf_base;
246         vm_offset_t sc_pend_base[MAXCPU];
247
248         /* Command handling */
249         struct mtx sc_its_cmd_lock;
250         struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */
251         size_t sc_its_cmd_next_idx;
252
253         vmem_t *sc_irq_alloc;
254         struct gicv3_its_irqsrc *sc_irqs;
255         u_int   sc_irq_base;
256         u_int   sc_irq_length;
257
258         struct mtx sc_its_dev_lock;
259         TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
260
261 #define ITS_FLAGS_CMDQ_FLUSH            0x00000001
262 #define ITS_FLAGS_LPI_CONF_FLUSH        0x00000002
263 #define ITS_FLAGS_ERRATA_CAVIUM_22375   0x00000004
264         u_int sc_its_flags;
265 };
266
267 static void *conf_base;
268
269 typedef void (its_quirk_func_t)(device_t);
270 static its_quirk_func_t its_quirk_cavium_22375;
271
272 static const struct {
273         const char *desc;
274         uint32_t iidr;
275         uint32_t iidr_mask;
276         its_quirk_func_t *func;
277 } its_quirks[] = {
278         {
279                 /* Cavium ThunderX Pass 1.x */
280                 .desc = "Cavium ThunderX errata: 22375, 24313",
281                 .iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
282                     GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
283                 .iidr_mask = ~GITS_IIDR_REVISION_MASK,
284                 .func = its_quirk_cavium_22375,
285         },
286 };
287
288 #define gic_its_read_4(sc, reg)                 \
289     bus_read_4((sc)->sc_its_res, (reg))
290 #define gic_its_read_8(sc, reg)                 \
291     bus_read_8((sc)->sc_its_res, (reg))
292
293 #define gic_its_write_4(sc, reg, val)           \
294     bus_write_4((sc)->sc_its_res, (reg), (val))
295 #define gic_its_write_8(sc, reg, val)           \
296     bus_write_8((sc)->sc_its_res, (reg), (val))
297
298 static device_attach_t gicv3_its_attach;
299 static device_detach_t gicv3_its_detach;
300
301 static pic_disable_intr_t gicv3_its_disable_intr;
302 static pic_enable_intr_t gicv3_its_enable_intr;
303 static pic_map_intr_t gicv3_its_map_intr;
304 static pic_setup_intr_t gicv3_its_setup_intr;
305 static pic_post_filter_t gicv3_its_post_filter;
306 static pic_post_ithread_t gicv3_its_post_ithread;
307 static pic_pre_ithread_t gicv3_its_pre_ithread;
308 static pic_bind_intr_t gicv3_its_bind_intr;
309 #ifdef SMP
310 static pic_init_secondary_t gicv3_its_init_secondary;
311 #endif
312 static msi_alloc_msi_t gicv3_its_alloc_msi;
313 static msi_release_msi_t gicv3_its_release_msi;
314 static msi_alloc_msix_t gicv3_its_alloc_msix;
315 static msi_release_msix_t gicv3_its_release_msix;
316 static msi_map_msi_t gicv3_its_map_msi;
317
318 static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
319 static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
320 static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
321 static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
322 static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
323 static void its_cmd_invall(device_t, struct its_col *);
324
325 static device_method_t gicv3_its_methods[] = {
326         /* Device interface */
327         DEVMETHOD(device_detach,        gicv3_its_detach),
328
329         /* Interrupt controller interface */
330         DEVMETHOD(pic_disable_intr,     gicv3_its_disable_intr),
331         DEVMETHOD(pic_enable_intr,      gicv3_its_enable_intr),
332         DEVMETHOD(pic_map_intr,         gicv3_its_map_intr),
333         DEVMETHOD(pic_setup_intr,       gicv3_its_setup_intr),
334         DEVMETHOD(pic_post_filter,      gicv3_its_post_filter),
335         DEVMETHOD(pic_post_ithread,     gicv3_its_post_ithread),
336         DEVMETHOD(pic_pre_ithread,      gicv3_its_pre_ithread),
337 #ifdef SMP
338         DEVMETHOD(pic_bind_intr,        gicv3_its_bind_intr),
339         DEVMETHOD(pic_init_secondary,   gicv3_its_init_secondary),
340 #endif
341
342         /* MSI/MSI-X */
343         DEVMETHOD(msi_alloc_msi,        gicv3_its_alloc_msi),
344         DEVMETHOD(msi_release_msi,      gicv3_its_release_msi),
345         DEVMETHOD(msi_alloc_msix,       gicv3_its_alloc_msix),
346         DEVMETHOD(msi_release_msix,     gicv3_its_release_msix),
347         DEVMETHOD(msi_map_msi,          gicv3_its_map_msi),
348
349         /* End */
350         DEVMETHOD_END
351 };
352
353 static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
354     sizeof(struct gicv3_its_softc));
355
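/*
 * Allocate the ITS command queue and program GITS_CBASER with its physical
 * address and cacheability/shareability attributes. If the ITS reports the
 * mapping back as non-shareable the attributes are downgraded and every
 * command must be flushed from the data cache before it is issued
 * (ITS_FLAGS_CMDQ_FLUSH).
 */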
356 static void
357 gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
358 {
359         vm_paddr_t cmd_paddr;
360         uint64_t reg, tmp;
361
362         /* Set up the command circular buffer */
363         sc->sc_its_cmd_base = contigmalloc(ITS_CMDQ_SIZE, M_GICV3_ITS,
364             M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN, 0);
365         sc->sc_its_cmd_next_idx = 0;
366
367         cmd_paddr = vtophys(sc->sc_its_cmd_base);
368
369         /* Set the base of the command buffer */
370         reg = GITS_CBASER_VALID |
371             (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
372             cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
373             (ITS_CMDQ_SIZE / 4096 - 1);
374         gic_its_write_8(sc, GITS_CBASER, reg);
375
376         /* Read back to check for fixed value fields */
377         tmp = gic_its_read_8(sc, GITS_CBASER);
378
379         if ((tmp & GITS_CBASER_SHARE_MASK) !=
380             (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
381                 /* Check if the hardware reported non-shareable */
382                 if ((tmp & GITS_CBASER_SHARE_MASK) ==
383                     (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
384                         /* If so remove the cache attribute */
385                         reg &= ~GITS_CBASER_CACHE_MASK;
386                         reg &= ~GITS_CBASER_SHARE_MASK;
387                         /* Set to Non-cacheable, Non-shareable */
388                         reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
389                         reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;
390
391                         gic_its_write_8(sc, GITS_CBASER, reg);
392                 }
393
394                 /* The command queue has to be flushed after each command */
395                 sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
396         }
397
398         /* Get the next command from the start of the buffer */
399         gic_its_write_8(sc, GITS_CWRITER, 0x0);
400 }
401
402 static int
403 gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
404 {
405         vm_offset_t table;
406         vm_paddr_t paddr;
407         uint64_t cache, reg, share, tmp, type;
408         size_t esize, its_tbl_size, nidents, nitspages, npages;
409         int i, page_size;
410         int devbits;
411
412         if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
413                 /*
414                  * GITS_TYPER[17:13] of ThunderX reports that device IDs
415                  * are to be 21 bits in length. The entry size of the ITS
416                  * table can be read from GITS_BASERn[52:48] and on ThunderX
417                  * is supposed to be 8 bytes in length (for device table).
418                  * Finally the page size that is to be used by ITS to access
419                  * this table will be set to 64KB.
420                  *
421                  * This gives 0x200000 entries of size 0x8 bytes covered by
422  * 256 pages, each 64KB in size. The number of pages
423                  * (minus 1) should then be written to GITS_BASERn[7:0]. In
424                  * that case this value would be 0xFF but on ThunderX the
425                  * maximum value that HW accepts is 0xFD.
426                  *
427  * Arbitrarily limit the number of device ID bits to 20 in order
428  * to cap the number of entries in the ITS device table at
429  * 0x100000 and the table size at 8MB.
430                  */
431                 devbits = 20;
432                 cache = 0;
433         } else {
434                 devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
435                 cache = GITS_BASER_CACHE_WAWB;
436         }
437         share = GITS_BASER_SHARE_IS;
438         page_size = PAGE_SIZE_64K;
439
440         for (i = 0; i < GITS_BASER_NUM; i++) {
441                 reg = gic_its_read_8(sc, GITS_BASER(i));
442                 /* The type of table */
443                 type = GITS_BASER_TYPE(reg);
444                 /* The table entry size */
445                 esize = GITS_BASER_ESIZE(reg);
446
447                 switch(type) {
448                 case GITS_BASER_TYPE_DEV:
449                         nidents = (1 << devbits);
450                         its_tbl_size = esize * nidents;
451                         its_tbl_size = roundup2(its_tbl_size, PAGE_SIZE_64K);
452                         break;
453                 case GITS_BASER_TYPE_VP:
454                 case GITS_BASER_TYPE_PP: /* Undocumented? */
455                 case GITS_BASER_TYPE_IC:
456                         its_tbl_size = page_size;
457                         break;
458                 default:
459                         continue;
460                 }
461                 npages = howmany(its_tbl_size, PAGE_SIZE);
462
463                 /* Allocate the table */
464                 table = (vm_offset_t)contigmalloc(npages * PAGE_SIZE,
465                     M_GICV3_ITS, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1,
466                     PAGE_SIZE_64K, 0);
467
468                 sc->sc_its_ptab[i].ptab_vaddr = table;
469                 sc->sc_its_ptab[i].ptab_size = npages * PAGE_SIZE;
470
471                 paddr = vtophys(table);
472
473                 while (1) {
474                         nitspages = howmany(its_tbl_size, page_size);
475
476                         /* Clear the fields we will be setting */
477                         reg &= ~(GITS_BASER_VALID |
478                             GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
479                             GITS_BASER_ESIZE_MASK | GITS_BASER_PA_MASK |
480                             GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
481                             GITS_BASER_SIZE_MASK);
482                         /* Set the new values */
483                         reg |= GITS_BASER_VALID |
484                             (cache << GITS_BASER_CACHE_SHIFT) |
485                             (type << GITS_BASER_TYPE_SHIFT) |
486                             ((esize - 1) << GITS_BASER_ESIZE_SHIFT) |
487                             paddr | (share << GITS_BASER_SHARE_SHIFT) |
488                             (nitspages - 1);
489
490                         switch (page_size) {
491                         case PAGE_SIZE:         /* 4KB */
492                                 reg |=
493                                     GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
494                                 break;
495                         case PAGE_SIZE_16K:     /* 16KB */
496                                 reg |=
497                                     GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
498                                 break;
499                         case PAGE_SIZE_64K:     /* 64KB */
500                                 reg |=
501                                     GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
502                                 break;
503                         }
504
505                         gic_its_write_8(sc, GITS_BASER(i), reg);
506
507                         /* Read back to check */
508                         tmp = gic_its_read_8(sc, GITS_BASER(i));
509
510                         /* Do the shareability masks line up? */
511                         if ((tmp & GITS_BASER_SHARE_MASK) !=
512                             (reg & GITS_BASER_SHARE_MASK)) {
513                                 share = (tmp & GITS_BASER_SHARE_MASK) >>
514                                     GITS_BASER_SHARE_SHIFT;
515                                 continue;
516                         }
517
518                         if ((tmp & GITS_BASER_PSZ_MASK) !=
519                             (reg & GITS_BASER_PSZ_MASK)) {
520                                 switch (page_size) {
521                                 case PAGE_SIZE_16K:
522                                         page_size = PAGE_SIZE;
523                                         continue;
524                                 case PAGE_SIZE_64K:
525                                         page_size = PAGE_SIZE_16K;
526                                         continue;
527                                 }
528                         }
529
530                         if (tmp != reg) {
531                                 device_printf(dev, "GITS_BASER%d: "
532                                     "unable to be updated: %lx != %lx\n",
533                                     i, reg, tmp);
534                                 return (ENXIO);
535                         }
536
537                         /* We should have made all needed changes */
538                         break;
539                 }
540         }
541
542         return (0);
543 }
544
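/*
 * Set up the LPI configuration table. A single table is shared by all ITS
 * instances via the global conf_base pointer; each byte holds the
 * configuration (priority and enable bit) for one LPI. The compare-and-set
 * handles two instances racing to allocate the table.
 */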
545 static void
546 gicv3_its_conftable_init(struct gicv3_its_softc *sc)
547 {
548         void *conf_table;
549
550         conf_table = (void *)atomic_load_ptr((uintptr_t *)&conf_base);
551         if (conf_table == NULL) {
552                 conf_table = contigmalloc(LPI_CONFTAB_SIZE,
553                     M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR,
554                     LPI_CONFTAB_ALIGN, 0);
555
556                 if (atomic_cmpset_ptr((uintptr_t *)&conf_base,
557                     (uintptr_t)NULL, (uintptr_t)conf_table) == 0) {
558                         contigfree(conf_table, LPI_CONFTAB_SIZE, M_GICV3_ITS);
559                         conf_table =
560                             (void *)atomic_load_ptr((uintptr_t *)&conf_base);
561                 }
562         }
563         sc->sc_conf_base = conf_table;
564
565         /* Set the default configuration */
566         memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
567             LPI_CONFTAB_SIZE);
568
569         /* Flush the table to memory */
570         cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE);
571 }
572
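/*
 * Allocate a per-CPU LPI pending table for each CPU handled by this ITS
 * and write it back to memory so the hardware sees the zeroed table.
 */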
573 static void
574 gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
575 {
576         int i;
577
578         for (i = 0; i <= mp_maxid; i++) {
579                 if (CPU_ISSET(i, &sc->sc_cpus) == 0)
580                         continue;
581
582                 sc->sc_pend_base[i] = (vm_offset_t)contigmalloc(
583                     LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
584                     0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);
585
586                 /* Flush so the ITS can see the memory */
587                 cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i],
588                     LPI_PENDTAB_SIZE);
589         }
590 }
591
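/*
 * Point the current CPU's redistributor at the LPI configuration and
 * pending tables (GICR_PROPBASER/GICR_PENDBASER), falling back to
 * non-cacheable, non-shareable attributes where the hardware requires it,
 * then enable LPI delivery through GICR_CTLR.
 */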
592 static void
593 its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
594 {
595         device_t gicv3;
596         uint64_t xbaser, tmp;
597         uint32_t ctlr;
598         u_int cpuid;
599
600         gicv3 = device_get_parent(dev);
601         cpuid = PCPU_GET(cpuid);
602
603         /* Disable LPIs */
604         ctlr = gic_r_read_4(gicv3, GICR_CTLR);
605         ctlr &= ~GICR_CTLR_LPI_ENABLE;
606         gic_r_write_4(gicv3, GICR_CTLR, ctlr);
607
608         /* Make sure changes are observable by the GIC */
609         dsb(sy);
610
611         /*
612          * Set the redistributor LPI configuration table base
613          */
614         xbaser = vtophys(sc->sc_conf_base) |
615             (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
616             (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
617             (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
618         gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
619
620         /* Check the cache attributes we set */
621         tmp = gic_r_read_8(gicv3, GICR_PROPBASER);
622
623         if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
624             (xbaser & GICR_PROPBASER_SHARE_MASK)) {
625                 if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
626                     (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
627                         /* We need to mark as non-cacheable */
628                         xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
629                             GICR_PROPBASER_CACHE_MASK);
630                         /* Non-cacheable */
631                         xbaser |= GICR_PROPBASER_CACHE_NIN <<
632                             GICR_PROPBASER_CACHE_SHIFT;
633                         /* Non-shareable */
634                         xbaser |= GICR_PROPBASER_SHARE_NS <<
635                             GICR_PROPBASER_SHARE_SHIFT;
636                         gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
637                 }
638                 sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
639         }
640
641         /*
642          * Set the LPI pending table base
643          */
644         xbaser = vtophys(sc->sc_pend_base[cpuid]) |
645             (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
646             (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);
647
648         gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);
649
650         tmp = gic_r_read_8(gicv3, GICR_PENDBASER);
651
652         if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
653             (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
654                 /* Clear the cache and shareability bits */
655                 xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
656                     GICR_PENDBASER_SHARE_MASK);
657                 /* Mark as non-shareable */
658                 xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
659                 /* And non-cacheable */
660                 xbaser |= GICR_PENDBASER_CACHE_NIN <<
661                     GICR_PENDBASER_CACHE_SHIFT;
662         }
663
664         /* Enable LPIs */
665         ctlr = gic_r_read_4(gicv3, GICR_CTLR);
666         ctlr |= GICR_CTLR_LPI_ENABLE;
667         gic_r_write_4(gicv3, GICR_CTLR, ctlr);
668
669         /* Make sure the GIC has seen everything */
670         dsb(sy);
671 }
672
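/*
 * Per-CPU ITS initialisation: enable LPIs on the local redistributor (only
 * once per CPU) and map this CPU's collection to its redistributor with a
 * MAPC followed by an INVALL.
 */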
673 static int
674 its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
675 {
676         device_t gicv3;
677         vm_paddr_t target;
678         u_int cpuid;
679         struct redist_pcpu *rpcpu;
680
681         gicv3 = device_get_parent(dev);
682         cpuid = PCPU_GET(cpuid);
683         if (!CPU_ISSET(cpuid, &sc->sc_cpus))
684                 return (0);
685
686         /* Check if the redistributor supports physical LPIs on this CPU */
687         if ((gic_r_read_4(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0)
688                 return (ENXIO);
689
690         rpcpu = gicv3_get_redist(dev);
691
692         /* Do per-cpu LPI init once */
693         if (!rpcpu->lpi_enabled) {
694                 its_init_cpu_lpi(dev, sc);
695                 rpcpu->lpi_enabled = true;
696         }
697
698         if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
699                 /* This ITS wants the redistributor physical address */
700                 target = vtophys(rman_get_virtual(&rpcpu->res));
701         } else {
702                 /* This ITS wants the unique processor number */
703                 target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) <<
704                     CMD_TARGET_SHIFT;
705         }
706
707         sc->sc_its_cols[cpuid]->col_target = target;
708         sc->sc_its_cols[cpuid]->col_id = cpuid;
709
710         its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
711         its_cmd_invall(dev, sc->sc_its_cols[cpuid]);
712
713         return (0);
714 }
715
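/*
 * Common attach path for the ITS: map the ITS register frame, apply any
 * errata quirks, set up the private tables, command queue, per-CPU
 * collections and LPI tables, then register an irqsrc for each LPI this
 * instance owns.
 */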
716 static int
717 gicv3_its_attach(device_t dev)
718 {
719         struct gicv3_its_softc *sc;
720         const char *name;
721         uint32_t iidr;
722         int domain, err, i, rid;
723
724         sc = device_get_softc(dev);
725
726         sc->sc_irq_length = gicv3_get_nirqs(dev);
727         sc->sc_irq_base = GIC_FIRST_LPI;
728         sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length;
729
730         rid = 0;
731         sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
732             RF_ACTIVE);
733         if (sc->sc_its_res == NULL) {
734                 device_printf(dev, "Could not allocate memory\n");
735                 return (ENXIO);
736         }
737
738         iidr = gic_its_read_4(sc, GITS_IIDR);
739         for (i = 0; i < nitems(its_quirks); i++) {
740                 if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
741                         if (bootverbose) {
742                                 device_printf(dev, "Applying %s\n",
743                                     its_quirks[i].desc);
744                         }
745                         its_quirks[i].func(dev);
746                         break;
747                 }
748         }
749
750         /* Allocate the private tables */
751         err = gicv3_its_table_init(dev, sc);
752         if (err != 0)
753                 return (err);
754
755         /* Protects access to the device list */
756         mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);
757
758         /* Protects access to the ITS command circular buffer. */
759         mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);
760
761         CPU_ZERO(&sc->sc_cpus);
762         if (bus_get_domain(dev, &domain) == 0) {
763                 if (domain < MAXMEMDOM)
764                         CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
765         } else {
766                 CPU_COPY(&all_cpus, &sc->sc_cpus);
767         }
768
769         /* Allocate the command circular buffer */
770         gicv3_its_cmdq_init(sc);
771
772         /* Allocate the per-CPU collections */
773         for (int cpu = 0; cpu <= mp_maxid; cpu++)
774                 if (CPU_ISSET(cpu, &sc->sc_cpus) != 0)
775                         sc->sc_its_cols[cpu] = malloc(
776                             sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
777                             M_WAITOK | M_ZERO);
778                 else
779                         sc->sc_its_cols[cpu] = NULL;
780
781         /* Enable the ITS */
782         gic_its_write_4(sc, GITS_CTLR,
783             gic_its_read_4(sc, GITS_CTLR) | GITS_CTLR_EN);
784
785         /* Create the LPI configuration table */
786         gicv3_its_conftable_init(sc);
787
788         /* And the pending tables */
789         gicv3_its_pendtables_init(sc);
790
791         /* Enable LPIs on this CPU */
792         its_init_cpu(dev, sc);
793
794         TAILQ_INIT(&sc->sc_its_dev_list);
795
796         /*
797          * Create the vmem object to allocate INTRNG IRQs from. We try to
798          * use all IRQs not already used by the GICv3.
799          * XXX: This assumes there are no other interrupt controllers in the
800          * system.
801          */
802         sc->sc_irq_alloc = vmem_create("GICv3 ITS IRQs", 0,
803             gicv3_get_nirqs(dev), 1, 1, M_FIRSTFIT | M_WAITOK);
804
805         sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length,
806             M_GICV3_ITS, M_WAITOK | M_ZERO);
807         name = device_get_nameunit(dev);
808         for (i = 0; i < sc->sc_irq_length; i++) {
809                 sc->sc_irqs[i].gi_irq = i;
810                 sc->sc_irqs[i].gi_lpi = i + sc->sc_irq_base - GIC_FIRST_LPI;
811                 err = intr_isrc_register(&sc->sc_irqs[i].gi_isrc, dev, 0,
812                     "%s,%u", name, i);
813         }
814
815         return (0);
816 }
817
818 static int
819 gicv3_its_detach(device_t dev)
820 {
821
822         return (ENXIO);
823 }
824
825 static void
826 its_quirk_cavium_22375(device_t dev)
827 {
828         struct gicv3_its_softc *sc;
829
830         sc = device_get_softc(dev);
831         sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;
832 }
833
834 static void
835 gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
836 {
837         struct gicv3_its_softc *sc;
838         struct gicv3_its_irqsrc *girq;
839         uint8_t *conf;
840
841         sc = device_get_softc(dev);
842         girq = (struct gicv3_its_irqsrc *)isrc;
843         conf = sc->sc_conf_base;
844
845         conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE;
846
847         if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
848                 /* Clean D-cache under command. */
849                 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
850         } else {
851                 /* DSB inner shareable, store */
852                 dsb(ishst);
853         }
854
855         its_cmd_inv(dev, girq->gi_its_dev, girq);
856 }
857
858 static void
859 gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
860 {
861         struct gicv3_its_softc *sc;
862         struct gicv3_its_irqsrc *girq;
863         uint8_t *conf;
864
865         sc = device_get_softc(dev);
866         girq = (struct gicv3_its_irqsrc *)isrc;
867         conf = sc->sc_conf_base;
868
869         conf[girq->gi_lpi] |= LPI_CONF_ENABLE;
870
871         if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
872                 /* Clean D-cache under command. */
873                 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
874         } else {
875                 /* DSB inner shareable, store */
876                 dsb(ishst);
877         }
878
879         its_cmd_inv(dev, girq->gi_its_dev, girq);
880 }
881
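/*
 * Interrupt handler for the LPI range owned by this ITS: translate the
 * incoming IRQ number back into an irqsrc and dispatch it to the
 * interrupt framework.
 */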
882 static int
883 gicv3_its_intr(void *arg, uintptr_t irq)
884 {
885         struct gicv3_its_softc *sc = arg;
886         struct gicv3_its_irqsrc *girq;
887         struct trapframe *tf;
888
889         irq -= sc->sc_irq_base;
890         if (irq >= sc->sc_irq_length)
891                 panic("gicv3_its_intr: Invalid interrupt %ld",
892                     irq + sc->sc_irq_base);
893         girq = &sc->sc_irqs[irq];
894
895         tf = curthread->td_intr_frame;
896         intr_isrc_dispatch(&girq->gi_isrc, tf);
897         return (FILTER_HANDLED);
898 }
899
900 static void
901 gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
902 {
903         struct gicv3_its_irqsrc *girq;
904         struct gicv3_its_softc *sc;
905
906         sc = device_get_softc(dev);
907         girq = (struct gicv3_its_irqsrc *)isrc;
908         gicv3_its_disable_intr(dev, isrc);
909         gic_icc_write(EOIR1, girq->gi_irq + sc->sc_irq_base);
910 }
911
912 static void
913 gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
914 {
915
916         gicv3_its_enable_intr(dev, isrc);
917 }
918
919 static void
920 gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
921 {
922         struct gicv3_its_irqsrc *girq;
923         struct gicv3_its_softc *sc;
924
925         sc = device_get_softc(dev);
926         girq = (struct gicv3_its_irqsrc *)isrc;
927         gic_icc_write(EOIR1, girq->gi_irq + sc->sc_irq_base);
928 }
929
930 static int
931 gicv3_its_select_cpu(device_t dev, struct intr_irqsrc *isrc)
932 {
933         struct gicv3_its_softc *sc;
934
935         sc = device_get_softc(dev);
936         if (CPU_EMPTY(&isrc->isrc_cpu)) {
937                 sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
938                     &sc->sc_cpus);
939                 CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
940         }
941
942         return (0);
943 }
944
945 static int
946 gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
947 {
948         struct gicv3_its_irqsrc *girq;
949
950         gicv3_its_select_cpu(dev, isrc);
951
952         girq = (struct gicv3_its_irqsrc *)isrc;
953         its_cmd_movi(dev, girq);
954         return (0);
955 }
956
957 static int
958 gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
959     struct intr_irqsrc **isrcp)
960 {
961
962         /*
963          * This should never happen; this function is only called to map
964          * interrupts found before the controller driver is ready.
965          */
966         panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
967 }
968
969 static int
970 gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
971     struct resource *res, struct intr_map_data *data)
972 {
973
974         /* Bind the interrupt to a CPU */
975         gicv3_its_bind_intr(dev, isrc);
976
977         return (0);
978 }
979
980 #ifdef SMP
981 static void
982 gicv3_its_init_secondary(device_t dev)
983 {
984         struct gicv3_its_softc *sc;
985
986         sc = device_get_softc(dev);
987
988         /*
989          * This is fatal as otherwise we may bind interrupts to this CPU.
990          * We need a way to tell the interrupt framework to only bind to a
991          * subset of given CPUs when it performs the shuffle.
992          */
993         if (its_init_cpu(dev, sc) != 0)
994                 panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
995                     PCPU_GET(cpuid));
996 }
997 #endif
998
999 static uint32_t
1000 its_get_devid(device_t pci_dev)
1001 {
1002         uintptr_t id;
1003
1004         if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
1005                 panic("its_get_devid: Unable to get the MSI DeviceID");
1006
1007         return (id);
1008 }
1009
1010 static struct its_dev *
1011 its_device_find(device_t dev, device_t child)
1012 {
1013         struct gicv3_its_softc *sc;
1014         struct its_dev *its_dev = NULL;
1015
1016         sc = device_get_softc(dev);
1017
1018         mtx_lock_spin(&sc->sc_its_dev_lock);
1019         TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
1020                 if (its_dev->pci_dev == child)
1021                         break;
1022         }
1023         mtx_unlock_spin(&sc->sc_its_dev_lock);
1024
1025         return (its_dev);
1026 }
1027
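/*
 * Find or create the ITS state for a child device: allocate a chunk of
 * nvecs LPIs from the IRQ vmem arena, allocate the device's Interrupt
 * Translation Table (ITT), and issue a MAPD command to bind the DeviceID
 * to the ITT.
 */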
1028 static struct its_dev *
1029 its_device_get(device_t dev, device_t child, u_int nvecs)
1030 {
1031         struct gicv3_its_softc *sc;
1032         struct its_dev *its_dev;
1033         vmem_addr_t irq_base;
1034         size_t esize;
1035
1036         sc = device_get_softc(dev);
1037
1038         its_dev = its_device_find(dev, child);
1039         if (its_dev != NULL)
1040                 return (its_dev);
1041
1042         its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
1043         if (its_dev == NULL)
1044                 return (NULL);
1045
1046         its_dev->pci_dev = child;
1047         its_dev->devid = its_get_devid(child);
1048
1049         its_dev->lpis.lpi_busy = 0;
1050         its_dev->lpis.lpi_num = nvecs;
1051         its_dev->lpis.lpi_free = nvecs;
1052
1053         if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
1054             &irq_base) != 0) {
1055                 free(its_dev, M_GICV3_ITS);
1056                 return (NULL);
1057         }
1058         its_dev->lpis.lpi_base = irq_base;
1059
1060         /* Get ITT entry size */
1061         esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));
1062
1063         /*
1064          * Allocate ITT for this device.
1065          * The PA has to be 256 B aligned and the ITT needs at least two entries.
1066          */
1067         its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
1068         its_dev->itt = (vm_offset_t)contigmalloc(its_dev->itt_size,
1069             M_GICV3_ITS, M_NOWAIT | M_ZERO, 0, LPI_INT_TRANS_TAB_MAX_ADDR,
1070             LPI_INT_TRANS_TAB_ALIGN, 0);
1071         if (its_dev->itt == 0) {
1072                 vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
1073                 free(its_dev, M_GICV3_ITS);
1074                 return (NULL);
1075         }
1076
1077         mtx_lock_spin(&sc->sc_its_dev_lock);
1078         TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
1079         mtx_unlock_spin(&sc->sc_its_dev_lock);
1080
1081         /* Map device to its ITT */
1082         its_cmd_mapd(dev, its_dev, 1);
1083
1084         return (its_dev);
1085 }
1086
1087 static void
1088 its_device_release(device_t dev, struct its_dev *its_dev)
1089 {
1090         struct gicv3_its_softc *sc;
1091
1092         KASSERT(its_dev->lpis.lpi_busy == 0,
1093             ("its_device_release: Trying to release an inuse ITS device"));
1094
1095         /* Unmap device in ITS */
1096         its_cmd_mapd(dev, its_dev, 0);
1097
1098         sc = device_get_softc(dev);
1099
1100         /* Remove the device from the list of devices */
1101         mtx_lock_spin(&sc->sc_its_dev_lock);
1102         TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
1103         mtx_unlock_spin(&sc->sc_its_dev_lock);
1104
1105         /* Free ITT */
1106         KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device"));
1107         contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS);
1108
1109         /* Free the IRQ allocation */
1110         vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
1111             its_dev->lpis.lpi_num);
1112
1113         free(its_dev, M_GICV3_ITS);
1114 }
1115
1116 static int
1117 gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1118     device_t *pic, struct intr_irqsrc **srcs)
1119 {
1120         struct gicv3_its_softc *sc;
1121         struct gicv3_its_irqsrc *girq;
1122         struct its_dev *its_dev;
1123         u_int irq;
1124         int i;
1125
1126         its_dev = its_device_get(dev, child, count);
1127         if (its_dev == NULL)
1128                 return (ENXIO);
1129
1130         KASSERT(its_dev->lpis.lpi_free >= count,
1131             ("gicv3_its_alloc_msi: No free LPIs"));
1132         sc = device_get_softc(dev);
1133         irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
1134             its_dev->lpis.lpi_free;
1135         for (i = 0; i < count; i++, irq++) {
1136                 its_dev->lpis.lpi_free--;
1137                 girq = &sc->sc_irqs[irq];
1138                 girq->gi_its_dev = its_dev;
1139                 srcs[i] = (struct intr_irqsrc *)girq;
1140
1141                 /* Map the message to the given IRQ */
1142                 gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
1143                 its_cmd_mapti(dev, girq);
1144         }
1145         its_dev->lpis.lpi_busy += count;
1146         *pic = dev;
1147
1148         return (0);
1149 }
1150
1151 static int
1152 gicv3_its_release_msi(device_t dev, device_t child, int count,
1153     struct intr_irqsrc **isrc)
1154 {
1155         struct gicv3_its_irqsrc *girq;
1156         struct its_dev *its_dev;
1157         int i;
1158
1159         its_dev = its_device_find(dev, child);
1160
1161         KASSERT(its_dev != NULL,
1162             ("gicv3_its_release_msi: Releasing a MSI interrupt with "
1163              "no ITS device"));
1164         KASSERT(its_dev->lpis.lpi_busy >= count,
1165             ("gicv3_its_release_msi: Releasing more interrupts than "
1166              "were allocated: releasing %d, allocated %d", count,
1167              its_dev->lpis.lpi_busy));
1168         for (i = 0; i < count; i++) {
1169                 girq = (struct gicv3_its_irqsrc *)isrc[i];
1170                 girq->gi_its_dev = NULL;
1171         }
1172         its_dev->lpis.lpi_busy -= count;
1173
1174         if (its_dev->lpis.lpi_busy == 0)
1175                 its_device_release(dev, its_dev);
1176
1177         return (0);
1178 }
1179
1180 static int
1181 gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
1182     struct intr_irqsrc **isrcp)
1183 {
1184         struct gicv3_its_softc *sc;
1185         struct gicv3_its_irqsrc *girq;
1186         struct its_dev *its_dev;
1187         u_int nvecs, irq;
1188
1189         nvecs = pci_msix_count(child);
1190         its_dev = its_device_get(dev, child, nvecs);
1191         if (its_dev == NULL)
1192                 return (ENXIO);
1193
1194         KASSERT(its_dev->lpis.lpi_free > 0,
1195             ("gicv3_its_alloc_msix: No free LPIs"));
1196         sc = device_get_softc(dev);
1197         irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
1198             its_dev->lpis.lpi_free;
1199         its_dev->lpis.lpi_free--;
1200         its_dev->lpis.lpi_busy++;
1201         girq = &sc->sc_irqs[irq];
1202         girq->gi_its_dev = its_dev;
1203
1204         /* Map the message to the given IRQ */
1205         gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
1206         its_cmd_mapti(dev, girq);
1207
1208         *pic = dev;
1209         *isrcp = (struct intr_irqsrc *)girq;
1210
1211         return (0);
1212 }
1213
1214 static int
1215 gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1216 {
1217         struct gicv3_its_irqsrc *girq;
1218         struct its_dev *its_dev;
1219
1220         its_dev = its_device_find(dev, child);
1221
1222         KASSERT(its_dev != NULL,
1223             ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
1224              "no ITS device"));
1225         KASSERT(its_dev->lpis.lpi_busy > 0,
1226             ("gicv3_its_release_msix: Releasing more interrupts than "
1227              "were allocated: allocated %d", its_dev->lpis.lpi_busy));
1228         girq = (struct gicv3_its_irqsrc *)isrc;
1229         girq->gi_its_dev = NULL;
1230         its_dev->lpis.lpi_busy--;
1231
1232         if (its_dev->lpis.lpi_busy == 0)
1233                 its_device_release(dev, its_dev);
1234
1235         return (0);
1236 }
1237
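/*
 * Return the MSI doorbell for a child device: the address is the physical
 * address of this ITS's GITS_TRANSLATER register and the data is the
 * EventID, i.e. the LPI's offset within the device's allocated chunk.
 */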
1238 static int
1239 gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1240     uint64_t *addr, uint32_t *data)
1241 {
1242         struct gicv3_its_softc *sc;
1243         struct gicv3_its_irqsrc *girq;
1244
1245         sc = device_get_softc(dev);
1246         girq = (struct gicv3_its_irqsrc *)isrc;
1247
1248         *addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
1249         *data = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base;
1250
1251         return (0);
1252 }
1253
1254 /*
1255  * Commands handling.
1256  */
1257
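/*
 * Each ITS command is four 64-bit doublewords stored little-endian in
 * memory. The cmd_format_* helpers below clear a field's mask and then
 * OR in the new value.
 */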
1258 static __inline void
1259 cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type)
1260 {
1261         /* Command field: DW0 [7:0] */
1262         cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK);
1263         cmd->cmd_dword[0] |= htole64(cmd_type);
1264 }
1265
1266 static __inline void
1267 cmd_format_devid(struct its_cmd *cmd, uint32_t devid)
1268 {
1269         /* Device ID field: DW0 [63:32] */
1270         cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK);
1271         cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT);
1272 }
1273
1274 static __inline void
1275 cmd_format_size(struct its_cmd *cmd, uint16_t size)
1276 {
1277         /* Size field: DW1 [4:0] */
1278         cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK);
1279         cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK));
1280 }
1281
1282 static __inline void
1283 cmd_format_id(struct its_cmd *cmd, uint32_t id)
1284 {
1285         /* ID field: DW1 [31:0] */
1286         cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK);
1287         cmd->cmd_dword[1] |= htole64(id);
1288 }
1289
1290 static __inline void
1291 cmd_format_pid(struct its_cmd *cmd, uint32_t pid)
1292 {
1293         /* Physical ID field: DW1 [63:32] */
1294         cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK);
1295         cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT);
1296 }
1297
1298 static __inline void
1299 cmd_format_col(struct its_cmd *cmd, uint16_t col_id)
1300 {
1301         /* Collection field: DW2 [15:0] */
1302         cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK);
1303         cmd->cmd_dword[2] |= htole64(col_id);
1304 }
1305
1306 static __inline void
1307 cmd_format_target(struct its_cmd *cmd, uint64_t target)
1308 {
1309         /* Target Address field: DW2 [47:16] */
1310         cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK);
1311         cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK);
1312 }
1313
1314 static __inline void
1315 cmd_format_itt(struct its_cmd *cmd, uint64_t itt)
1316 {
1317         /* ITT Address field: DW2 [47:8] */
1318         cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK);
1319         cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK);
1320 }
1321
1322 static __inline void
1323 cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
1324 {
1325         /* Valid field: DW2 [63] */
1326         cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
1327         cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
1328 }
1329
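/*
 * The command queue is a ring buffer: it is full when advancing the write
 * index would make it equal to the hardware's read index (GITS_CREADR).
 */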
1330 static inline bool
1331 its_cmd_queue_full(struct gicv3_its_softc *sc)
1332 {
1333         size_t read_idx, next_write_idx;
1334
1335         /* Get the index of the next command */
1336         next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
1337             (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
1338         /* And the index of the current command being read */
1339         read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);
1340
1341         /*
1342          * The queue is full when the write offset points
1343          * at the command before the current read offset.
1344          */
1345         return (next_write_idx == read_idx);
1346 }
1347
1348 static inline void
1349 its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
1350 {
1351
1352         if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
1353                 /* Clean D-cache under command. */
1354                 cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
1355         } else {
1356                 /* DSB inner shareable, store */
1357                 dsb(ishst);
1358         }
1359
1360 }
1361
1362 static inline uint64_t
1363 its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
1364 {
1365         uint64_t off;
1366
1367         off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);
1368
1369         return (off);
1370 }
1371
1372 static void
1373 its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
1374     struct its_cmd *cmd_last)
1375 {
1376         struct gicv3_its_softc *sc;
1377         uint64_t first, last, read;
1378         size_t us_left;
1379
1380         sc = device_get_softc(dev);
1381
1382         /*
1383          * XXX ARM64TODO: This is obviously a significant delay.
1384          * The reason for that is that currently the time frames for
1385          * the command to complete are not known.
1386          */
1387         us_left = 1000000;
1388
1389         first = its_cmd_cwriter_offset(sc, cmd_first);
1390         last = its_cmd_cwriter_offset(sc, cmd_last);
1391
1392         for (;;) {
1393                 read = gic_its_read_8(sc, GITS_CREADR);
1394                 if (first < last) {
1395                         if (read < first || read >= last)
1396                                 break;
1397                 } else if (read < first && read >= last)
1398                         break;
1399
1400                 if (us_left-- == 0) {
1401                         /* This means timeout */
1402                         device_printf(dev,
1403                             "Timeout while waiting for CMD completion.\n");
1404                         return;
1405                 }
1406                 DELAY(1);
1407         }
1408 }
1409
1410
1411 static struct its_cmd *
1412 its_cmd_alloc_locked(device_t dev)
1413 {
1414         struct gicv3_its_softc *sc;
1415         struct its_cmd *cmd;
1416         size_t us_left;
1417
1418         sc = device_get_softc(dev);
1419
1420         /*
1421          * XXX ARM64TODO: This is obviously a significant delay.
1422          * The reason for that is that currently the time frames for
1423          * the command to complete (and therefore free the descriptor)
1424          * are not known.
1425          */
1426         us_left = 1000000;
1427
1428         mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
1429         while (its_cmd_queue_full(sc)) {
1430                 if (us_left-- == 0) {
1431                         /* Timeout while waiting for free command */
1432                         device_printf(dev,
1433                             "Timeout while waiting for free command\n");
1434                         return (NULL);
1435                 }
1436                 DELAY(1);
1437         }
1438
1439         cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
1440         sc->sc_its_cmd_next_idx++;
1441         sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);
1442
1443         return (cmd);
1444 }
1445
1446 static uint64_t
1447 its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
1448 {
1449         uint64_t target;
1450         uint8_t cmd_type;
1451         u_int size;
1452
1453         cmd_type = desc->cmd_type;
1454         target = ITS_TARGET_NONE;
1455
1456         switch (cmd_type) {
1457         case ITS_CMD_MOVI:      /* Move interrupt ID to another collection */
1458                 target = desc->cmd_desc_movi.col->col_target;
1459                 cmd_format_command(cmd, ITS_CMD_MOVI);
1460                 cmd_format_id(cmd, desc->cmd_desc_movi.id);
1461                 cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id);
1462                 cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
1463                 break;
1464         case ITS_CMD_SYNC:      /* Wait for previous commands completion */
1465                 target = desc->cmd_desc_sync.col->col_target;
1466                 cmd_format_command(cmd, ITS_CMD_SYNC);
1467                 cmd_format_target(cmd, target);
1468                 break;
1469         case ITS_CMD_MAPD:      /* Assign ITT to device */
1470                 cmd_format_command(cmd, ITS_CMD_MAPD);
1471                 cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
1472                 /*
1473                  * Size describes number of bits to encode interrupt IDs
1474                  * supported by the device minus one.
1475                  * When V (valid) bit is zero, this field should be written
1476                  * as zero.
1477                  */
1478                 if (desc->cmd_desc_mapd.valid != 0) {
1479                         size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
1480                         size = MAX(1, size) - 1;
1481                 } else
1482                         size = 0;
1483
1484                 cmd_format_size(cmd, size);
1485                 cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
1486                 cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
1487                 break;
1488         case ITS_CMD_MAPC:      /* Map collection to Re-Distributor */
1489                 target = desc->cmd_desc_mapc.col->col_target;
1490                 cmd_format_command(cmd, ITS_CMD_MAPC);
1491                 cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
1492                 cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
1493                 cmd_format_target(cmd, target);
1494                 break;
1495         case ITS_CMD_MAPTI:
1496                 target = desc->cmd_desc_mapvi.col->col_target;
1497                 cmd_format_command(cmd, ITS_CMD_MAPTI);
1498                 cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
1499                 cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
1500                 cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
1501                 cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
1502                 break;
1503         case ITS_CMD_MAPI:
1504                 target = desc->cmd_desc_mapi.col->col_target;
1505                 cmd_format_command(cmd, ITS_CMD_MAPI);
1506                 cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
1507                 cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
1508                 cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
1509                 break;
1510         case ITS_CMD_INV:
1511                 target = desc->cmd_desc_inv.col->col_target;
1512                 cmd_format_command(cmd, ITS_CMD_INV);
1513                 cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
1514                 cmd_format_id(cmd, desc->cmd_desc_inv.pid);
1515                 break;
1516         case ITS_CMD_INVALL:    /* Reload the config of all interrupts in a collection */
1517                 cmd_format_command(cmd, ITS_CMD_INVALL);
1518                 cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
1519                 break;
1520         default:
1521                 panic("its_cmd_prepare: Invalid command: %x", cmd_type);
1522         }
1523
1524         return (target);
1525 }
1526
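/*
 * Enqueue a single command, followed by a SYNC when the command has a
 * target Re-Distributor, then publish both by advancing GITS_CWRITER and
 * wait for the ITS to consume the new queue entries.
 */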
1527 static int
1528 its_cmd_send(device_t dev, struct its_cmd_desc *desc)
1529 {
1530         struct gicv3_its_softc *sc;
1531         struct its_cmd *cmd, *cmd_sync, *cmd_write;
1532         struct its_col col_sync;
1533         struct its_cmd_desc desc_sync;
1534         uint64_t target, cwriter;
1535
1536         sc = device_get_softc(dev);
1537         mtx_lock_spin(&sc->sc_its_cmd_lock);
1538         cmd = its_cmd_alloc_locked(dev);
1539         if (cmd == NULL) {
1540                 device_printf(dev, "could not allocate ITS command\n");
1541                 mtx_unlock_spin(&sc->sc_its_cmd_lock);
1542                 return (EBUSY);
1543         }
1544
1545         target = its_cmd_prepare(cmd, desc);
1546         its_cmd_sync(sc, cmd);
1547
1548         if (target != ITS_TARGET_NONE) {
1549                 cmd_sync = its_cmd_alloc_locked(dev);
1550                 if (cmd_sync != NULL) {
1551                         desc_sync.cmd_type = ITS_CMD_SYNC;
1552                         col_sync.col_target = target;
1553                         desc_sync.cmd_desc_sync.col = &col_sync;
1554                         its_cmd_prepare(cmd_sync, &desc_sync);
1555                         its_cmd_sync(sc, cmd_sync);
1556                 }
1557         }
1558
1559         /* Update GITS_CWRITER */
1560         cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
1561         gic_its_write_8(sc, GITS_CWRITER, cwriter);
1562         cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
1563         mtx_unlock_spin(&sc->sc_its_cmd_lock);
1564
1565         its_cmd_wait_completion(dev, cmd, cmd_write);
1566
1567         return (0);
1568 }
1569
1570 /* Handlers to send commands */
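/*
 * MOVI: redirect an already mapped event to the collection of the CPU the
 * interrupt source is now bound to.
 */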
1571 static void
1572 its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
1573 {
1574         struct gicv3_its_softc *sc;
1575         struct its_cmd_desc desc;
1576         struct its_col *col;
1577
1578         sc = device_get_softc(dev);
1579         col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
1580
1581         desc.cmd_type = ITS_CMD_MOVI;
1582         desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
1583         desc.cmd_desc_movi.col = col;
1584         desc.cmd_desc_movi.id = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base;
1585
1586         its_cmd_send(dev, &desc);
1587 }
1588
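/* MAPC: map or unmap a collection to/from its target Re-Distributor. */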
1589 static void
1590 its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
1591 {
1592         struct its_cmd_desc desc;
1593
1594         desc.cmd_type = ITS_CMD_MAPC;
1595         desc.cmd_desc_mapc.col = col;
1596         /*
1597          * Valid bit set - map the collection.
1598          * Valid bit cleared - unmap the collection.
1599          */
1600         desc.cmd_desc_mapc.valid = valid;
1601
1602         its_cmd_send(dev, &desc);
1603 }
1604
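/*
 * MAPTI: map the device's EventID to a physical LPI and to the collection
 * of the CPU the interrupt source is bound to.
 */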
1605 static void
1606 its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
1607 {
1608         struct gicv3_its_softc *sc;
1609         struct its_cmd_desc desc;
1610         struct its_col *col;
1611         u_int col_id;
1612
1613         sc = device_get_softc(dev);
1614
1615         col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
1616         col = sc->sc_its_cols[col_id];
1617
1618         desc.cmd_type = ITS_CMD_MAPTI;
1619         desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
1620         desc.cmd_desc_mapvi.col = col;
1621         /* The EventID sent to the device */
1622         desc.cmd_desc_mapvi.id = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base;
1623         /* The physical interrupt presented to software */
1624         desc.cmd_desc_mapvi.pid = girq->gi_irq + sc->sc_irq_base;
1625
1626         its_cmd_send(dev, &desc);
1627 }
1628
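/* MAPD: attach (valid != 0) or detach the per-device ITT. */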
1629 static void
1630 its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
1631 {
1632         struct its_cmd_desc desc;
1633
1634         desc.cmd_type = ITS_CMD_MAPD;
1635         desc.cmd_desc_mapd.its_dev = its_dev;
1636         desc.cmd_desc_mapd.valid = valid;
1637
1638         its_cmd_send(dev, &desc);
1639 }
1640
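/*
 * INV: have the ITS re-read the LPI configuration for a single event after
 * the LPI configuration table has been updated.
 */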
1641 static void
1642 its_cmd_inv(device_t dev, struct its_dev *its_dev,
1643     struct gicv3_its_irqsrc *girq)
1644 {
1645         struct gicv3_its_softc *sc;
1646         struct its_cmd_desc desc;
1647         struct its_col *col;
1648
1649         sc = device_get_softc(dev);
1650         col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
1651
1652         desc.cmd_type = ITS_CMD_INV;
1653         /* The EventID sent to the device */
1654         desc.cmd_desc_inv.pid = girq->gi_irq - its_dev->lpis.lpi_base;
1655         desc.cmd_desc_inv.its_dev = its_dev;
1656         desc.cmd_desc_inv.col = col;
1657
1658         its_cmd_send(dev, &desc);
1659 }
1660
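/* INVALL: re-read the LPI configuration of every interrupt in a collection. */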
1661 static void
1662 its_cmd_invall(device_t dev, struct its_col *col)
1663 {
1664         struct its_cmd_desc desc;
1665
1666         desc.cmd_type = ITS_CMD_INVALL;
1667         desc.cmd_desc_invall.col = col;
1668
1669         its_cmd_send(dev, &desc);
1670 }
1671
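/* Bus glue for the flattened device tree attachment. */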
1672 #ifdef FDT
1673 static device_probe_t gicv3_its_fdt_probe;
1674 static device_attach_t gicv3_its_fdt_attach;
1675
1676 static device_method_t gicv3_its_fdt_methods[] = {
1677         /* Device interface */
1678         DEVMETHOD(device_probe,         gicv3_its_fdt_probe),
1679         DEVMETHOD(device_attach,        gicv3_its_fdt_attach),
1680
1681         /* End */
1682         DEVMETHOD_END
1683 };
1684
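/*
 * Rename the generated baseclasses symbol so the FDT and ACPI variants of
 * the "its" class can coexist in the same file.
 */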
1685 #define its_baseclasses its_fdt_baseclasses
1686 DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
1687     sizeof(struct gicv3_its_softc), gicv3_its_driver);
1688 #undef its_baseclasses
1689 static devclass_t gicv3_its_fdt_devclass;
1690
1691 EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver,
1692     gicv3_its_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
1693
1694 static int
1695 gicv3_its_fdt_probe(device_t dev)
1696 {
1697
1698         if (!ofw_bus_status_okay(dev))
1699                 return (ENXIO);
1700
1701         if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
1702                 return (ENXIO);
1703
1704         device_set_desc(dev, "ARM GIC Interrupt Translation Service");
1705         return (BUS_PROBE_DEFAULT);
1706 }
1707
1708 static int
1709 gicv3_its_fdt_attach(device_t dev)
1710 {
1711         struct gicv3_its_softc *sc;
1712         phandle_t xref;
1713         int err;
1714
1715         sc = device_get_softc(dev);
1716         err = gicv3_its_attach(dev);
1717         if (err != 0)
1718                 return (err);
1719
1720         /* Register this device as an interrupt controller */
1721         xref = OF_xref_from_node(ofw_bus_get_node(dev));
1722         sc->sc_pic = intr_pic_register(dev, xref);
1723         intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
1724             gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
1725
1726         /* Register this device to handle MSI interrupts */
1727         intr_msi_register(dev, xref);
1728
1729         return (0);
1730 }
1731 #endif
1732
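/* Bus glue for the ACPI attachment; the MSI xref comes from the parent GIC. */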
1733 #ifdef DEV_ACPI
1734 static device_probe_t gicv3_its_acpi_probe;
1735 static device_attach_t gicv3_its_acpi_attach;
1736
1737 static device_method_t gicv3_its_acpi_methods[] = {
1738         /* Device interface */
1739         DEVMETHOD(device_probe,         gicv3_its_acpi_probe),
1740         DEVMETHOD(device_attach,        gicv3_its_acpi_attach),
1741
1742         /* End */
1743         DEVMETHOD_END
1744 };
1745
1746 #define its_baseclasses its_acpi_baseclasses
1747 DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
1748     sizeof(struct gicv3_its_softc), gicv3_its_driver);
1749 #undef its_baseclasses
1750 static devclass_t gicv3_its_acpi_devclass;
1751
1752 EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver,
1753     gicv3_its_acpi_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
1754
1755 static int
1756 gicv3_its_acpi_probe(device_t dev)
1757 {
1758
1759         if (gic_get_bus(dev) != GIC_BUS_ACPI)
1760                 return (EINVAL);
1761
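        /* The ITS is only specified for GICv3 and later */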
1762         if (gic_get_hw_rev(dev) < 3)
1763                 return (EINVAL);
1764
1765         device_set_desc(dev, "ARM GIC Interrupt Translation Service");
1766         return (BUS_PROBE_DEFAULT);
1767 }
1768
1769 static int
1770 gicv3_its_acpi_attach(device_t dev)
1771 {
1772         struct gicv3_its_softc *sc;
1773         struct gic_v3_devinfo *di;
1774         int err;
1775
1776         sc = device_get_softc(dev);
1777         err = gicv3_its_attach(dev);
1778         if (err != 0)
1779                 return (err);
1780
1781         di = device_get_ivars(dev);
1782         sc->sc_pic = intr_pic_register(dev, di->msi_xref);
1783         intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
1784             gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
1785
1786         /* Register this device to handle MSI interrupts */
1787         intr_msi_register(dev, di->msi_xref);
1788
1789         return (0);
1790 }
1791 #endif