/*-
 * Copyright (c) 1999, 2000 Matthew R. Green
 * Copyright (c) 2001 - 2003 by Thomas Moestl <tmm@FreeBSD.org>
 * Copyright (c) 2009 by Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: psycho.c,v 1.39 2001/10/07 20:30:41 eeh Exp
 * from: FreeBSD: psycho.c 183152 2008-09-18 19:45:22Z marius
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for `Fire' JBus to PCI Express and `Oberon' Uranus to PCI Express
 * bridges
 */

#include "opt_ofw_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timetc.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/bus_common.h>
#include <machine/bus_private.h>
#include <machine/iommureg.h>
#include <machine/iommuvar.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcib_private.h>

#include <sparc64/pci/ofw_pci.h>
#include <sparc64/pci/firereg.h>
#include <sparc64/pci/firevar.h>

#include "pcib_if.h"

struct fire_msiqarg;

static const struct fire_desc *fire_get_desc(device_t dev);
static void fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
    bus_dmasync_op_t op);
static int fire_get_intrmap(struct fire_softc *sc, u_int ino,
    bus_addr_t *intrmapptr, bus_addr_t *intrclrptr);
static void fire_intr_assign(void *arg);
static void fire_intr_clear(void *arg);
static void fire_intr_disable(void *arg);
static void fire_intr_enable(void *arg);
static int fire_intr_register(struct fire_softc *sc, u_int ino);
static inline void fire_msiq_common(struct intr_vector *iv,
    struct fire_msiqarg *fmqa);
static void fire_msiq_filter(void *cookie);
static void fire_msiq_handler(void *cookie);
static void fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
    driver_filter_t handler, void *arg);
static timecounter_get_t fire_get_timecount;

/* Interrupt handlers */
static driver_filter_t fire_dmc_pec;
static driver_filter_t fire_pcie;
static driver_filter_t fire_xcb;

/*
 * Methods
 */
static pcib_alloc_msi_t fire_alloc_msi;
static pcib_alloc_msix_t fire_alloc_msix;
static bus_alloc_resource_t fire_alloc_resource;
static device_attach_t fire_attach;
static pcib_map_msi_t fire_map_msi;
static pcib_maxslots_t fire_maxslots;
static device_probe_t fire_probe;
static pcib_read_config_t fire_read_config;
static pcib_release_msi_t fire_release_msi;
static pcib_release_msix_t fire_release_msix;
static pcib_route_interrupt_t fire_route_interrupt;
static bus_setup_intr_t fire_setup_intr;
static bus_teardown_intr_t fire_teardown_intr;
static pcib_write_config_t fire_write_config;

static device_method_t fire_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         fire_probe),
        DEVMETHOD(device_attach,        fire_attach),
        DEVMETHOD(device_shutdown,      bus_generic_shutdown),
        DEVMETHOD(device_suspend,       bus_generic_suspend),
        DEVMETHOD(device_resume,        bus_generic_resume),

        /* Bus interface */
        DEVMETHOD(bus_read_ivar,        ofw_pci_read_ivar),
        DEVMETHOD(bus_setup_intr,       fire_setup_intr),
        DEVMETHOD(bus_teardown_intr,    fire_teardown_intr),
        DEVMETHOD(bus_alloc_resource,   fire_alloc_resource),
        DEVMETHOD(bus_activate_resource, ofw_pci_activate_resource),
        DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
        DEVMETHOD(bus_adjust_resource,  ofw_pci_adjust_resource),
        DEVMETHOD(bus_release_resource, bus_generic_release_resource),
        DEVMETHOD(bus_get_dma_tag,      ofw_pci_get_dma_tag),

        /* pcib interface */
        DEVMETHOD(pcib_maxslots,        fire_maxslots),
        DEVMETHOD(pcib_read_config,     fire_read_config),
        DEVMETHOD(pcib_write_config,    fire_write_config),
        DEVMETHOD(pcib_route_interrupt, fire_route_interrupt),
        DEVMETHOD(pcib_alloc_msi,       fire_alloc_msi),
        DEVMETHOD(pcib_release_msi,     fire_release_msi),
        DEVMETHOD(pcib_alloc_msix,      fire_alloc_msix),
        DEVMETHOD(pcib_release_msix,    fire_release_msix),
        DEVMETHOD(pcib_map_msi,         fire_map_msi),
        DEVMETHOD(pcib_request_feature, pcib_request_feature_allow),

        /* ofw_bus interface */
        DEVMETHOD(ofw_bus_get_node,     ofw_pci_get_node),

        DEVMETHOD_END
};

static devclass_t fire_devclass;

DEFINE_CLASS_0(pcib, fire_driver, fire_methods, sizeof(struct fire_softc));
EARLY_DRIVER_MODULE(fire, nexus, fire_driver, fire_devclass, 0, 0,
    BUS_PASS_BUS);
MODULE_DEPEND(fire, nexus, 1, 1, 1);

static const struct intr_controller fire_ic = {
        fire_intr_enable,
        fire_intr_disable,
        fire_intr_assign,
        fire_intr_clear
};

struct fire_icarg {
        struct fire_softc       *fica_sc;
        bus_addr_t              fica_map;
        bus_addr_t              fica_clr;
};

static const struct intr_controller fire_msiqc_filter = {
        fire_intr_enable,
        fire_intr_disable,
        fire_intr_assign,
        NULL
};

struct fire_msiqarg {
        struct fire_icarg       fmqa_fica;
        struct mtx              fmqa_mtx;
        struct fo_msiq_record   *fmqa_base;
        bus_addr_t              fmqa_head;
        bus_addr_t              fmqa_tail;
        u_int                   fmqa_msiq;
        u_int                   fmqa_msi;
};

#define FIRE_PERF_CNT_QLTY      100

#define FIRE_SPC_BARRIER(spc, sc, offs, len, flags)                     \
        bus_barrier((sc)->sc_mem_res[(spc)], (offs), (len), (flags))
#define FIRE_SPC_READ_8(spc, sc, offs)                                  \
        bus_read_8((sc)->sc_mem_res[(spc)], (offs))
#define FIRE_SPC_WRITE_8(spc, sc, offs, v)                              \
        bus_write_8((sc)->sc_mem_res[(spc)], (offs), (v))

#ifndef FIRE_DEBUG
#define FIRE_SPC_SET(spc, sc, offs, reg, v)                             \
        FIRE_SPC_WRITE_8((spc), (sc), (offs), (v))
#else
#define FIRE_SPC_SET(spc, sc, offs, reg, v) do {                        \
        device_printf((sc)->sc_dev, reg " 0x%016llx -> 0x%016llx\n",    \
            (unsigned long long)FIRE_SPC_READ_8((spc), (sc), (offs)),   \
            (unsigned long long)(v));                                   \
        FIRE_SPC_WRITE_8((spc), (sc), (offs), (v));                     \
} while (0)
#endif

#define FIRE_PCI_BARRIER(sc, offs, len, flags)                          \
        FIRE_SPC_BARRIER(FIRE_PCI, (sc), (offs), len, flags)
#define FIRE_PCI_READ_8(sc, offs)                                       \
        FIRE_SPC_READ_8(FIRE_PCI, (sc), (offs))
#define FIRE_PCI_WRITE_8(sc, offs, v)                                   \
        FIRE_SPC_WRITE_8(FIRE_PCI, (sc), (offs), (v))
#define FIRE_CTRL_BARRIER(sc, offs, len, flags)                         \
        FIRE_SPC_BARRIER(FIRE_CTRL, (sc), (offs), len, flags)
#define FIRE_CTRL_READ_8(sc, offs)                                      \
        FIRE_SPC_READ_8(FIRE_CTRL, (sc), (offs))
#define FIRE_CTRL_WRITE_8(sc, offs, v)                                  \
        FIRE_SPC_WRITE_8(FIRE_CTRL, (sc), (offs), (v))

#define FIRE_PCI_SET(sc, offs, v)                                       \
        FIRE_SPC_SET(FIRE_PCI, (sc), (offs), # offs, (v))
#define FIRE_CTRL_SET(sc, offs, v)                                      \
        FIRE_SPC_SET(FIRE_CTRL, (sc), (offs), # offs, (v))
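
/*
 * For example, with FIRE_DEBUG defined,
 *      FIRE_PCI_SET(sc, FO_PCI_TLU_CTRL, val);
 * stringizes the register name and expands to roughly
 *      device_printf(sc->sc_dev, "FO_PCI_TLU_CTRL 0x%016llx -> 0x%016llx\n",
 *          (unsigned long long)FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL),
 *          (unsigned long long)val);
 *      FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_CTRL, val);
 * logging every register update, while the non-debug variant is a plain
 * FIRE_PCI_WRITE_8().
 */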

struct fire_desc {
        const char      *fd_string;
        int             fd_mode;
        const char      *fd_name;
};

static const struct fire_desc fire_compats[] = {
        { "pciex108e,80f0",     FIRE_MODE_FIRE,         "Fire" },
        { "pciex108e,80f8",     FIRE_MODE_OBERON,       "Oberon" },
        { NULL,                 0,                      NULL }
};

static const struct fire_desc *
fire_get_desc(device_t dev)
{
        const struct fire_desc *desc;
        const char *compat;

        compat = ofw_bus_get_compat(dev);
        if (compat == NULL)
                return (NULL);
        for (desc = fire_compats; desc->fd_string != NULL; desc++)
                if (strcmp(desc->fd_string, compat) == 0)
                        return (desc);
        return (NULL);
}

static int
fire_probe(device_t dev)
{
        const char *dtype;

        dtype = ofw_bus_get_type(dev);
        if (dtype != NULL && strcmp(dtype, OFW_TYPE_PCIE) == 0 &&
            fire_get_desc(dev) != NULL) {
                device_set_desc(dev, "Sun Host-PCIe bridge");
                return (BUS_PROBE_GENERIC);
        }
        return (ENXIO);
}

static int
fire_attach(device_t dev)
{
        struct fire_softc *sc;
        const struct fire_desc *desc;
        struct ofw_pci_msi_ranges msi_ranges;
        struct ofw_pci_msi_addr_ranges msi_addr_ranges;
        struct ofw_pci_msi_eq_to_devino msi_eq_to_devino;
        struct fire_msiqarg *fmqa;
        struct timecounter *tc;
        bus_dma_tag_t dmat;
        uint64_t ino_bitmap, val;
        phandle_t node;
        uint32_t prop, prop_array[2];
        u_int lw, mps;
        int i, j, mode;

        sc = device_get_softc(dev);
        node = ofw_bus_get_node(dev);
        desc = fire_get_desc(dev);
        mode = desc->fd_mode;

        sc->sc_dev = dev;
        sc->sc_mode = mode;
        sc->sc_flags = 0;

        mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF);
        mtx_init(&sc->sc_pcib_mtx, "pcib_mtx", NULL, MTX_SPIN);

        /*
         * Fire and Oberon have two register banks:
         * (0) per-PBM PCI Express configuration and status registers
         * (1) (shared) Fire/Oberon controller configuration and status
         *     registers
         */
        for (i = 0; i < FIRE_NREG; i++) {
                j = i;
                sc->sc_mem_res[i] = bus_alloc_resource_any(dev,
                    SYS_RES_MEMORY, &j, RF_ACTIVE);
                if (sc->sc_mem_res[i] == NULL)
                        panic("%s: could not allocate register bank %d",
                            __func__, i);
        }

        if (OF_getprop(node, "portid", &sc->sc_ign, sizeof(sc->sc_ign)) == -1)
                panic("%s: could not determine IGN", __func__);
        if (OF_getprop(node, "module-revision#", &prop, sizeof(prop)) == -1)
                panic("%s: could not determine module-revision", __func__);
        device_printf(dev, "%s, module-revision %d, IGN %#x\n",
            desc->fd_name, prop, sc->sc_ign);

        /*
         * Hunt through all the interrupt mapping regs and register
         * the interrupt controller for our interrupt vectors.  We do
         * this early in order to be able to catch stray interrupts.
         */
        i = OF_getprop(node, "ino-bitmap", (void *)prop_array,
            sizeof(prop_array));
        if (i == -1)
                panic("%s: could not get ino-bitmap", __func__);
        ino_bitmap = ((uint64_t)prop_array[1] << 32) | prop_array[0];
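        /*
         * The "ino-bitmap" property consists of two 32-bit cells, low word
         * first; in the 64-bit mask assembled above, bit i is set for every
         * INO actually wired on this bridge, and only those INOs get an
         * interrupt controller registered below.
         */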
        for (i = 0; i <= FO_MAX_INO; i++) {
                if ((ino_bitmap & (1ULL << i)) == 0)
                        continue;
                j = fire_intr_register(sc, i);
                if (j != 0)
                        device_printf(dev, "could not register interrupt "
                            "controller for INO %d (%d)\n", i, j);
        }

        /* JBC/UBC module initialization */
        FIRE_CTRL_SET(sc, FO_XBC_ERR_LOG_EN, ~0ULL);
        FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
        /* not enabled by OpenSolaris */
        FIRE_CTRL_SET(sc, FO_XBC_INT_EN, ~0ULL);
        if (sc->sc_mode == FIRE_MODE_FIRE) {
                FIRE_CTRL_SET(sc, FIRE_JBUS_PAR_CTRL,
                    FIRE_JBUS_PAR_CTRL_P_EN);
                FIRE_CTRL_SET(sc, FIRE_JBC_FATAL_RST_EN,
                    ((1ULL << FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_SHFT) &
                    FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_MASK) |
                    FIRE_JBC_FATAL_RST_EN_MB_PEA_P_INT |
                    FIRE_JBC_FATAL_RST_EN_CPE_P_INT |
                    FIRE_JBC_FATAL_RST_EN_APE_P_INT |
                    FIRE_JBC_FATAL_RST_EN_PIO_CPE_INT |
                    FIRE_JBC_FATAL_RST_EN_JTCEEW_P_INT |
                    FIRE_JBC_FATAL_RST_EN_JTCEEI_P_INT |
                    FIRE_JBC_FATAL_RST_EN_JTCEER_P_INT);
                FIRE_CTRL_SET(sc, FIRE_JBC_CORE_BLOCK_INT_EN, ~0ULL);
        }

        /* TLU initialization */
        FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_STAT_CLR,
            FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
        /* not enabled by OpenSolaris */
        FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_INT_EN,
            FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
        FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_STAT_CLR,
            FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
        /* not enabled by OpenSolaris */
        FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_INT_EN,
            FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
        FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_STAT_CLR,
            FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
        /* not enabled by OpenSolaris */
        FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_INT_EN,
            FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
        val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) |
            ((FO_PCI_TLU_CTRL_L0S_TIM_DFLT << FO_PCI_TLU_CTRL_L0S_TIM_SHFT) &
            FO_PCI_TLU_CTRL_L0S_TIM_MASK) |
            ((FO_PCI_TLU_CTRL_CFG_DFLT << FO_PCI_TLU_CTRL_CFG_SHFT) &
            FO_PCI_TLU_CTRL_CFG_MASK);
        if (sc->sc_mode == FIRE_MODE_OBERON)
                val &= ~FO_PCI_TLU_CTRL_NWPR_EN;
        val |= FO_PCI_TLU_CTRL_CFG_REMAIN_DETECT_QUIET;
        FIRE_PCI_SET(sc, FO_PCI_TLU_CTRL, val);
        FIRE_PCI_SET(sc, FO_PCI_TLU_DEV_CTRL, 0);
        FIRE_PCI_SET(sc, FO_PCI_TLU_LNK_CTRL, FO_PCI_TLU_LNK_CTRL_CLK);

        /* DLU/LPU initialization */
        if (sc->sc_mode == FIRE_MODE_OBERON)
                FIRE_PCI_SET(sc, FO_PCI_LPU_INT_MASK, 0);
        else
                FIRE_PCI_SET(sc, FO_PCI_LPU_RST, 0);
        FIRE_PCI_SET(sc, FO_PCI_LPU_LNK_LYR_CFG,
            FO_PCI_LPU_LNK_LYR_CFG_VC0_EN);
        FIRE_PCI_SET(sc, FO_PCI_LPU_FLW_CTRL_UPDT_CTRL,
            FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_NP_EN |
            FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_P_EN);
        if (sc->sc_mode == FIRE_MODE_OBERON)
                FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
                    (OBERON_PCI_LPU_TXLNK_RPLY_TMR_THRS_DFLT <<
                    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
                    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
        else {
                switch ((FIRE_PCI_READ_8(sc, FO_PCI_TLU_LNK_STAT) &
                    FO_PCI_TLU_LNK_STAT_WDTH_MASK) >>
                    FO_PCI_TLU_LNK_STAT_WDTH_SHFT) {
                case 1:
                        lw = 0;
                        break;
                case 4:
                        lw = 1;
                        break;
                case 8:
                        lw = 2;
                        break;
                case 16:
                        lw = 3;
                        break;
                default:
                        lw = 0;
                }
                mps = (FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) &
                    FO_PCI_TLU_CTRL_CFG_MPS_MASK) >>
                    FO_PCI_TLU_CTRL_CFG_MPS_SHFT;
                i = sizeof(fire_freq_nak_tmr_thrs) /
                    sizeof(*fire_freq_nak_tmr_thrs);
                if (mps >= i)
                        mps = i - 1;
                FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS,
                    (fire_freq_nak_tmr_thrs[mps][lw] <<
                    FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_SHFT) &
                    FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_MASK);
                FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
                    (fire_rply_tmr_thrs[mps][lw] <<
                    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
                    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
                FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RTR_FIFO_PTR,
                    ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_DFLT <<
                    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_SHFT) &
                    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_MASK) |
                    ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_DFLT <<
                    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_SHFT) &
                    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_MASK));
                FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG2,
                    (FO_PCI_LPU_LTSSM_CFG2_12_TO_DFLT <<
                    FO_PCI_LPU_LTSSM_CFG2_12_TO_SHFT) &
                    FO_PCI_LPU_LTSSM_CFG2_12_TO_MASK);
                FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG3,
                    (FO_PCI_LPU_LTSSM_CFG3_2_TO_DFLT <<
                    FO_PCI_LPU_LTSSM_CFG3_2_TO_SHFT) &
                    FO_PCI_LPU_LTSSM_CFG3_2_TO_MASK);
                FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG4,
                    ((FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_DFLT <<
                    FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_SHFT) &
                    FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_MASK) |
                    ((FO_PCI_LPU_LTSSM_CFG4_N_FTS_DFLT <<
                    FO_PCI_LPU_LTSSM_CFG4_N_FTS_SHFT) &
                    FO_PCI_LPU_LTSSM_CFG4_N_FTS_MASK));
                FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG5, 0);
        }

        /* ILU initialization */
        FIRE_PCI_SET(sc, FO_PCI_ILU_ERR_STAT_CLR, ~0ULL);
        /* not enabled by OpenSolaris */
        FIRE_PCI_SET(sc, FO_PCI_ILU_INT_EN, ~0ULL);

        /* IMU initialization */
        FIRE_PCI_SET(sc, FO_PCI_IMU_ERR_STAT_CLR, ~0ULL);
        FIRE_PCI_SET(sc, FO_PCI_IMU_INT_EN,
            FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_EN) &
            ~(FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_S |
            FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_S |
            FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_S |
            FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
            FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
            FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P));

        /* MMU initialization */
        FIRE_PCI_SET(sc, FO_PCI_MMU_ERR_STAT_CLR,
            FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);
        /* not enabled by OpenSolaris */
        FIRE_PCI_SET(sc, FO_PCI_MMU_INT_EN,
            FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);

        /* DMC initialization */
        FIRE_PCI_SET(sc, FO_PCI_DMC_CORE_BLOCK_INT_EN, ~0ULL);
        FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTA, 0);
        FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTB, 0);

        /* PEC initialization */
        FIRE_PCI_SET(sc, FO_PCI_PEC_CORE_BLOCK_INT_EN, ~0ULL);

        /* Establish handlers for interesting interrupts. */
        if ((ino_bitmap & (1ULL << FO_DMC_PEC_INO)) != 0)
                fire_set_intr(sc, 1, FO_DMC_PEC_INO, fire_dmc_pec, sc);
        if ((ino_bitmap & (1ULL << FO_XCB_INO)) != 0)
                fire_set_intr(sc, 0, FO_XCB_INO, fire_xcb, sc);

        /* MSI/MSI-X support */
        if (OF_getprop(node, "#msi", &sc->sc_msi_count,
            sizeof(sc->sc_msi_count)) == -1)
                panic("%s: could not determine MSI count", __func__);
        if (OF_getprop(node, "msi-ranges", &msi_ranges,
            sizeof(msi_ranges)) == -1)
                sc->sc_msi_first = 0;
        else
                sc->sc_msi_first = msi_ranges.first;
        if (OF_getprop(node, "msi-data-mask", &sc->sc_msi_data_mask,
            sizeof(sc->sc_msi_data_mask)) == -1)
                panic("%s: could not determine MSI data mask", __func__);
        if (OF_getprop(node, "msix-data-width", &sc->sc_msix_data_width,
            sizeof(sc->sc_msix_data_width)) > 0)
                sc->sc_flags |= FIRE_MSIX;
        if (OF_getprop(node, "msi-address-ranges", &msi_addr_ranges,
            sizeof(msi_addr_ranges)) == -1)
                panic("%s: could not determine MSI address ranges", __func__);
        sc->sc_msi_addr32 = OFW_PCI_MSI_ADDR_RANGE_32(&msi_addr_ranges);
        sc->sc_msi_addr64 = OFW_PCI_MSI_ADDR_RANGE_64(&msi_addr_ranges);
        if (OF_getprop(node, "#msi-eqs", &sc->sc_msiq_count,
            sizeof(sc->sc_msiq_count)) == -1)
                panic("%s: could not determine MSI event queue count",
                    __func__);
        if (OF_getprop(node, "msi-eq-size", &sc->sc_msiq_size,
            sizeof(sc->sc_msiq_size)) == -1)
                panic("%s: could not determine MSI event queue size",
                    __func__);
        if (OF_getprop(node, "msi-eq-to-devino", &msi_eq_to_devino,
            sizeof(msi_eq_to_devino)) == -1 &&
            OF_getprop(node, "msi-eq-devino", &msi_eq_to_devino,
            sizeof(msi_eq_to_devino)) == -1) {
                sc->sc_msiq_first = 0;
                sc->sc_msiq_ino_first = FO_EQ_FIRST_INO;
        } else {
                sc->sc_msiq_first = msi_eq_to_devino.eq_first;
                sc->sc_msiq_ino_first = msi_eq_to_devino.devino_first;
        }
        if (sc->sc_msiq_ino_first < FO_EQ_FIRST_INO ||
            sc->sc_msiq_ino_first + sc->sc_msiq_count - 1 > FO_EQ_LAST_INO)
                panic("%s: event queues exceed INO range", __func__);
        sc->sc_msi_bitmap = malloc(roundup2(sc->sc_msi_count, NBBY) / NBBY,
            M_DEVBUF, M_NOWAIT | M_ZERO);
        if (sc->sc_msi_bitmap == NULL)
                panic("%s: could not malloc MSI bitmap", __func__);
        sc->sc_msi_msiq_table = malloc(sc->sc_msi_count *
            sizeof(*sc->sc_msi_msiq_table), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (sc->sc_msi_msiq_table == NULL)
                panic("%s: could not malloc MSI-MSI event queue table",
                    __func__);
        sc->sc_msiq_bitmap = malloc(roundup2(sc->sc_msiq_count, NBBY) / NBBY,
            M_DEVBUF, M_NOWAIT | M_ZERO);
        if (sc->sc_msiq_bitmap == NULL)
                panic("%s: could not malloc MSI event queue bitmap", __func__);
        j = FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * sc->sc_msiq_count;
        sc->sc_msiq = contigmalloc(j, M_DEVBUF, M_NOWAIT, 0, ~0UL,
            FO_EQ_ALIGNMENT, 0);
        if (sc->sc_msiq == NULL)
                panic("%s: could not contigmalloc MSI event queue", __func__);
        memset(sc->sc_msiq, 0, j);
        FIRE_PCI_SET(sc, FO_PCI_EQ_BASE_ADDR, FO_PCI_EQ_BASE_ADDR_BYPASS |
            (pmap_kextract((vm_offset_t)sc->sc_msiq) &
            FO_PCI_EQ_BASE_ADDR_MASK));
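        /*
         * Every MSI starts out invalid; fire_setup_intr() later binds an
         * allocated MSI to its event queue and sets FO_PCI_MSI_MAP_V again.
         */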
        for (i = 0; i < sc->sc_msi_count; i++) {
                j = (i + sc->sc_msi_first) << 3;
                FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + j,
                    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + j) &
                    ~FO_PCI_MSI_MAP_V);
        }
        for (i = 0; i < sc->sc_msiq_count; i++) {
                j = i + sc->sc_msiq_ino_first;
                if ((ino_bitmap & (1ULL << j)) == 0) {
                        mtx_lock(&sc->sc_msi_mtx);
                        setbit(sc->sc_msiq_bitmap, i);
                        mtx_unlock(&sc->sc_msi_mtx);
                        continue;
                }
                fmqa = intr_vectors[INTMAP_VEC(sc->sc_ign, j)].iv_icarg;
                mtx_init(&fmqa->fmqa_mtx, "msiq_mtx", NULL, MTX_SPIN);
                fmqa->fmqa_base =
                    (struct fo_msiq_record *)((caddr_t)sc->sc_msiq +
                    (FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * i));
                j = i + sc->sc_msiq_first;
                fmqa->fmqa_msiq = j;
                j <<= 3;
                fmqa->fmqa_head = FO_PCI_EQ_HD_BASE + j;
                fmqa->fmqa_tail = FO_PCI_EQ_TL_BASE + j;
                FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + j,
                    FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
                    FO_PCI_EQ_CTRL_CLR_DIS);
                FIRE_PCI_WRITE_8(sc, fmqa->fmqa_tail,
                    (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
                FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head,
                    (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
        }
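        /*
         * Match addresses for inbound MSI/MSI-X writes; fire_map_msi()
         * hands one of these two addresses to the respective device,
         * depending on whether it does 32-bit or 64-bit MSI addressing.
         */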
        FIRE_PCI_SET(sc, FO_PCI_MSI_32_BIT_ADDR, sc->sc_msi_addr32 &
            FO_PCI_MSI_32_BIT_ADDR_MASK);
        FIRE_PCI_SET(sc, FO_PCI_MSI_64_BIT_ADDR, sc->sc_msi_addr64 &
            FO_PCI_MSI_64_BIT_ADDR_MASK);

        /*
         * Establish a handler for interesting PCIe messages and disable
         * uninteresting ones.
         */
        mtx_lock(&sc->sc_msi_mtx);
        for (i = 0; i < sc->sc_msiq_count; i++) {
                if (isclr(sc->sc_msiq_bitmap, i) != 0) {
                        j = i;
                        break;
                }
        }
        if (i == sc->sc_msiq_count) {
                mtx_unlock(&sc->sc_msi_mtx);
                panic("%s: no spare event queue for PCIe messages", __func__);
        }
        setbit(sc->sc_msiq_bitmap, j);
        mtx_unlock(&sc->sc_msi_mtx);
        i = INTMAP_VEC(sc->sc_ign, j + sc->sc_msiq_ino_first);
        if (bus_set_resource(dev, SYS_RES_IRQ, 2, i, 1) != 0)
                panic("%s: failed to add interrupt for PCIe messages",
                    __func__);
        fire_set_intr(sc, 2, INTINO(i), fire_pcie, intr_vectors[i].iv_icarg);
        j += sc->sc_msiq_first;
        /*
         * "Please note that setting the EQNUM field to a value larger than
         * 35 will yield unpredictable results."
         */
        if (j > 35)
                panic("%s: invalid queue for PCIe messages (%d)",
                    __func__, j);
        FIRE_PCI_SET(sc, FO_PCI_ERR_COR, FO_PCI_ERR_PME_V |
            ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
        FIRE_PCI_SET(sc, FO_PCI_ERR_NONFATAL, FO_PCI_ERR_PME_V |
            ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
        FIRE_PCI_SET(sc, FO_PCI_ERR_FATAL, FO_PCI_ERR_PME_V |
            ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
        FIRE_PCI_SET(sc, FO_PCI_PM_PME, 0);
        FIRE_PCI_SET(sc, FO_PCI_PME_TO_ACK, 0);
        FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (j << 3),
            FO_PCI_EQ_CTRL_SET_EN);
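        /*
         * From here on the correctable, non-fatal and fatal PCIe error
         * messages are funneled through the event queue selected above
         * and end up in fire_pcie().
         */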

#define TC_COUNTER_MAX_MASK     0xffffffff

        /*
         * Setup JBC/UBC performance counter 0 in bus cycle counting
         * mode as timecounter.
         */
        if (device_get_unit(dev) == 0) {
                FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT0, 0);
                FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT1, 0);
                FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT_SEL,
                    (FO_XBC_PRF_CNT_NONE << FO_XBC_PRF_CNT_CNT1_SHFT) |
                    (FO_XBC_PRF_CNT_XB_CLK << FO_XBC_PRF_CNT_CNT0_SHFT));
                tc = malloc(sizeof(*tc), M_DEVBUF, M_NOWAIT | M_ZERO);
                if (tc == NULL)
                        panic("%s: could not malloc timecounter", __func__);
                tc->tc_get_timecount = fire_get_timecount;
                tc->tc_counter_mask = TC_COUNTER_MAX_MASK;
                if (OF_getprop(OF_peer(0), "clock-frequency", &prop,
                    sizeof(prop)) == -1)
                        panic("%s: could not determine clock frequency",
                            __func__);
                tc->tc_frequency = prop;
                tc->tc_name = strdup(device_get_nameunit(dev), M_DEVBUF);
                tc->tc_priv = sc;
                /*
                 * Due to initial problems with the JBus-driven performance
                 * counters not advancing, which might be firmware dependent,
                 * ensure that it actually works.
                 */
                if (fire_get_timecount(tc) - fire_get_timecount(tc) != 0)
                        tc->tc_quality = FIRE_PERF_CNT_QLTY;
                else
                        tc->tc_quality = -FIRE_PERF_CNT_QLTY;
                tc_init(tc);
        }

        /*
         * Set up the IOMMU.  Both Fire and Oberon have one per PBM, but
         * neither has a streaming buffer.
         */
        memcpy(&sc->sc_dma_methods, &iommu_dma_methods,
            sizeof(sc->sc_dma_methods));
        sc->sc_is.is_flags = IOMMU_FIRE | IOMMU_PRESERVE_PROM;
        if (sc->sc_mode == FIRE_MODE_OBERON) {
                sc->sc_is.is_flags |= IOMMU_FLUSH_CACHE;
                sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(OBERON_IOMMU_BITS);
        } else {
                sc->sc_dma_methods.dm_dmamap_sync = fire_dmamap_sync;
                sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(FIRE_IOMMU_BITS);
        }
        sc->sc_is.is_sb[0] = sc->sc_is.is_sb[1] = 0;
        /* Punch in our copies. */
        sc->sc_is.is_bustag = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]);
        sc->sc_is.is_bushandle = rman_get_bushandle(sc->sc_mem_res[FIRE_PCI]);
        sc->sc_is.is_iommu = FO_PCI_MMU;
        val = FIRE_PCI_READ_8(sc, FO_PCI_MMU + IMR_CTL);
        iommu_init(device_get_nameunit(dev), &sc->sc_is, 7, -1, 0);
#ifdef FIRE_DEBUG
        device_printf(dev, "FO_PCI_MMU + IMR_CTL 0x%016llx -> 0x%016llx\n",
            (long long unsigned)val, (long long unsigned)sc->sc_is.is_cr);
#endif

        /* Create our DMA tag. */
        if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0x100000000,
            sc->sc_is.is_pmaxaddr, ~0, NULL, NULL, sc->sc_is.is_pmaxaddr,
            0xff, 0xffffffff, 0, NULL, NULL, &dmat) != 0)
                panic("%s: could not create PCI DMA tag", __func__);
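        /*
         * Hook the IOMMU state and the (possibly adjusted) DMA method
         * table into the tag so that all DMA maps created for children
         * are translated by the Fire/Oberon IOMMU.
         */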
        dmat->dt_cookie = &sc->sc_is;
        dmat->dt_mt = &sc->sc_dma_methods;

        if (ofw_pci_attach_common(dev, dmat, FO_IO_SIZE, FO_MEM_SIZE) != 0)
                panic("%s: ofw_pci_attach_common() failed", __func__);

#define FIRE_SYSCTL_ADD_UINT(name, arg, desc)                           \
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),                     \
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,     \
            (name), CTLFLAG_RD, (arg), 0, (desc))

        FIRE_SYSCTL_ADD_UINT("ilu_err", &sc->sc_stats_ilu_err,
            "ILU unknown errors");
        FIRE_SYSCTL_ADD_UINT("jbc_ce_async", &sc->sc_stats_jbc_ce_async,
            "JBC correctable errors");
        FIRE_SYSCTL_ADD_UINT("jbc_unsol_int", &sc->sc_stats_jbc_unsol_int,
            "JBC unsolicited interrupt ACK/NACK errors");
        FIRE_SYSCTL_ADD_UINT("jbc_unsol_rd", &sc->sc_stats_jbc_unsol_rd,
            "JBC unsolicited read response errors");
        FIRE_SYSCTL_ADD_UINT("mmu_err", &sc->sc_stats_mmu_err, "MMU errors");
        FIRE_SYSCTL_ADD_UINT("tlu_ce", &sc->sc_stats_tlu_ce,
            "DLU/TLU correctable errors");
        FIRE_SYSCTL_ADD_UINT("tlu_oe_non_fatal",
            &sc->sc_stats_tlu_oe_non_fatal,
            "DLU/TLU other event non-fatal errors summary");
        FIRE_SYSCTL_ADD_UINT("tlu_oe_rx_err", &sc->sc_stats_tlu_oe_rx_err,
            "DLU/TLU receive other event errors");
        FIRE_SYSCTL_ADD_UINT("tlu_oe_tx_err", &sc->sc_stats_tlu_oe_tx_err,
            "DLU/TLU transmit other event errors");
        FIRE_SYSCTL_ADD_UINT("ubc_dmardue", &sc->sc_stats_ubc_dmardue,
            "UBC DMARDUE errors");

#undef FIRE_SYSCTL_ADD_UINT

        device_add_child(dev, "pci", -1);
        return (bus_generic_attach(dev));
}

static void
fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
    driver_filter_t handler, void *arg)
{
        u_long vec;
        int rid;

        rid = index;
        sc->sc_irq_res[index] = bus_alloc_resource_any(sc->sc_dev,
            SYS_RES_IRQ, &rid, RF_ACTIVE);
        if (sc->sc_irq_res[index] == NULL ||
            INTINO(vec = rman_get_start(sc->sc_irq_res[index])) != ino ||
            INTIGN(vec) != sc->sc_ign ||
            intr_vectors[vec].iv_ic != &fire_ic ||
            bus_setup_intr(sc->sc_dev, sc->sc_irq_res[index],
            INTR_TYPE_MISC | INTR_BRIDGE, handler, NULL, arg,
            &sc->sc_ihand[index]) != 0)
                panic("%s: failed to set up interrupt %d", __func__, index);
}

static int
fire_intr_register(struct fire_softc *sc, u_int ino)
{
        struct fire_icarg *fica;
        bus_addr_t intrclr, intrmap;
        int error;

        if (fire_get_intrmap(sc, ino, &intrmap, &intrclr) == 0)
                return (ENXIO);
        fica = malloc((ino >= FO_EQ_FIRST_INO && ino <= FO_EQ_LAST_INO) ?
            sizeof(struct fire_msiqarg) : sizeof(struct fire_icarg), M_DEVBUF,
            M_NOWAIT | M_ZERO);
        if (fica == NULL)
                return (ENOMEM);
        fica->fica_sc = sc;
        fica->fica_map = intrmap;
        fica->fica_clr = intrclr;
        error = (intr_controller_register(INTMAP_VEC(sc->sc_ign, ino),
            &fire_ic, fica));
        if (error != 0)
                free(fica, M_DEVBUF);
        return (error);
}

static int
fire_get_intrmap(struct fire_softc *sc, u_int ino, bus_addr_t *intrmapptr,
    bus_addr_t *intrclrptr)
{

        if (ino > FO_MAX_INO) {
                device_printf(sc->sc_dev, "out of range INO %d requested\n",
                    ino);
                return (0);
        }

        /* The mapping and clear registers are 8 bytes apart per INO. */
        ino <<= 3;
        if (intrmapptr != NULL)
                *intrmapptr = FO_PCI_INT_MAP_BASE + ino;
        if (intrclrptr != NULL)
                *intrclrptr = FO_PCI_INT_CLR_BASE + ino;
        return (1);
}

static int
fire_dmc_pec(void *arg)
{
        struct fire_softc *sc;
        device_t dev;
        uint64_t cestat, dmcstat, ilustat, imustat, mcstat, mmustat, mmutfar;
        uint64_t mmutfsr, oestat, pecstat, uestat, val;
        u_int fatal, oenfatal;

        fatal = 0;
        sc = arg;
        dev = sc->sc_dev;
        mtx_lock_spin(&sc->sc_pcib_mtx);
        mcstat = FIRE_PCI_READ_8(sc, FO_PCI_MULTI_CORE_ERR_STAT);
        if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_DMC) != 0) {
                dmcstat = FIRE_PCI_READ_8(sc, FO_PCI_DMC_CORE_BLOCK_ERR_STAT);
                if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_IMU) != 0) {
                        imustat = FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_STAT);
                        device_printf(dev, "IMU error %#llx\n",
                            (unsigned long long)imustat);
                        if ((imustat &
                            FO_PCI_IMU_ERR_INT_EQ_NOT_EN_P) != 0) {
                                fatal = 1;
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_IMU_SCS_ERR_LOG);
                                device_printf(dev, "SCS error log %#llx\n",
                                    (unsigned long long)val);
                        }
                        if ((imustat & FO_PCI_IMU_ERR_INT_EQ_OVER_P) != 0) {
                                fatal = 1;
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_IMU_EQS_ERR_LOG);
                                device_printf(dev, "EQS error log %#llx\n",
                                    (unsigned long long)val);
                        }
                        if ((imustat & (FO_PCI_IMU_ERR_INT_MSI_MAL_ERR_P |
                            FO_PCI_IMU_ERR_INT_MSI_PAR_ERR_P |
                            FO_PCI_IMU_ERR_INT_PMEACK_MES_NOT_EN_P |
                            FO_PCI_IMU_ERR_INT_PMPME_MES_NOT_EN_P |
                            FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
                            FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
                            FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P |
                            FO_PCI_IMU_ERR_INT_MSI_NOT_EN_P)) != 0) {
                                fatal = 1;
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_IMU_RDS_ERR_LOG);
                                device_printf(dev, "RDS error log %#llx\n",
                                    (unsigned long long)val);
                        }
                }
                if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_MMU) != 0) {
                        mmustat = FIRE_PCI_READ_8(sc, FO_PCI_MMU_INT_STAT);
                        mmutfar = FIRE_PCI_READ_8(sc,
                            FO_PCI_MMU_TRANS_FAULT_ADDR);
                        mmutfsr = FIRE_PCI_READ_8(sc,
                            FO_PCI_MMU_TRANS_FAULT_STAT);
                        if ((mmustat & (FO_PCI_MMU_ERR_INT_TBW_DPE_P |
                            FO_PCI_MMU_ERR_INT_TBW_ERR_P |
                            FO_PCI_MMU_ERR_INT_TBW_UDE_P |
                            FO_PCI_MMU_ERR_INT_TBW_DME_P |
                            FO_PCI_MMU_ERR_INT_TTC_CAE_P |
                            FIRE_PCI_MMU_ERR_INT_TTC_DPE_P |
                            OBERON_PCI_MMU_ERR_INT_TTC_DUE_P |
                            FO_PCI_MMU_ERR_INT_TRN_ERR_P)) != 0)
                                fatal = 1;
                        else
                                sc->sc_stats_mmu_err++;
                        FIRE_PCI_WRITE_8(sc, FO_PCI_MMU_ERR_STAT_CLR,
                            mmustat);
                        device_printf(dev,
                            "MMU error %#llx: TFAR %#llx TFSR %#llx\n",
                            (unsigned long long)mmustat,
                            (unsigned long long)mmutfar,
                            (unsigned long long)mmutfsr);
                }
        }
        if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_PEC) != 0) {
                pecstat = FIRE_PCI_READ_8(sc, FO_PCI_PEC_CORE_BLOCK_INT_STAT);
                if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_UERR) != 0) {
                        fatal = 1;
                        uestat = FIRE_PCI_READ_8(sc,
                            FO_PCI_TLU_UERR_INT_STAT);
                        device_printf(dev,
                            "DLU/TLU uncorrectable error %#llx\n",
                            (unsigned long long)uestat);
                        if ((uestat & (FO_PCI_TLU_UERR_INT_UR_P |
                            OBERON_PCI_TLU_UERR_INT_POIS_P |
                            FO_PCI_TLU_UERR_INT_MFP_P |
                            FO_PCI_TLU_UERR_INT_ROF_P |
                            FO_PCI_TLU_UERR_INT_UC_P |
                            FIRE_PCI_TLU_UERR_INT_PP_P |
                            OBERON_PCI_TLU_UERR_INT_POIS_P)) != 0) {
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_TLU_RX_UERR_HDR1_LOG);
                                device_printf(dev,
                                    "receive header log %#llx\n",
                                    (unsigned long long)val);
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_TLU_RX_UERR_HDR2_LOG);
                                device_printf(dev,
                                    "receive header log 2 %#llx\n",
                                    (unsigned long long)val);
                        }
                        if ((uestat & FO_PCI_TLU_UERR_INT_CTO_P) != 0) {
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_TLU_TX_UERR_HDR1_LOG);
                                device_printf(dev,
                                    "transmit header log %#llx\n",
                                    (unsigned long long)val);
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_TLU_TX_UERR_HDR2_LOG);
                                device_printf(dev,
                                    "transmit header log 2 %#llx\n",
                                    (unsigned long long)val);
                        }
                        if ((uestat & FO_PCI_TLU_UERR_INT_DLP_P) != 0) {
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_LPU_LNK_LYR_INT_STAT);
                                device_printf(dev,
                                    "link layer interrupt and status %#llx\n",
                                    (unsigned long long)val);
                        }
                        if ((uestat & FO_PCI_TLU_UERR_INT_TE_P) != 0) {
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_LPU_PHY_LYR_INT_STAT);
                                device_printf(dev,
                                    "phy layer interrupt and status %#llx\n",
                                    (unsigned long long)val);
                        }
                }
                if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_CERR) != 0) {
                        sc->sc_stats_tlu_ce++;
                        cestat = FIRE_PCI_READ_8(sc,
                            FO_PCI_TLU_CERR_INT_STAT);
                        device_printf(dev,
                            "DLU/TLU correctable error %#llx\n",
                            (unsigned long long)cestat);
                        val = FIRE_PCI_READ_8(sc,
                            FO_PCI_LPU_LNK_LYR_INT_STAT);
                        device_printf(dev,
                            "link layer interrupt and status %#llx\n",
                            (unsigned long long)val);
                        if ((cestat & FO_PCI_TLU_CERR_INT_RE_P) != 0) {
                                FIRE_PCI_WRITE_8(sc,
                                    FO_PCI_LPU_LNK_LYR_INT_STAT, val);
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_LPU_PHY_LYR_INT_STAT);
                                device_printf(dev,
                                    "phy layer interrupt and status %#llx\n",
                                    (unsigned long long)val);
                        }
                        FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_CERR_STAT_CLR,
                            cestat);
                }
                if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_OEVENT) != 0) {
                        oenfatal = 0;
                        oestat = FIRE_PCI_READ_8(sc,
                            FO_PCI_TLU_OEVENT_INT_STAT);
                        device_printf(dev, "DLU/TLU other event %#llx\n",
                            (unsigned long long)oestat);
                        if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
                            FO_PCI_TLU_OEVENT_MRC_P |
                            FO_PCI_TLU_OEVENT_WUC_P |
                            FO_PCI_TLU_OEVENT_RUC_P |
                            FO_PCI_TLU_OEVENT_CRS_P)) != 0) {
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_TLU_RX_OEVENT_HDR1_LOG);
                                device_printf(dev,
                                    "receive header log %#llx\n",
                                    (unsigned long long)val);
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_TLU_RX_OEVENT_HDR2_LOG);
                                device_printf(dev,
                                    "receive header log 2 %#llx\n",
                                    (unsigned long long)val);
                                if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
                                    FO_PCI_TLU_OEVENT_MRC_P |
                                    FO_PCI_TLU_OEVENT_WUC_P |
                                    FO_PCI_TLU_OEVENT_RUC_P)) != 0)
                                        fatal = 1;
                                else
                                        sc->sc_stats_tlu_oe_rx_err++;
                        }
                        if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
                            FO_PCI_TLU_OEVENT_CTO_P |
                            FO_PCI_TLU_OEVENT_WUC_P |
                            FO_PCI_TLU_OEVENT_RUC_P)) != 0) {
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_TLU_TX_OEVENT_HDR1_LOG);
                                device_printf(dev,
                                    "transmit header log %#llx\n",
                                    (unsigned long long)val);
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_TLU_TX_OEVENT_HDR2_LOG);
                                device_printf(dev,
                                    "transmit header log 2 %#llx\n",
                                    (unsigned long long)val);
                                if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
                                    FO_PCI_TLU_OEVENT_CTO_P |
                                    FO_PCI_TLU_OEVENT_WUC_P |
                                    FO_PCI_TLU_OEVENT_RUC_P)) != 0)
                                        fatal = 1;
                                else
                                        sc->sc_stats_tlu_oe_tx_err++;
                        }
                        if ((oestat & (FO_PCI_TLU_OEVENT_ERO_P |
                            FO_PCI_TLU_OEVENT_EMP_P |
                            FO_PCI_TLU_OEVENT_EPE_P |
                            FIRE_PCI_TLU_OEVENT_ERP_P |
                            OBERON_PCI_TLU_OEVENT_ERBU_P |
                            FIRE_PCI_TLU_OEVENT_EIP_P |
                            OBERON_PCI_TLU_OEVENT_EIUE_P)) != 0) {
                                fatal = 1;
                                val = FIRE_PCI_READ_8(sc,
                                    FO_PCI_LPU_LNK_LYR_INT_STAT);
                                device_printf(dev,
                                    "link layer interrupt and status %#llx\n",
                                    (unsigned long long)val);
                        }
                        if ((oestat & (FO_PCI_TLU_OEVENT_IIP_P |
                            FO_PCI_TLU_OEVENT_EDP_P |
                            FIRE_PCI_TLU_OEVENT_EHP_P |
                            OBERON_PCI_TLU_OEVENT_TLUEITMO_S |
                            FO_PCI_TLU_OEVENT_ERU_P)) != 0)
                                fatal = 1;
                        if ((oestat & (FO_PCI_TLU_OEVENT_NFP_P |
                            FO_PCI_TLU_OEVENT_LWC_P |
                            FO_PCI_TLU_OEVENT_LIN_P |
                            FO_PCI_TLU_OEVENT_LRS_P |
                            FO_PCI_TLU_OEVENT_LDN_P |
                            FO_PCI_TLU_OEVENT_LUP_P)) != 0)
                                oenfatal = 1;
                        if (oenfatal != 0) {
                                sc->sc_stats_tlu_oe_non_fatal++;
                                FIRE_PCI_WRITE_8(sc,
                                    FO_PCI_TLU_OEVENT_STAT_CLR, oestat);
                                if ((oestat & FO_PCI_TLU_OEVENT_LIN_P) != 0)
                                        FIRE_PCI_WRITE_8(sc,
                                            FO_PCI_LPU_LNK_LYR_INT_STAT,
                                            FIRE_PCI_READ_8(sc,
                                            FO_PCI_LPU_LNK_LYR_INT_STAT));
                        }
                }
                if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_ILU) != 0) {
                        ilustat = FIRE_PCI_READ_8(sc, FO_PCI_ILU_INT_STAT);
                        device_printf(dev, "ILU error %#llx\n",
                            (unsigned long long)ilustat);
                        if ((ilustat & (FIRE_PCI_ILU_ERR_INT_IHB_PE_P |
                            FIRE_PCI_ILU_ERR_INT_IHB_PE_P)) != 0)
                                fatal = 1;
                        else
                                sc->sc_stats_ilu_err++;
                        FIRE_PCI_WRITE_8(sc, FO_PCI_ILU_INT_STAT,
                            ilustat);
                }
        }
        mtx_unlock_spin(&sc->sc_pcib_mtx);
        if (fatal != 0)
                panic("%s: fatal DMC/PEC error",
                    device_get_nameunit(sc->sc_dev));
        return (FILTER_HANDLED);
}

static int
fire_xcb(void *arg)
{
        struct fire_softc *sc;
        device_t dev;
        uint64_t errstat, intstat, val;
        u_int fatal;

        fatal = 0;
        sc = arg;
        dev = sc->sc_dev;
        mtx_lock_spin(&sc->sc_pcib_mtx);
        if (sc->sc_mode == FIRE_MODE_OBERON) {
                intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
                device_printf(dev, "UBC error: interrupt status %#llx\n",
                    (unsigned long long)intstat);
                if ((intstat & ~(OBERON_UBC_ERR_INT_DMARDUEB_P |
                    OBERON_UBC_ERR_INT_DMARDUEA_P)) != 0)
                        fatal = 1;
                else
                        sc->sc_stats_ubc_dmardue++;
                if (fatal != 0) {
                        mtx_unlock_spin(&sc->sc_pcib_mtx);
                        panic("%s: fatal UBC core block error",
                            device_get_nameunit(sc->sc_dev));
                }
                FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
                mtx_unlock_spin(&sc->sc_pcib_mtx);
                return (FILTER_HANDLED);
        }
        errstat = FIRE_CTRL_READ_8(sc, FIRE_JBC_CORE_BLOCK_ERR_STAT);
        if ((errstat & (FIRE_JBC_CORE_BLOCK_ERR_STAT_MERGE |
            FIRE_JBC_CORE_BLOCK_ERR_STAT_JBCINT |
            FIRE_JBC_CORE_BLOCK_ERR_STAT_DMCINT)) != 0) {
                intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
                device_printf(dev, "JBC interrupt status %#llx\n",
                    (unsigned long long)intstat);
                if ((intstat & FIRE_JBC_ERR_INT_EBUS_TO_P) != 0) {
                        val = FIRE_CTRL_READ_8(sc,
                            FIRE_JBC_CSR_ERR_LOG);
                        device_printf(dev, "CSR error log %#llx\n",
                            (unsigned long long)val);
                }
                if ((intstat & (FIRE_JBC_ERR_INT_UNSOL_RD_P |
                    FIRE_JBC_ERR_INT_UNSOL_INT_P)) != 0) {
                        if ((intstat &
                            FIRE_JBC_ERR_INT_UNSOL_RD_P) != 0)
                                sc->sc_stats_jbc_unsol_rd++;
                        if ((intstat &
                            FIRE_JBC_ERR_INT_UNSOL_INT_P) != 0)
                                sc->sc_stats_jbc_unsol_int++;
                        val = FIRE_CTRL_READ_8(sc,
                            FIRE_DMCINT_IDC_ERR_LOG);
                        device_printf(dev,
                            "DMCINT IDC error log %#llx\n",
                            (unsigned long long)val);
                }
                if ((intstat & (FIRE_JBC_ERR_INT_MB_PER_P |
                    FIRE_JBC_ERR_INT_MB_PEW_P)) != 0) {
                        fatal = 1;
                        val = FIRE_CTRL_READ_8(sc,
                            FIRE_MERGE_TRANS_ERR_LOG);
                        device_printf(dev,
                            "merge transaction error log %#llx\n",
                            (unsigned long long)val);
                }
                if ((intstat & FIRE_JBC_ERR_INT_IJP_P) != 0) {
                        fatal = 1;
                        val = FIRE_CTRL_READ_8(sc,
                            FIRE_JBCINT_OTRANS_ERR_LOG);
                        device_printf(dev,
                            "JBCINT out transaction error log "
                            "%#llx\n", (unsigned long long)val);
                        val = FIRE_CTRL_READ_8(sc,
                            FIRE_JBCINT_OTRANS_ERR_LOG2);
                        device_printf(dev,
                            "JBCINT out transaction error log 2 "
                            "%#llx\n", (unsigned long long)val);
                }
                if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
                    FIRE_JBC_ERR_INT_CE_ASYN_P |
                    FIRE_JBC_ERR_INT_JTE_P | FIRE_JBC_ERR_INT_JBE_P |
                    FIRE_JBC_ERR_INT_JUE_P |
                    FIRE_JBC_ERR_INT_ICISE_P |
                    FIRE_JBC_ERR_INT_WR_DPE_P |
                    FIRE_JBC_ERR_INT_RD_DPE_P |
                    FIRE_JBC_ERR_INT_ILL_BMW_P |
                    FIRE_JBC_ERR_INT_ILL_BMR_P |
                    FIRE_JBC_ERR_INT_BJC_P)) != 0) {
                        if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
                            FIRE_JBC_ERR_INT_JTE_P |
                            FIRE_JBC_ERR_INT_JBE_P |
                            FIRE_JBC_ERR_INT_JUE_P |
                            FIRE_JBC_ERR_INT_ICISE_P |
                            FIRE_JBC_ERR_INT_WR_DPE_P |
                            FIRE_JBC_ERR_INT_RD_DPE_P |
                            FIRE_JBC_ERR_INT_ILL_BMW_P |
                            FIRE_JBC_ERR_INT_ILL_BMR_P |
                            FIRE_JBC_ERR_INT_BJC_P)) != 0)
                                fatal = 1;
                        else
                                sc->sc_stats_jbc_ce_async++;
                        val = FIRE_CTRL_READ_8(sc,
                            FIRE_JBCINT_ITRANS_ERR_LOG);
                        device_printf(dev,
                            "JBCINT in transaction error log %#llx\n",
                            (unsigned long long)val);
                        val = FIRE_CTRL_READ_8(sc,
                            FIRE_JBCINT_ITRANS_ERR_LOG2);
                        device_printf(dev,
                            "JBCINT in transaction error log 2 "
                            "%#llx\n", (unsigned long long)val);
                }
                if ((intstat & (FIRE_JBC_ERR_INT_PIO_UNMAP_RD_P |
                    FIRE_JBC_ERR_INT_ILL_ACC_RD_P |
                    FIRE_JBC_ERR_INT_PIO_UNMAP_P |
                    FIRE_JBC_ERR_INT_PIO_DPE_P |
                    FIRE_JBC_ERR_INT_PIO_CPE_P |
                    FIRE_JBC_ERR_INT_ILL_ACC_P)) != 0) {
                        fatal = 1;
                        val = FIRE_CTRL_READ_8(sc,
                            FIRE_JBC_CSR_ERR_LOG);
                        device_printf(dev,
                            "DMCINT ODCD error log %#llx\n",
                            (unsigned long long)val);
                }
                if ((intstat & (FIRE_JBC_ERR_INT_MB_PEA_P |
                    FIRE_JBC_ERR_INT_CPE_P | FIRE_JBC_ERR_INT_APE_P |
                    FIRE_JBC_ERR_INT_PIO_CPE_P |
                    FIRE_JBC_ERR_INT_JTCEEW_P |
                    FIRE_JBC_ERR_INT_JTCEEI_P |
                    FIRE_JBC_ERR_INT_JTCEER_P)) != 0) {
                        fatal = 1;
                        val = FIRE_CTRL_READ_8(sc,
                            FIRE_FATAL_ERR_LOG);
                        device_printf(dev, "fatal error log %#llx\n",
                            (unsigned long long)val);
                        val = FIRE_CTRL_READ_8(sc,
                            FIRE_FATAL_ERR_LOG2);
                        device_printf(dev, "fatal error log 2 "
                            "%#llx\n", (unsigned long long)val);
                }
                if (fatal != 0) {
                        mtx_unlock_spin(&sc->sc_pcib_mtx);
                        panic("%s: fatal JBC core block error",
                            device_get_nameunit(sc->sc_dev));
                }
                FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
                mtx_unlock_spin(&sc->sc_pcib_mtx);
        } else {
                mtx_unlock_spin(&sc->sc_pcib_mtx);
                panic("%s: unknown JBC core block error status %#llx",
                    device_get_nameunit(sc->sc_dev),
                    (unsigned long long)errstat);
        }
        return (FILTER_HANDLED);
}

static int
fire_pcie(void *arg)
{
        struct fire_msiqarg *fmqa;
        struct fire_softc *sc;
        struct fo_msiq_record *qrec;
        device_t dev;
        uint64_t word0;
        u_int head, msg, msiq;

        fmqa = arg;
        sc = fmqa->fmqa_fica.fica_sc;
        dev = sc->sc_dev;
        msiq = fmqa->fmqa_msiq;
        mtx_lock_spin(&fmqa->fmqa_mtx);
        head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
            FO_PCI_EQ_HD_SHFT;
        qrec = &fmqa->fmqa_base[head];
        word0 = qrec->fomqr_word0;
        for (;;) {
                KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSG) != 0,
                    ("%s: received non-PCIe message in event queue %d "
                    "(word0 %#llx)", device_get_nameunit(dev), msiq,
                    (unsigned long long)word0));
                msg = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
                    FO_MQR_WORD0_DATA0_SHFT;

#define PCIE_MSG_CODE_ERR_COR           0x30
#define PCIE_MSG_CODE_ERR_NONFATAL      0x31
#define PCIE_MSG_CODE_ERR_FATAL         0x33

                if (msg == PCIE_MSG_CODE_ERR_COR)
                        device_printf(dev, "correctable PCIe error\n");
                else if (msg == PCIE_MSG_CODE_ERR_NONFATAL ||
                    msg == PCIE_MSG_CODE_ERR_FATAL)
                        panic("%s: %sfatal PCIe error",
                            device_get_nameunit(dev),
                            msg == PCIE_MSG_CODE_ERR_NONFATAL ? "non-" : "");
                else
                        panic("%s: received unknown PCIe message %#x",
                            device_get_nameunit(dev), msg);
                qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
                head = (head + 1) % sc->sc_msiq_size;
                qrec = &fmqa->fmqa_base[head];
                word0 = qrec->fomqr_word0;
                if (__predict_true((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
                        break;
        }
        FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
            FO_PCI_EQ_HD_SHFT);
        if ((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
            FO_PCI_EQ_TL_OVERR) != 0) {
                device_printf(dev, "event queue %d overflow\n", msiq);
                msiq <<= 3;
                FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
                    FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
                    FO_PCI_EQ_CTRL_CLR_COVERR);
        }
        mtx_unlock_spin(&fmqa->fmqa_mtx);
        return (FILTER_HANDLED);
}

static int
fire_maxslots(device_t dev)
{

        return (1);
}

static uint32_t
fire_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
    int width)
{

        return (ofw_pci_read_config_common(dev, PCIE_REGMAX, FO_CONF_OFF(bus,
            slot, func, reg), bus, slot, func, reg, width));
}

static void
fire_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
    uint32_t val, int width)
{

        ofw_pci_write_config_common(dev, PCIE_REGMAX, FO_CONF_OFF(bus, slot,
            func, reg), bus, slot, func, reg, val, width);
}

static int
fire_route_interrupt(device_t bridge, device_t dev, int pin)
{
        ofw_pci_intr_t mintr;

        mintr = ofw_pci_route_interrupt_common(bridge, dev, pin);
        if (!PCI_INTERRUPT_VALID(mintr))
                device_printf(bridge,
                    "could not route pin %d for device %d.%d\n",
                    pin, pci_get_slot(dev), pci_get_function(dev));
        return (mintr);
}
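
/*
 * Replacement dmamap_sync method used in Fire mode (see fire_attach());
 * with no streaming buffer to flush, all that is left to do here is to
 * enforce the required memory ordering around the DMA operation.
 */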
static void
fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
    bus_dmasync_op_t op)
{

        if ((map->dm_flags & DMF_LOADED) == 0)
                return;

        if ((op & BUS_DMASYNC_POSTREAD) != 0)
                ofw_pci_dmamap_sync_stst_order_common();
        else if ((op & BUS_DMASYNC_PREWRITE) != 0)
                membar(Sync);
}

static void
fire_intr_enable(void *arg)
{
        struct intr_vector *iv;
        struct fire_icarg *fica;
        struct fire_softc *sc;
        struct pcpu *pc;
        uint64_t mr;
        u_int ctrl, i;

        iv = arg;
        fica = iv->iv_icarg;
        sc = fica->fica_sc;
        mr = FO_PCI_IMAP_V;
        if (sc->sc_mode == FIRE_MODE_OBERON)
                mr |= (iv->iv_mid << OBERON_PCI_IMAP_T_DESTID_SHFT) &
                    OBERON_PCI_IMAP_T_DESTID_MASK;
        else
                mr |= (iv->iv_mid << FIRE_PCI_IMAP_T_JPID_SHFT) &
                    FIRE_PCI_IMAP_T_JPID_MASK;
        /*
         * Given that all mondos for the same target are required to use the
         * same interrupt controller we just use the CPU ID for indexing the
         * latter.
         */
        ctrl = 0;
        for (i = 0; i < mp_ncpus; ++i) {
                pc = pcpu_find(i);
                if (pc == NULL || iv->iv_mid != pc->pc_mid)
                        continue;
                ctrl = pc->pc_cpuid % 4;
                break;
        }
        mr |= (1ULL << ctrl) << FO_PCI_IMAP_INT_CTRL_NUM_SHFT &
            FO_PCI_IMAP_INT_CTRL_NUM_MASK;
        FIRE_PCI_WRITE_8(sc, fica->fica_map, mr);
}

static void
fire_intr_disable(void *arg)
{
        struct intr_vector *iv;
        struct fire_icarg *fica;
        struct fire_softc *sc;

        iv = arg;
        fica = iv->iv_icarg;
        sc = fica->fica_sc;
        FIRE_PCI_WRITE_8(sc, fica->fica_map,
            FIRE_PCI_READ_8(sc, fica->fica_map) & ~FO_PCI_IMAP_V);
}

static void
fire_intr_assign(void *arg)
{
        struct intr_vector *iv;
        struct fire_icarg *fica;
        struct fire_softc *sc;
        uint64_t mr;

        iv = arg;
        fica = iv->iv_icarg;
        sc = fica->fica_sc;
        mr = FIRE_PCI_READ_8(sc, fica->fica_map);
        if ((mr & FO_PCI_IMAP_V) != 0) {
                FIRE_PCI_WRITE_8(sc, fica->fica_map, mr & ~FO_PCI_IMAP_V);
                FIRE_PCI_BARRIER(sc, fica->fica_map, 8,
                    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        }
        while (FIRE_PCI_READ_8(sc, fica->fica_clr) != INTCLR_IDLE)
                ;
        if ((mr & FO_PCI_IMAP_V) != 0)
                fire_intr_enable(arg);
}

static void
fire_intr_clear(void *arg)
{
        struct intr_vector *iv;
        struct fire_icarg *fica;

        iv = arg;
        fica = iv->iv_icarg;
        FIRE_PCI_WRITE_8(fica->fica_sc, fica->fica_clr, INTCLR_IDLE);
}

/*
 * Given that the event queue implementation matches our current MD and MI
 * interrupt frameworks like square pegs fit into round holes we are generous
 * and use one event queue per MSI for now, which limits us to 35 MSIs/MSI-Xs
 * per Host-PCIe-bridge (we use one event queue for the PCIe error messages).
 * This seems tolerable as long as most devices just use one MSI/MSI-X anyway.
 * Adding knowledge about MSIs/MSI-Xs to the MD interrupt code should allow us
 * to decouple the 1:1 mapping at the cost of no longer being able to bind
 * MSIs/MSI-Xs to specific CPUs as we currently have no reliable way to
 * quiesce a device while we move its MSIs/MSI-Xs to another event queue.
 */
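
/*
 * The resulting bookkeeping, spread over the functions below:
 *      MSI i           <->     event queue sc_msi_msiq_table[i]
 *      event queue q   <->     INO sc_msiq_ino_first + q
 *      INO n           <->     interrupt vector INTMAP_VEC(sc_ign, n)
 * fire_alloc_msi() and fire_alloc_msix() reserve MSI/event queue pairs,
 * fire_setup_intr() programs the mapping into the hardware and
 * fire_teardown_intr() undoes it again.
 */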

static int
fire_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused,
    int *irqs)
{
        struct fire_softc *sc;
        u_int i, j, msiqrun;

        if (powerof2(count) == 0 || count > 32)
                return (EINVAL);

        sc = device_get_softc(dev);
        mtx_lock(&sc->sc_msi_mtx);
        msiqrun = 0;
        for (i = 0; i < sc->sc_msiq_count; i++) {
                for (j = i; j < i + count; j++) {
                        if (isclr(sc->sc_msiq_bitmap, j) == 0)
                                break;
                }
                if (j == i + count) {
                        msiqrun = i;
                        break;
                }
        }
        if (i == sc->sc_msiq_count) {
                mtx_unlock(&sc->sc_msi_mtx);
                return (ENXIO);
        }
        for (i = 0; i + count < sc->sc_msi_count; i += count) {
                for (j = i; j < i + count; j++)
                        if (isclr(sc->sc_msi_bitmap, j) == 0)
                                break;
                if (j == i + count) {
                        for (j = 0; j < count; j++) {
                                setbit(sc->sc_msiq_bitmap, msiqrun + j);
                                setbit(sc->sc_msi_bitmap, i + j);
                                sc->sc_msi_msiq_table[i + j] = msiqrun + j;
                                irqs[j] = sc->sc_msi_first + i + j;
                        }
                        mtx_unlock(&sc->sc_msi_mtx);
                        return (0);
                }
        }
        mtx_unlock(&sc->sc_msi_mtx);
        return (ENXIO);
}

static int
fire_release_msi(device_t dev, device_t child, int count, int *irqs)
{
        struct fire_softc *sc;
        int i;

        sc = device_get_softc(dev);
        mtx_lock(&sc->sc_msi_mtx);
        for (i = 0; i < count; i++) {
                clrbit(sc->sc_msiq_bitmap,
                    sc->sc_msi_msiq_table[irqs[i] - sc->sc_msi_first]);
                clrbit(sc->sc_msi_bitmap, irqs[i] - sc->sc_msi_first);
        }
        mtx_unlock(&sc->sc_msi_mtx);
        return (0);
}

static int
fire_alloc_msix(device_t dev, device_t child, int *irq)
{
        struct fire_softc *sc;
        int i, msiq;

        sc = device_get_softc(dev);
        if ((sc->sc_flags & FIRE_MSIX) == 0)
                return (ENXIO);
        mtx_lock(&sc->sc_msi_mtx);
        msiq = 0;
        for (i = 0; i < sc->sc_msiq_count; i++) {
                if (isclr(sc->sc_msiq_bitmap, i) != 0) {
                        msiq = i;
                        break;
                }
        }
        if (i == sc->sc_msiq_count) {
                mtx_unlock(&sc->sc_msi_mtx);
                return (ENXIO);
        }
        for (i = sc->sc_msi_count - 1; i >= 0; i--) {
                if (isclr(sc->sc_msi_bitmap, i) != 0) {
                        setbit(sc->sc_msiq_bitmap, msiq);
                        setbit(sc->sc_msi_bitmap, i);
                        sc->sc_msi_msiq_table[i] = msiq;
                        *irq = sc->sc_msi_first + i;
                        mtx_unlock(&sc->sc_msi_mtx);
                        return (0);
                }
        }
        mtx_unlock(&sc->sc_msi_mtx);
        return (ENXIO);
}

static int
fire_release_msix(device_t dev, device_t child, int irq)
{
        struct fire_softc *sc;

        sc = device_get_softc(dev);
        if ((sc->sc_flags & FIRE_MSIX) == 0)
                return (ENXIO);
        mtx_lock(&sc->sc_msi_mtx);
        clrbit(sc->sc_msiq_bitmap,
            sc->sc_msi_msiq_table[irq - sc->sc_msi_first]);
        clrbit(sc->sc_msi_bitmap, irq - sc->sc_msi_first);
        mtx_unlock(&sc->sc_msi_mtx);
        return (0);
}

static int
fire_map_msi(device_t dev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
        struct fire_softc *sc;
        struct pci_devinfo *dinfo;

        sc = device_get_softc(dev);
        dinfo = device_get_ivars(child);
        if (dinfo->cfg.msi.msi_alloc > 0) {
                if ((irq & ~sc->sc_msi_data_mask) != 0) {
                        device_printf(dev, "invalid MSI 0x%x\n", irq);
                        return (EINVAL);
                }
        } else {
                if ((sc->sc_flags & FIRE_MSIX) == 0)
                        return (ENXIO);
                if (fls(irq) > sc->sc_msix_data_width) {
                        device_printf(dev, "invalid MSI-X 0x%x\n", irq);
                        return (EINVAL);
                }
        }
        if (dinfo->cfg.msi.msi_alloc > 0 &&
            (dinfo->cfg.msi.msi_ctrl & PCIM_MSICTRL_64BIT) == 0)
                *addr = sc->sc_msi_addr32;
        else
                *addr = sc->sc_msi_addr64;
        *data = irq;
        return (0);
}

static void
fire_msiq_handler(void *cookie)
{
        struct intr_vector *iv;
        struct fire_msiqarg *fmqa;

        iv = cookie;
        fmqa = iv->iv_icarg;
        /*
         * Note that since fire_intr_clear() will clear the event queue
         * interrupt after the handler associated with the MSI [sic] has
         * been executed we have to protect the access to the event queue as
         * otherwise nested event queue interrupts cause corruption of the
         * event queue on MP machines.  Obviously especially when abandoning
         * the 1:1 mapping it would be better to not clear the event queue
         * interrupt after each handler invocation but only once when the
         * outstanding MSIs have been processed but unfortunately that
         * doesn't work well and leads to interrupt storms with controllers/
         * drivers which don't mask interrupts while the handler is executed.
         * Maybe delaying clearing the MSI until after the handler has been
         * executed could be used to work around this but that's not the
         * intended usage and might in turn cause lost MSIs.
         */
        mtx_lock_spin(&fmqa->fmqa_mtx);
        fire_msiq_common(iv, fmqa);
        mtx_unlock_spin(&fmqa->fmqa_mtx);
}

static void
fire_msiq_filter(void *cookie)
{
        struct intr_vector *iv;
        struct fire_msiqarg *fmqa;

        iv = cookie;
        fmqa = iv->iv_icarg;
        /*
         * For filters we don't use fire_intr_clear() since it would clear
         * the event queue interrupt while we're still processing the event
         * queue as filters and associated post-filter handler are executed
         * directly, which in turn would lead to lost MSIs.  So we clear the
         * event queue interrupt only once after processing the event queue.
         * Given that this still guarantees the filters to not be executed
         * concurrently and no other CPU can clear the event queue interrupt
         * while the event queue is still processed, we don't even need to
         * interlock the access to the event queue in this case.
         */
        fire_msiq_common(iv, fmqa);
        FIRE_PCI_WRITE_8(fmqa->fmqa_fica.fica_sc, fmqa->fmqa_fica.fica_clr,
            INTCLR_IDLE);
}

static inline void
fire_msiq_common(struct intr_vector *iv, struct fire_msiqarg *fmqa)
{
        struct fire_softc *sc;
        struct fo_msiq_record *qrec;
        device_t dev;
        uint64_t word0;
        u_int head, msi, msiq;

        sc = fmqa->fmqa_fica.fica_sc;
        dev = sc->sc_dev;
        msiq = fmqa->fmqa_msiq;
        head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
            FO_PCI_EQ_HD_SHFT;
        qrec = &fmqa->fmqa_base[head];
        word0 = qrec->fomqr_word0;
        for (;;) {
                if (__predict_false((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
                        break;
                KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSI64) != 0 ||
                    (word0 & FO_MQR_WORD0_FMT_TYPE_MSI32) != 0,
                    ("%s: received non-MSI/MSI-X message in event queue %d "
                    "(word0 %#llx)", device_get_nameunit(dev), msiq,
                    (unsigned long long)word0));
                msi = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
                    FO_MQR_WORD0_DATA0_SHFT;
                /*
                 * Sanity check the MSI/MSI-X as long as we use a 1:1 mapping.
                 */
                KASSERT(msi == fmqa->fmqa_msi,
                    ("%s: received non-matching MSI/MSI-X in event queue %d "
                    "(%d versus %d)", device_get_nameunit(dev), msiq, msi,
                    fmqa->fmqa_msi));
                FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + (msi << 3),
                    FO_PCI_MSI_CLR_EQWR_N);
                if (__predict_false(intr_event_handle(iv->iv_event,
                    NULL) != 0))
                        printf("stray MSI/MSI-X in event queue %d\n", msiq);
                qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
                head = (head + 1) % sc->sc_msiq_size;
                qrec = &fmqa->fmqa_base[head];
                word0 = qrec->fomqr_word0;
        }
        FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
            FO_PCI_EQ_HD_SHFT);
        if (__predict_false((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
            FO_PCI_EQ_TL_OVERR) != 0)) {
                device_printf(dev, "event queue %d overflow\n", msiq);
                msiq <<= 3;
                FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
                    FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
                    FO_PCI_EQ_CTRL_CLR_COVERR);
        }
}

static int
fire_setup_intr(device_t dev, device_t child, struct resource *ires,
    int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg,
    void **cookiep)
{
        struct fire_softc *sc;
        struct fire_msiqarg *fmqa;
        u_long vec;
        u_int msi, msiq;
        int error;

        sc = device_get_softc(dev);
        /*
         * XXX this assumes that a device only has one INTx, while in fact
         * Cassini+ and Saturn can use all four the firmware has assigned
         * to them, but so does pci(4).
         */
        if (rman_get_rid(ires) != 0) {
                msi = rman_get_start(ires);
                msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
                vec = INTMAP_VEC(sc->sc_ign, sc->sc_msiq_ino_first + msiq);
                msiq += sc->sc_msiq_first;
                if (intr_vectors[vec].iv_ic != &fire_ic) {
                        device_printf(dev,
                            "invalid interrupt controller for vector 0x%lx\n",
                            vec);
                        return (EINVAL);
                }
                /*
                 * The MD interrupt code needs the vector rather than the MSI.
                 */
                rman_set_start(ires, vec);
                rman_set_end(ires, vec);
                error = bus_generic_setup_intr(dev, child, ires, flags, filt,
                    intr, arg, cookiep);
                rman_set_start(ires, msi);
                rman_set_end(ires, msi);
                if (error != 0)
                        return (error);
                fmqa = intr_vectors[vec].iv_icarg;
                /*
                 * XXX inject our event queue handler.
                 */
                if (filt != NULL) {
                        intr_vectors[vec].iv_func = fire_msiq_filter;
                        intr_vectors[vec].iv_ic = &fire_msiqc_filter;
                        /*
                         * Ensure the event queue interrupt is cleared, it
                         * might have triggered before.  Given we supply NULL
                         * as ic_clear, inthand_add() won't do this for us.
                         */
                        FIRE_PCI_WRITE_8(sc, fmqa->fmqa_fica.fica_clr,
                            INTCLR_IDLE);
                } else
                        intr_vectors[vec].iv_func = fire_msiq_handler;
                /* Record the MSI/MSI-X as long as we use a 1:1 mapping. */
                fmqa->fmqa_msi = msi;
                FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (msiq << 3),
                    FO_PCI_EQ_CTRL_SET_EN);
                msi <<= 3;
                FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
                    (FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
                    ~FO_PCI_MSI_MAP_EQNUM_MASK) |
                    ((msiq << FO_PCI_MSI_MAP_EQNUM_SHFT) &
                    FO_PCI_MSI_MAP_EQNUM_MASK));
                FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + msi,
                    FO_PCI_MSI_CLR_EQWR_N);
                FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
                    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) |
                    FO_PCI_MSI_MAP_V);
                return (error);
        }

        /*
         * Make sure the vector is fully specified and we registered
         * our interrupt controller for it.
         */
        vec = rman_get_start(ires);
        if (INTIGN(vec) != sc->sc_ign) {
                device_printf(dev, "invalid interrupt vector 0x%lx\n", vec);
                return (EINVAL);
        }
        if (intr_vectors[vec].iv_ic != &fire_ic) {
                device_printf(dev,
                    "invalid interrupt controller for vector 0x%lx\n", vec);
                return (EINVAL);
        }
        return (bus_generic_setup_intr(dev, child, ires, flags, filt, intr,
            arg, cookiep));
}

static int
fire_teardown_intr(device_t dev, device_t child, struct resource *ires,
    void *cookie)
{
        struct fire_softc *sc;
        u_long vec;
        u_int msi, msiq;
        int error;

        sc = device_get_softc(dev);
        if (rman_get_rid(ires) != 0) {
                msi = rman_get_start(ires);
                msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
                vec = INTMAP_VEC(sc->sc_ign, msiq + sc->sc_msiq_ino_first);
                msiq += sc->sc_msiq_first;
                msi <<= 3;
                FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
                    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
                    ~FO_PCI_MSI_MAP_V);
                msiq <<= 3;
                FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
                    FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
                    FO_PCI_EQ_CTRL_CLR_DIS);
                FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_TL_BASE + msiq,
                    (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
                FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_HD_BASE + msiq,
                    (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
                intr_vectors[vec].iv_ic = &fire_ic;
                /*
                 * The MD interrupt code needs the vector rather than the MSI.
                 */
                msi >>= 3;
                rman_set_start(ires, vec);
                rman_set_end(ires, vec);
                error = bus_generic_teardown_intr(dev, child, ires, cookie);
                rman_set_start(ires, msi);
                rman_set_end(ires, msi);
                return (error);
        }
        return (bus_generic_teardown_intr(dev, child, ires, cookie));
}

static struct resource *
fire_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
        struct fire_softc *sc;

        if (type == SYS_RES_IRQ && *rid == 0) {
                sc = device_get_softc(bus);
                start = end = INTMAP_VEC(sc->sc_ign, end);
        }
        return (ofw_pci_alloc_resource(bus, child, type, rid, start, end,
            count, flags));
}

static u_int
fire_get_timecount(struct timecounter *tc)
{
        struct fire_softc *sc;

        sc = tc->tc_priv;
        return (FIRE_CTRL_READ_8(sc, FO_XBC_PRF_CNT0) & TC_COUNTER_MAX_MASK);
}