2 * Copyright (c) 1999, 2000 Matthew R. Green
3 * Copyright (c) 2001 - 2003 by Thomas Moestl <tmm@FreeBSD.org>
4 * Copyright (c) 2009 by Marius Strobl <marius@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * from: NetBSD: psycho.c,v 1.39 2001/10/07 20:30:41 eeh Exp
31 * from: FreeBSD: psycho.c 183152 2008-09-18 19:45:22Z marius
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
38 * Driver for `Fire' JBus to PCI Express and `Oberon' Uranus to PCI Express
43 #include "opt_ofw_pci.h"
45 #include <sys/param.h>
46 #include <sys/systm.h>
48 #include <sys/interrupt.h>
49 #include <sys/kernel.h>
51 #include <sys/malloc.h>
52 #include <sys/module.h>
53 #include <sys/mutex.h>
54 #include <sys/pciio.h>
58 #include <sys/sysctl.h>
59 #include <sys/timetc.h>
61 #include <dev/ofw/ofw_bus.h>
62 #include <dev/ofw/ofw_pci.h>
63 #include <dev/ofw/openfirm.h>
68 #include <machine/bus.h>
69 #include <machine/bus_common.h>
70 #include <machine/bus_private.h>
71 #include <machine/fsr.h>
72 #include <machine/iommureg.h>
73 #include <machine/iommuvar.h>
74 #include <machine/pmap.h>
75 #include <machine/resource.h>
77 #include <dev/pci/pcireg.h>
78 #include <dev/pci/pcivar.h>
80 #include <sparc64/pci/ofw_pci.h>
81 #include <sparc64/pci/firereg.h>
82 #include <sparc64/pci/firevar.h>
88 static bus_space_tag_t fire_alloc_bus_tag(struct fire_softc *sc, int type);
89 static const struct fire_desc *fire_get_desc(device_t dev);
90 static void fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
92 static int fire_get_intrmap(struct fire_softc *sc, u_int ino,
93 bus_addr_t *intrmapptr, bus_addr_t *intrclrptr);
94 static void fire_intr_assign(void *arg);
95 static void fire_intr_clear(void *arg);
96 static void fire_intr_disable(void *arg);
97 static void fire_intr_enable(void *arg);
98 static int fire_intr_register(struct fire_softc *sc, u_int ino);
99 static inline void fire_msiq_common(struct intr_vector *iv,
100 struct fire_msiqarg *fmqa);
101 static void fire_msiq_filter(void *cookie);
102 static void fire_msiq_handler(void *cookie);
103 static void fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
104 driver_filter_t handler, void *arg);
105 static timecounter_get_t fire_get_timecount;
107 /* Interrupt handlers */
108 static driver_filter_t fire_dmc_pec;
109 static driver_filter_t fire_pcie;
110 static driver_filter_t fire_xcb;
115 static bus_activate_resource_t fire_activate_resource;
116 static pcib_alloc_msi_t fire_alloc_msi;
117 static pcib_alloc_msix_t fire_alloc_msix;
118 static bus_alloc_resource_t fire_alloc_resource;
119 static device_attach_t fire_attach;
120 static bus_deactivate_resource_t fire_deactivate_resource;
121 static bus_get_dma_tag_t fire_get_dma_tag;
122 static ofw_bus_get_node_t fire_get_node;
123 static pcib_map_msi_t fire_map_msi;
124 static pcib_maxslots_t fire_maxslots;
125 static device_probe_t fire_probe;
126 static pcib_read_config_t fire_read_config;
127 static bus_read_ivar_t fire_read_ivar;
128 static pcib_release_msi_t fire_release_msi;
129 static pcib_release_msix_t fire_release_msix;
130 static bus_release_resource_t fire_release_resource;
131 static pcib_route_interrupt_t fire_route_interrupt;
132 static bus_setup_intr_t fire_setup_intr;
133 static bus_teardown_intr_t fire_teardown_intr;
134 static pcib_write_config_t fire_write_config;
/*
 * newbus method dispatch table: device, bus, pcib (PCI bridge) and ofw_bus
 * interfaces all resolve to the fire_* implementations declared above.
 * NOTE(review): this extract is a lossy numbered listing — the table
 * terminator (DEVMETHOD_END / closing brace) falls in an elided gap.
 */
136 static device_method_t fire_methods[] = {
137 /* Device interface */
138 DEVMETHOD(device_probe, fire_probe),
139 DEVMETHOD(device_attach, fire_attach),
140 DEVMETHOD(device_shutdown, bus_generic_shutdown),
141 DEVMETHOD(device_suspend, bus_generic_suspend),
142 DEVMETHOD(device_resume, bus_generic_resume),
/* Bus interface: resource and interrupt management for child devices. */
145 DEVMETHOD(bus_print_child, bus_generic_print_child),
146 DEVMETHOD(bus_read_ivar, fire_read_ivar),
147 DEVMETHOD(bus_setup_intr, fire_setup_intr),
148 DEVMETHOD(bus_teardown_intr, fire_teardown_intr),
149 DEVMETHOD(bus_alloc_resource, fire_alloc_resource),
150 DEVMETHOD(bus_activate_resource, fire_activate_resource),
151 DEVMETHOD(bus_deactivate_resource, fire_deactivate_resource),
152 DEVMETHOD(bus_release_resource, fire_release_resource),
153 DEVMETHOD(bus_get_dma_tag, fire_get_dma_tag),
/* pcib interface: config space access, interrupt routing, MSI/MSI-X. */
156 DEVMETHOD(pcib_maxslots, fire_maxslots),
157 DEVMETHOD(pcib_read_config, fire_read_config),
158 DEVMETHOD(pcib_write_config, fire_write_config),
159 DEVMETHOD(pcib_route_interrupt, fire_route_interrupt),
160 DEVMETHOD(pcib_alloc_msi, fire_alloc_msi),
161 DEVMETHOD(pcib_release_msi, fire_release_msi),
162 DEVMETHOD(pcib_alloc_msix, fire_alloc_msix),
163 DEVMETHOD(pcib_release_msix, fire_release_msix),
164 DEVMETHOD(pcib_map_msi, fire_map_msi),
166 /* ofw_bus interface */
167 DEVMETHOD(ofw_bus_get_node, fire_get_node),
/*
 * Register as a "pcib" class driver attached to nexus; early registration
 * (EARLY_DRIVER_MODULE) so the bridge comes up before its PCI children.
 * NOTE(review): the EARLY_DRIVER_MODULE continuation line (bus pass
 * argument) is elided in this extract.
 */
172 static devclass_t fire_devclass;
174 DEFINE_CLASS_0(pcib, fire_driver, fire_methods, sizeof(struct fire_softc));
175 EARLY_DRIVER_MODULE(fire, nexus, fire_driver, fire_devclass, 0, 0,
177 MODULE_DEPEND(fire, nexus, 1, 1, 1);
/*
 * Interrupt-controller ops advertised to the generic sparc64 interrupt
 * code for this bridge's INOs (enable/disable/assign/clear — see the
 * fire_intr_* handlers above).  Initializer members are elided in this
 * extract.
 */
179 static const struct intr_controller fire_ic = {
/* Per-INO interrupt controller argument; remaining members elided. */
187 struct fire_softc *fica_sc;
/*
 * Alternate controller used for MSI event-queue vectors that are driven
 * through a filter (fire_msiq_filter) instead of the plain handler.
 */
192 static const struct intr_controller fire_msiqc_filter = {
/*
 * Per-MSI-event-queue state: embeds the plain fire_icarg (so the generic
 * code can treat both the same) plus the queue's record ring base.
 * Further members (mutex, head/tail register offsets) are elided here.
 */
199 struct fire_msiqarg {
200 struct fire_icarg fmqa_fica;
202 struct fo_msiq_record *fmqa_base;
/* Timecounter quality is negative (see attach): use only as last resort. */
209 #define FIRE_PERF_CNT_QLTY 100
/*
 * Raw accessors for the two register banks; `spc' selects the resource
 * index (FIRE_PCI = per-PBM PCIe registers, FIRE_CTRL = shared JBC/UBC
 * controller registers — see the bank comment in fire_attach).
 */
211 #define FIRE_SPC_BARRIER(spc, sc, offs, len, flags) \
212 bus_barrier((sc)->sc_mem_res[(spc)], (offs), (len), (flags))
213 #define FIRE_SPC_READ_8(spc, sc, offs) \
214 bus_read_8((sc)->sc_mem_res[(spc)], (offs))
215 #define FIRE_SPC_WRITE_8(spc, sc, offs, v) \
216 bus_write_8((sc)->sc_mem_res[(spc)], (offs), (v))
/*
 * NOTE(review): FIRE_SPC_SET is defined twice below; the selecting
 * preprocessor conditional (presumably a debug knob) is elided in this
 * extract.  The first variant is a plain write, the second logs the
 * old -> new register value via device_printf before writing.
 */
219 #define FIRE_SPC_SET(spc, sc, offs, reg, v) \
220 FIRE_SPC_WRITE_8((spc), (sc), (offs), (v))
222 #define FIRE_SPC_SET(spc, sc, offs, reg, v) do { \
223 device_printf((sc)->sc_dev, reg " 0x%016llx -> 0x%016llx\n", \
224 (unsigned long long)FIRE_SPC_READ_8((spc), (sc), (offs)), \
225 (unsigned long long)(v)); \
226 FIRE_SPC_WRITE_8((spc), (sc), (offs), (v)); \
/* Convenience wrappers binding the bank argument. */
230 #define FIRE_PCI_BARRIER(sc, offs, len, flags) \
231 FIRE_SPC_BARRIER(FIRE_PCI, (sc), (offs), len, flags)
232 #define FIRE_PCI_READ_8(sc, offs) \
233 FIRE_SPC_READ_8(FIRE_PCI, (sc), (offs))
234 #define FIRE_PCI_WRITE_8(sc, offs, v) \
235 FIRE_SPC_WRITE_8(FIRE_PCI, (sc), (offs), (v))
236 #define FIRE_CTRL_BARRIER(sc, offs, len, flags) \
237 FIRE_SPC_BARRIER(FIRE_CTRL, (sc), (offs), len, flags)
238 #define FIRE_CTRL_READ_8(sc, offs) \
239 FIRE_SPC_READ_8(FIRE_CTRL, (sc), (offs))
240 #define FIRE_CTRL_WRITE_8(sc, offs, v) \
241 FIRE_SPC_WRITE_8(FIRE_CTRL, (sc), (offs), (v))
/* SET variants stringize the register name (# offs) for the debug log. */
243 #define FIRE_PCI_SET(sc, offs, v) \
244 FIRE_SPC_SET(FIRE_PCI, (sc), (offs), # offs, (v))
245 #define FIRE_CTRL_SET(sc, offs, v) \
246 FIRE_SPC_SET(FIRE_CTRL, (sc), (offs), # offs, (v))
/* OFW "compatible" string this entry matches (struct header elided). */
249 const char *fd_string;
/*
 * Table mapping OFW compatible strings to chip mode and human-readable
 * name; searched linearly by fire_get_desc().  Terminator entry elided
 * in this extract.
 * NOTE(review): duplicate `const' qualifier ("const struct fire_desc
 * const") — harmless but should read "static const struct fire_desc".
 */
254 static const struct fire_desc const fire_compats[] = {
255 { "pciex108e,80f0", FIRE_MODE_FIRE, "Fire" },
257 { "pciex108e,80f8", FIRE_MODE_OBERON, "Oberon" },
/*
 * Look up the fire_compats[] entry whose fd_string equals the device's
 * OFW "compatible" property; used by both probe and attach.
 * Return statements (matched entry / NULL) are elided in this extract;
 * presumably a NULL `compat' is also handled in an elided line — verify.
 */
262 static const struct fire_desc *
263 fire_get_desc(device_t dev)
265 const struct fire_desc *desc;
268 compat = ofw_bus_get_compat(dev);
271 for (desc = fire_compats; desc->fd_string != NULL; desc++)
272 if (strcmp(desc->fd_string, compat) == 0)
/*
 * device_probe: claim the device iff its OFW device_type is PCIe and its
 * compatible string is in fire_compats[].  BUS_PROBE_GENERIC leaves room
 * for a more specific driver to win.  The failure return path is elided
 * in this extract.
 */
278 fire_probe(device_t dev)
282 dtype = ofw_bus_get_type(dev);
283 if (dtype != NULL && strcmp(dtype, OFW_TYPE_PCIE) == 0 &&
284 fire_get_desc(dev) != NULL) {
285 device_set_desc(dev, "Sun Host-PCIe bridge");
286 return (BUS_PROBE_GENERIC);
/*
 * device_attach: bring up one Fire/Oberon PBM end-to-end — map register
 * banks, register the interrupt controller for every INO advertised in
 * "ino-bitmap", initialize the JBC/UBC, TLU, DLU/LPU, ILU, IMU, MMU, DMC
 * and PEC blocks, set up MSI/MSI-X event queues, optionally publish a
 * timecounter, initialize the IOMMU and the PCI memory/I/O rmans, and
 * finally attach the child "pci" bus.  All failures panic: the bridge is
 * essential and there is no meaningful partial attach.
 * NOTE(review): this extract is a lossy listing; declarations of locals
 * such as `node', `mode', `i', `j', `lw', `mps' and many closing braces /
 * else branches fall in elided lines.
 */
292 fire_attach(device_t dev)
294 struct fire_softc *sc;
295 const struct fire_desc *desc;
296 struct ofw_pci_msi_ranges msi_ranges;
297 struct ofw_pci_msi_addr_ranges msi_addr_ranges;
298 struct ofw_pci_msi_eq_to_devino msi_eq_to_devino;
299 struct fire_msiqarg *fmqa;
300 struct timecounter *tc;
301 struct ofw_pci_ranges *range;
302 uint64_t ino_bitmap, val;
304 uint32_t prop, prop_array[2];
309 sc = device_get_softc(dev);
310 node = ofw_bus_get_node(dev);
311 desc = fire_get_desc(dev);
312 mode = desc->fd_mode;
/* sc_msi_mtx guards the MSI/MSIQ bitmaps; sc_pcib_mtx is a spin lock. */
319 mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF);
320 mtx_init(&sc->sc_pcib_mtx, "pcib_mtx", NULL, MTX_SPIN);
323 * Fire and Oberon have two register banks:
324 * (0) per-PBM PCI Express configuration and status registers
325 * (1) (shared) Fire/Oberon controller configuration and status
328 for (i = 0; i < FIRE_NREG; i++) {
330 sc->sc_mem_res[i] = bus_alloc_resource_any(dev,
331 SYS_RES_MEMORY, &j, RF_ACTIVE);
332 if (sc->sc_mem_res[i] == NULL)
333 panic("%s: could not allocate register bank %d",
/* "portid" doubles as the interrupt group number (IGN) for this PBM. */
337 if (OF_getprop(node, "portid", &sc->sc_ign, sizeof(sc->sc_ign)) == -1)
338 panic("%s: could not determine IGN", __func__);
339 if (OF_getprop(node, "module-revision#", &prop, sizeof(prop)) == -1)
340 panic("%s: could not determine revision", __func__);
342 device_printf(dev, "%s, module-revision %d, IGN %#x\n",
343 desc->fd_name, prop, sc->sc_ign);
346 * Hunt through all the interrupt mapping regs and register
347 * the interrupt controller for our interrupt vectors. We do
348 * this early in order to be able to catch stray interrupts.
350 i = OF_getprop(node, "ino-bitmap", (void *)prop_array,
353 panic("%s: could not get ino-bitmap", __func__);
/* Two 32-bit OFW cells combine into one 64-bit bitmap of valid INOs. */
354 ino_bitmap = ((uint64_t)prop_array[1] << 32) | prop_array[0];
355 for (i = 0; i <= FO_MAX_INO; i++) {
356 if ((ino_bitmap & (1ULL << i)) == 0)
358 j = fire_intr_register(sc, i);
360 device_printf(dev, "could not register interrupt "
361 "controller for INO %d (%d)\n", i, j);
364 /* JBC/UBC module initialization */
365 FIRE_CTRL_SET(sc, FO_XBC_ERR_LOG_EN, ~0ULL);
366 FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
367 /* not enabled by OpenSolaris */
368 FIRE_CTRL_SET(sc, FO_XBC_INT_EN, ~0ULL);
/* JBus parity / fatal-reset sources exist on Fire only, not Oberon. */
369 if (sc->sc_mode == FIRE_MODE_FIRE) {
370 FIRE_CTRL_SET(sc, FIRE_JBUS_PAR_CTRL,
371 FIRE_JBUS_PAR_CTRL_P_EN);
372 FIRE_CTRL_SET(sc, FIRE_JBC_FATAL_RST_EN,
373 ((1ULL << FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_SHFT) &
374 FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_MASK) |
375 FIRE_JBC_FATAL_RST_EN_MB_PEA_P_INT |
376 FIRE_JBC_FATAL_RST_EN_CPE_P_INT |
377 FIRE_JBC_FATAL_RST_EN_APE_P_INT |
378 FIRE_JBC_FATAL_RST_EN_PIO_CPE_INT |
379 FIRE_JBC_FATAL_RST_EN_JTCEEW_P_INT |
380 FIRE_JBC_FATAL_RST_EN_JTCEEI_P_INT |
381 FIRE_JBC_FATAL_RST_EN_JTCEER_P_INT);
382 FIRE_CTRL_SET(sc, FIRE_JBC_CORE_BLOCK_INT_EN, ~0ULL);
385 /* TLU initialization */
386 FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_STAT_CLR,
387 FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
388 /* not enabled by OpenSolaris */
389 FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_INT_EN,
390 FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
391 FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_STAT_CLR,
392 FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
393 /* not enabled by OpenSolaris */
394 FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_INT_EN,
395 FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
396 FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_STAT_CLR,
397 FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
398 /* not enabled by OpenSolaris */
399 FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_INT_EN,
400 FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
401 val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) |
402 ((FO_PCI_TLU_CTRL_L0S_TIM_DFLT << FO_PCI_TLU_CTRL_L0S_TIM_SHFT) &
403 FO_PCI_TLU_CTRL_L0S_TIM_MASK) |
404 ((FO_PCI_TLU_CTRL_CFG_DFLT << FO_PCI_TLU_CTRL_CFG_SHFT) &
405 FO_PCI_TLU_CTRL_CFG_MASK);
406 if (sc->sc_mode == FIRE_MODE_OBERON)
407 val &= ~FO_PCI_TLU_CTRL_NWPR_EN;
408 val |= FO_PCI_TLU_CTRL_CFG_REMAIN_DETECT_QUIET;
409 FIRE_PCI_SET(sc, FO_PCI_TLU_CTRL, val);
410 FIRE_PCI_SET(sc, FO_PCI_TLU_DEV_CTRL, 0);
411 FIRE_PCI_SET(sc, FO_PCI_TLU_LNK_CTRL, FO_PCI_TLU_LNK_CTRL_CLK);
413 /* DLU/LPU initialization */
414 if (sc->sc_mode == FIRE_MODE_OBERON)
415 FIRE_PCI_SET(sc, FO_PCI_LPU_INT_MASK, 0);
/* NOTE(review): an else branch appears elided between these lines. */
417 FIRE_PCI_SET(sc, FO_PCI_LPU_RST, 0);
418 FIRE_PCI_SET(sc, FO_PCI_LPU_LNK_LYR_CFG,
419 FO_PCI_LPU_LNK_LYR_CFG_VC0_EN);
420 FIRE_PCI_SET(sc, FO_PCI_LPU_FLW_CTRL_UPDT_CTRL,
421 FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_NP_EN |
422 FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_P_EN);
423 if (sc->sc_mode == FIRE_MODE_OBERON)
424 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
425 (OBERON_PCI_LPU_TXLNK_RPLY_TMR_THRS_DFLT <<
426 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
427 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
/* Derive link width (lw); the case labels are elided in this extract. */
429 switch ((FIRE_PCI_READ_8(sc, FO_PCI_TLU_LNK_STAT) &
430 FO_PCI_TLU_LNK_STAT_WDTH_MASK) >>
431 FO_PCI_TLU_LNK_STAT_WDTH_SHFT) {
/* mps = configured max payload size index into the timer tables. */
447 mps = (FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) &
448 FO_PCI_TLU_CTRL_CFG_MASK) >> FO_PCI_TLU_CTRL_CFG_SHFT;
449 i = sizeof(fire_freq_nak_tmr_thrs) /
450 sizeof(*fire_freq_nak_tmr_thrs);
/* Program NAK latency and replay timers from the [mps][lw] tables. */
453 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS,
454 (fire_freq_nak_tmr_thrs[mps][lw] <<
455 FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_SHFT) &
456 FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_MASK);
457 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
458 (fire_rply_tmr_thrs[mps][lw] <<
459 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
460 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
461 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RTR_FIFO_PTR,
462 ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_DFLT <<
463 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_SHFT) &
464 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_MASK) |
465 ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_DFLT <<
466 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_SHFT) &
467 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_MASK));
468 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG2,
469 (FO_PCI_LPU_LTSSM_CFG2_12_TO_DFLT <<
470 FO_PCI_LPU_LTSSM_CFG2_12_TO_SHFT) &
471 FO_PCI_LPU_LTSSM_CFG2_12_TO_MASK);
472 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG3,
473 (FO_PCI_LPU_LTSSM_CFG3_2_TO_DFLT <<
474 FO_PCI_LPU_LTSSM_CFG3_2_TO_SHFT) &
475 FO_PCI_LPU_LTSSM_CFG3_2_TO_MASK);
476 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG4,
477 ((FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_DFLT <<
478 FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_SHFT) &
479 FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_MASK) |
480 ((FO_PCI_LPU_LTSSM_CFG4_N_FTS_DFLT <<
481 FO_PCI_LPU_LTSSM_CFG4_N_FTS_SHFT) &
482 FO_PCI_LPU_LTSSM_CFG4_N_FTS_MASK));
483 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG5, 0);
486 /* ILU initialization */
487 FIRE_PCI_SET(sc, FO_PCI_ILU_ERR_STAT_CLR, ~0ULL);
488 /* not enabled by OpenSolaris */
489 FIRE_PCI_SET(sc, FO_PCI_ILU_INT_EN, ~0ULL);
491 /* IMU initialization */
492 FIRE_PCI_SET(sc, FO_PCI_IMU_ERR_STAT_CLR, ~0ULL);
/* Keep message-not-enabled interrupts masked; others stay as firmware left. */
493 FIRE_PCI_SET(sc, FO_PCI_IMU_INT_EN,
494 FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_EN) &
495 ~(FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_S |
496 FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_S |
497 FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_S |
498 FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
499 FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
500 FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P));
502 /* MMU initialization */
503 FIRE_PCI_SET(sc, FO_PCI_MMU_ERR_STAT_CLR,
504 FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);
505 /* not enabled by OpenSolaris */
506 FIRE_PCI_SET(sc, FO_PCI_MMU_INT_EN,
507 FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);
509 /* DMC initialization */
510 FIRE_PCI_SET(sc, FO_PCI_DMC_CORE_BLOCK_INT_EN, ~0ULL);
511 FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTA, 0);
512 FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTB, 0);
514 /* PEC initialization */
515 FIRE_PCI_SET(sc, FO_PCI_PEC_CORE_BLOCK_INT_EN, ~0ULL);
517 /* Establish handlers for interesting interrupts. */
518 if ((ino_bitmap & (1ULL << FO_DMC_PEC_INO)) != 0)
519 fire_set_intr(sc, 1, FO_DMC_PEC_INO, fire_dmc_pec, sc);
520 if ((ino_bitmap & (1ULL << FO_XCB_INO)) != 0)
521 fire_set_intr(sc, 0, FO_XCB_INO, fire_xcb, sc);
523 /* MSI/MSI-X support */
524 if (OF_getprop(node, "#msi", &sc->sc_msi_count,
525 sizeof(sc->sc_msi_count)) == -1)
526 panic("%s: could not determine MSI count", __func__);
527 if (OF_getprop(node, "msi-ranges", &msi_ranges,
528 sizeof(msi_ranges)) == -1)
529 sc->sc_msi_first = 0;
531 sc->sc_msi_first = msi_ranges.first;
532 if (OF_getprop(node, "msi-data-mask", &sc->sc_msi_data_mask,
533 sizeof(sc->sc_msi_data_mask)) == -1)
534 panic("%s: could not determine MSI data mask", __func__);
/* Presence of "msix-data-width" indicates MSI-X capability. */
535 if (OF_getprop(node, "msix-data-width", &sc->sc_msix_data_width,
536 sizeof(sc->sc_msix_data_width)) > 0)
537 sc->sc_flags |= FIRE_MSIX;
538 if (OF_getprop(node, "msi-address-ranges", &msi_addr_ranges,
539 sizeof(msi_addr_ranges)) == -1)
540 panic("%s: could not determine MSI address ranges", __func__);
541 sc->sc_msi_addr32 = OFW_PCI_MSI_ADDR_RANGE_32(&msi_addr_ranges);
542 sc->sc_msi_addr64 = OFW_PCI_MSI_ADDR_RANGE_64(&msi_addr_ranges);
543 if (OF_getprop(node, "#msi-eqs", &sc->sc_msiq_count,
544 sizeof(sc->sc_msiq_count)) == -1)
545 panic("%s: could not determine MSI event queue count",
547 if (OF_getprop(node, "msi-eq-size", &sc->sc_msiq_size,
548 sizeof(sc->sc_msiq_size)) == -1)
549 panic("%s: could not determine MSI event queue size",
/* Two alternative property spellings for the EQ-to-devino mapping. */
551 if (OF_getprop(node, "msi-eq-to-devino", &msi_eq_to_devino,
552 sizeof(msi_eq_to_devino)) == -1 &&
553 OF_getprop(node, "msi-eq-devino", &msi_eq_to_devino,
554 sizeof(msi_eq_to_devino)) == -1) {
555 sc->sc_msiq_first = 0;
556 sc->sc_msiq_ino_first = FO_EQ_FIRST_INO;
558 sc->sc_msiq_first = msi_eq_to_devino.eq_first;
559 sc->sc_msiq_ino_first = msi_eq_to_devino.devino_first;
561 if (sc->sc_msiq_ino_first < FO_EQ_FIRST_INO ||
562 sc->sc_msiq_ino_first + sc->sc_msiq_count - 1 > FO_EQ_LAST_INO)
563 panic("%s: event queues exceed INO range", __func__);
/* Allocation bitmaps/tables for MSIs and MSI event queues. */
564 sc->sc_msi_bitmap = malloc(roundup2(sc->sc_msi_count, NBBY) / NBBY,
565 M_DEVBUF, M_NOWAIT | M_ZERO);
566 if (sc->sc_msi_bitmap == NULL)
567 panic("%s: could not malloc MSI bitmap", __func__);
568 sc->sc_msi_msiq_table = malloc(sc->sc_msi_count *
569 sizeof(*sc->sc_msi_msiq_table), M_DEVBUF, M_NOWAIT | M_ZERO);
570 if (sc->sc_msi_msiq_table == NULL)
571 panic("%s: could not malloc MSI-MSI event queue table",
573 sc->sc_msiq_bitmap = malloc(roundup2(sc->sc_msiq_count, NBBY) / NBBY,
574 M_DEVBUF, M_NOWAIT | M_ZERO);
575 if (sc->sc_msiq_bitmap == NULL)
576 panic("%s: could not malloc MSI event queue bitmap", __func__);
/* One physically contiguous buffer backing all event-queue rings. */
577 j = FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * sc->sc_msiq_count;
578 sc->sc_msiq = contigmalloc(j, M_DEVBUF, M_NOWAIT, 0, ~0UL,
580 if (sc->sc_msiq == NULL)
581 panic("%s: could not contigmalloc MSI event queue", __func__);
582 memset(sc->sc_msiq, 0, j);
583 FIRE_PCI_SET(sc, FO_PCI_EQ_BASE_ADDR, FO_PCI_EQ_BASE_ADDR_BYPASS |
584 (pmap_kextract((vm_offset_t)sc->sc_msiq) &
585 FO_PCI_EQ_BASE_ADDR_MASK));
/* Invalidate every MSI mapping register (8-byte stride, hence << 3). */
586 for (i = 0; i < sc->sc_msi_count; i++) {
587 j = (i + sc->sc_msi_first) << 3;
588 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + j,
589 FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + j) &
/* Set up per-event-queue state; queues without a valid INO are reserved. */
592 for (i = 0; i < sc->sc_msiq_count; i++) {
593 j = i + sc->sc_msiq_ino_first;
594 if ((ino_bitmap & (1ULL << j)) == 0) {
595 mtx_lock(&sc->sc_msi_mtx);
596 setbit(sc->sc_msiq_bitmap, i);
597 mtx_unlock(&sc->sc_msi_mtx);
599 fmqa = intr_vectors[INTMAP_VEC(sc->sc_ign, j)].iv_icarg;
600 mtx_init(&fmqa->fmqa_mtx, "msiq_mtx", NULL, MTX_SPIN);
602 (struct fo_msiq_record *)((caddr_t)sc->sc_msiq +
603 (FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * i));
604 j = i + sc->sc_msiq_first;
607 fmqa->fmqa_head = FO_PCI_EQ_HD_BASE + j;
608 fmqa->fmqa_tail = FO_PCI_EQ_TL_BASE + j;
609 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + j,
610 FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
611 FO_PCI_EQ_CTRL_CLR_DIS);
612 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_tail,
613 (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
614 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head,
615 (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
617 FIRE_PCI_SET(sc, FO_PCI_MSI_32_BIT_ADDR, sc->sc_msi_addr32 &
618 FO_PCI_MSI_32_BIT_ADDR_MASK);
619 FIRE_PCI_SET(sc, FO_PCI_MSI_64_BIT_ADDR, sc->sc_msi_addr64 &
620 FO_PCI_MSI_64_BIT_ADDR_MASK);
623 * Establish a handler for interesting PCIe messages and disable
/* Claim a spare event queue (index j) for PME/error message delivery. */
626 mtx_lock(&sc->sc_msi_mtx);
627 for (i = 0; i < sc->sc_msiq_count; i++) {
628 if (isclr(sc->sc_msiq_bitmap, i) != 0) {
633 if (i == sc->sc_msiq_count) {
634 mtx_unlock(&sc->sc_msi_mtx);
635 panic("%s: no spare event queue for PCIe messages", __func__);
637 setbit(sc->sc_msiq_bitmap, j);
638 mtx_unlock(&sc->sc_msi_mtx);
639 i = INTMAP_VEC(sc->sc_ign, j + sc->sc_msiq_ino_first);
640 if (bus_set_resource(dev, SYS_RES_IRQ, 2, i, 1) != 0)
641 panic("%s: failed to add interrupt for PCIe messages",
643 fire_set_intr(sc, 2, INTINO(i), fire_pcie, intr_vectors[i].iv_icarg);
644 j += sc->sc_msiq_first;
646 * "Please note that setting the EQNUM field to a value larger than
647 * 35 will yield unpredictable results."
650 panic("%s: invalid queue for PCIe messages (%d)",
/* Route correctable/non-fatal/fatal error messages and PME to queue j. */
652 FIRE_PCI_SET(sc, FO_PCI_ERR_COR, FO_PCI_ERR_PME_V |
653 ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
654 FIRE_PCI_SET(sc, FO_PCI_ERR_NONFATAL, FO_PCI_ERR_PME_V |
655 ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
656 FIRE_PCI_SET(sc, FO_PCI_ERR_FATAL, FO_PCI_ERR_PME_V |
657 ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
658 FIRE_PCI_SET(sc, FO_PCI_PM_PME, 0);
659 FIRE_PCI_SET(sc, FO_PCI_PME_TO_ACK, 0);
660 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (j << 3),
661 FO_PCI_EQ_CTRL_SET_EN);
663 #define TC_COUNTER_MAX_MASK 0xffffffff
666 * Setup JBC/UBC performance counter 0 in bus cycle counting
667 * mode as timecounter. Unfortunately, at least with Fire all
668 * JBus-driven performance counters just don't advance in bus
669 * cycle counting mode.
/* Only the first instance publishes a timecounter (shared counter). */
671 if (device_get_unit(dev) == 0) {
672 FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT0, 0);
673 FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT1, 0);
674 FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT_SEL,
675 (FO_XBC_PRF_CNT_NONE << FO_XBC_PRF_CNT_CNT1_SHFT) |
676 (FO_XBC_PRF_CNT_XB_CLK << FO_XBC_PRF_CNT_CNT0_SHFT));
678 device_printf(dev, "FO_XBC_PRF_CNT0 0x%016llx\n",
679 (long long unsigned)FIRE_CTRL_READ_8(sc,
681 device_printf(dev, "FO_XBC_PRF_CNT0 0x%016llx\n",
682 (long long unsigned)FIRE_CTRL_READ_8(sc,
685 tc = malloc(sizeof(*tc), M_DEVBUF, M_NOWAIT | M_ZERO);
687 panic("%s: could not malloc timecounter", __func__);
688 tc->tc_get_timecount = fire_get_timecount;
689 tc->tc_poll_pps = NULL;
690 tc->tc_counter_mask = TC_COUNTER_MAX_MASK;
691 if (OF_getprop(OF_peer(0), "clock-frequency", &prop,
693 panic("%s: could not determine clock frequency",
695 tc->tc_frequency = prop;
696 tc->tc_name = strdup(device_get_nameunit(dev), M_DEVBUF);
/* Negative quality: usable, but never preferred automatically. */
697 tc->tc_quality = -FIRE_PERF_CNT_QLTY;
703 * Set up the IOMMU. Both Fire and Oberon have one per PBM, but
704 * neither has a streaming buffer.
706 memcpy(&sc->sc_dma_methods, &iommu_dma_methods,
707 sizeof(sc->sc_dma_methods));
708 sc->sc_is.is_flags = IOMMU_FIRE | IOMMU_PRESERVE_PROM;
709 if (sc->sc_mode == FIRE_MODE_OBERON) {
710 sc->sc_is.is_flags |= IOMMU_FLUSH_CACHE;
711 sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(OBERON_IOMMU_BITS);
/* Fire path: custom dmamap_sync needed (else branch header elided). */
713 sc->sc_dma_methods.dm_dmamap_sync = fire_dmamap_sync;
714 sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(FIRE_IOMMU_BITS);
716 sc->sc_is.is_sb[0] = sc->sc_is.is_sb[1] = 0;
717 /* Punch in our copies. */
718 sc->sc_is.is_bustag = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]);
719 sc->sc_is.is_bushandle = rman_get_bushandle(sc->sc_mem_res[FIRE_PCI]);
720 sc->sc_is.is_iommu = FO_PCI_MMU;
721 val = FIRE_PCI_READ_8(sc, FO_PCI_MMU + IMR_CTL);
722 iommu_init(device_get_nameunit(sc->sc_dev), &sc->sc_is, 7, -1, 0);
724 device_printf(dev, "FO_PCI_MMU + IMR_CTL 0x%016llx -> 0x%016llx\n",
725 (long long unsigned)val, (long long unsigned)sc->sc_is.is_cr);
728 /* Initialize memory and I/O rmans. */
729 sc->sc_pci_io_rman.rm_type = RMAN_ARRAY;
730 sc->sc_pci_io_rman.rm_descr = "Fire PCI I/O Ports";
731 if (rman_init(&sc->sc_pci_io_rman) != 0 ||
732 rman_manage_region(&sc->sc_pci_io_rman, 0, FO_IO_SIZE) != 0)
733 panic("%s: failed to set up I/O rman", __func__);
734 sc->sc_pci_mem_rman.rm_type = RMAN_ARRAY;
735 sc->sc_pci_mem_rman.rm_descr = "Fire PCI Memory";
736 if (rman_init(&sc->sc_pci_mem_rman) != 0 ||
737 rman_manage_region(&sc->sc_pci_mem_rman, 0, FO_MEM_SIZE) != 0)
738 panic("%s: failed to set up memory rman", __func__);
740 i = OF_getprop_alloc(node, "ranges", sizeof(*range), (void **)&range);
742 * Make sure that the expected ranges are present. The
743 * OFW_PCI_CS_MEM64 one is not currently used though.
745 if (i != FIRE_NRANGE)
746 panic("%s: unsupported number of ranges", __func__);
748 * Find the addresses of the various bus spaces.
749 * There should not be multiple ones of one kind.
750 * The physical start addresses of the ranges are the configuration,
751 * memory and I/O handles.
753 for (i = 0; i < FIRE_NRANGE; i++) {
754 j = OFW_PCI_RANGE_CS(&range[i]);
755 if (sc->sc_pci_bh[j] != 0)
756 panic("%s: duplicate range for space %d",
758 sc->sc_pci_bh[j] = OFW_PCI_RANGE_PHYS(&range[i]);
760 free(range, M_OFWPROP);
762 /* Allocate our tags. */
763 sc->sc_pci_memt = fire_alloc_bus_tag(sc, PCI_MEMORY_BUS_SPACE);
764 sc->sc_pci_iot = fire_alloc_bus_tag(sc, PCI_IO_BUS_SPACE);
765 sc->sc_pci_cfgt = fire_alloc_bus_tag(sc, PCI_CONFIG_BUS_SPACE);
766 if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
767 sc->sc_is.is_pmaxaddr, ~0, NULL, NULL, sc->sc_is.is_pmaxaddr,
768 0xff, 0xffffffff, 0, NULL, NULL, &sc->sc_pci_dmat) != 0)
769 panic("%s: bus_dma_tag_create failed", __func__);
770 /* Customize the tag. */
771 sc->sc_pci_dmat->dt_cookie = &sc->sc_is;
772 sc->sc_pci_dmat->dt_mt = &sc->sc_dma_methods;
775 * Get the bus range from the firmware.
776 * NB: Neither Fire nor Oberon support PCI bus reenumeration.
778 i = OF_getprop(node, "bus-range", (void *)prop_array,
781 panic("%s: could not get bus-range", __func__);
782 if (i != sizeof(prop_array))
783 panic("%s: broken bus-range (%d)", __func__, i);
784 sc->sc_pci_secbus = prop_array[0];
785 sc->sc_pci_subbus = prop_array[1];
786 if (bootverbose != 0)
787 device_printf(dev, "bus range %u to %u; PCI bus %d\n",
788 sc->sc_pci_secbus, sc->sc_pci_subbus, sc->sc_pci_secbus);
790 ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(ofw_pci_intr_t));
/* Read-only statistics counters exported under this device's sysctl tree. */
792 #define FIRE_SYSCTL_ADD_UINT(name, arg, desc) \
793 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), \
794 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, \
795 (name), CTLFLAG_RD, (arg), 0, (desc))
797 FIRE_SYSCTL_ADD_UINT("ilu_err", &sc->sc_stats_ilu_err,
798 "ILU unknown errors");
799 FIRE_SYSCTL_ADD_UINT("jbc_ce_async", &sc->sc_stats_jbc_ce_async,
800 "JBC correctable errors");
801 FIRE_SYSCTL_ADD_UINT("jbc_unsol_int", &sc->sc_stats_jbc_unsol_int,
802 "JBC unsolicited interrupt ACK/NACK errors");
803 FIRE_SYSCTL_ADD_UINT("jbc_unsol_rd", &sc->sc_stats_jbc_unsol_rd,
804 "JBC unsolicited read response errors");
805 FIRE_SYSCTL_ADD_UINT("mmu_err", &sc->sc_stats_mmu_err, "MMU errors");
806 FIRE_SYSCTL_ADD_UINT("tlu_ce", &sc->sc_stats_tlu_ce,
807 "DLU/TLU correctable errors");
/*
 * NOTE(review): the next three statements end in ',' (comma operator)
 * rather than ';' — works by accident, should be semicolons.
 */
808 FIRE_SYSCTL_ADD_UINT("tlu_oe_non_fatal",
809 &sc->sc_stats_tlu_oe_non_fatal,
810 "DLU/TLU other event non-fatal errors summary"),
811 FIRE_SYSCTL_ADD_UINT("tlu_oe_rx_err", &sc->sc_stats_tlu_oe_rx_err,
812 "DLU/TLU receive other event errors"),
813 FIRE_SYSCTL_ADD_UINT("tlu_oe_tx_err", &sc->sc_stats_tlu_oe_tx_err,
814 "DLU/TLU transmit other event errors"),
/* NOTE(review): "erros" typo in the description string below. */
815 FIRE_SYSCTL_ADD_UINT("ubc_dmardue", &sc->sc_stats_ubc_dmardue,
816 "UBC DMARDUE erros");
818 #undef FIRE_SYSCTL_ADD_UINT
820 device_add_child(dev, "pci", -1);
821 return (bus_generic_attach(dev));
/*
 * Allocate the IRQ resource slot `index', sanity-check that the vector
 * really maps to the expected INO/IGN and to our interrupt controller,
 * then hook `handler' as a fast filter.  Any failure panics — these are
 * the bridge's own error interrupts and must be operational.
 * Local declarations (`rid', `vec') are elided in this extract.
 */
825 fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
826 driver_filter_t handler, void *arg)
832 sc->sc_irq_res[index] = bus_alloc_resource_any(sc->sc_dev,
833 SYS_RES_IRQ, &rid, RF_ACTIVE);
834 if (sc->sc_irq_res[index] == NULL ||
835 INTINO(vec = rman_get_start(sc->sc_irq_res[index])) != ino ||
836 INTIGN(vec) != sc->sc_ign ||
837 intr_vectors[vec].iv_ic != &fire_ic ||
838 bus_setup_intr(sc->sc_dev, sc->sc_irq_res[index],
839 INTR_TYPE_MISC | INTR_FAST, handler, NULL, arg,
840 &sc->sc_ihand[index]) != 0)
841 panic("%s: failed to set up interrupt %d", __func__, index);
/*
 * Register our interrupt controller for one INO: look up its map/clear
 * register addresses, allocate the per-INO argument (the larger
 * fire_msiqarg for INOs in the MSI event-queue range), and hand it to
 * intr_controller_register().  On registration failure the argument is
 * freed; the return statements are elided in this extract.
 */
845 fire_intr_register(struct fire_softc *sc, u_int ino)
847 struct fire_icarg *fica;
848 bus_addr_t intrclr, intrmap;
851 if (fire_get_intrmap(sc, ino, &intrmap, &intrclr) == 0)
853 fica = malloc((ino >= FO_EQ_FIRST_INO && ino <= FO_EQ_LAST_INO) ?
854 sizeof(struct fire_msiqarg) : sizeof(struct fire_icarg), M_DEVBUF,
859 fica->fica_map = intrmap;
860 fica->fica_clr = intrclr;
861 error = (intr_controller_register(INTMAP_VEC(sc->sc_ign, ino),
864 free(fica, M_DEVBUF);
/*
 * Compute the INT_MAP/INT_CLR register addresses for `ino' into the
 * caller-supplied pointers (either may be NULL to skip).  Rejects INOs
 * above FO_MAX_INO; the success/failure return statements and the
 * validity check against the mapping register are elided in this
 * extract.  Returns non-zero on success (see callers).
 */
869 fire_get_intrmap(struct fire_softc *sc, u_int ino, bus_addr_t *intrmapptr,
870 bus_addr_t *intrclrptr)
873 if (ino > FO_MAX_INO) {
874 device_printf(sc->sc_dev, "out of range INO %d requested\n",
880 if (intrmapptr != NULL)
881 *intrmapptr = FO_PCI_INT_MAP_BASE + ino;
882 if (intrclrptr != NULL)
883 *intrclrptr = FO_PCI_INT_CLR_BASE + ino;
891 fire_dmc_pec(void *arg)
893 struct fire_softc *sc;
895 uint64_t cestat, dmcstat, ilustat, imustat, mcstat, mmustat, mmutfar;
896 uint64_t mmutfsr, oestat, pecstat, uestat, val;
897 u_int fatal, oenfatal;
902 mtx_lock_spin(&sc->sc_pcib_mtx);
903 mcstat = FIRE_PCI_READ_8(sc, FO_PCI_MULTI_CORE_ERR_STAT);
904 if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_DMC) != 0) {
905 dmcstat = FIRE_PCI_READ_8(sc, FO_PCI_DMC_CORE_BLOCK_ERR_STAT);
906 if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_IMU) != 0) {
/*
 * Tail fragment of the DMC/PEC error interrupt filter (function header is
 * outside this view).  Decodes IMU, MMU, TLU (uncorrectable/correctable/
 * other-event) and ILU error status, dumps the matching hardware log
 * registers, bumps per-category statistics and panics on fatal errors.
 * NOTE(review): this chunk is an elided extraction — gaps in the embedded
 * original line numbers mark statements missing from this view, so the
 * code text below is left byte-identical.
 */
907 imustat = FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_STAT);
908 device_printf(dev, "IMU error %#llx\n",
909 (unsigned long long)imustat);
911 FO_PCI_IMU_ERR_INT_EQ_NOT_EN_P) != 0) {
913 val = FIRE_PCI_READ_8(sc,
914 FO_PCI_IMU_SCS_ERR_LOG);
915 device_printf(dev, "SCS error log %#llx\n",
916 (unsigned long long)val);
918 if ((imustat & FO_PCI_IMU_ERR_INT_EQ_OVER_P) != 0) {
920 val = FIRE_PCI_READ_8(sc,
921 FO_PCI_IMU_EQS_ERR_LOG);
922 device_printf(dev, "EQS error log %#llx\n",
923 (unsigned long long)val);
925 if ((imustat & (FO_PCI_IMU_ERR_INT_MSI_MAL_ERR_P |
926 FO_PCI_IMU_ERR_INT_MSI_PAR_ERR_P |
927 FO_PCI_IMU_ERR_INT_PMEACK_MES_NOT_EN_P |
928 FO_PCI_IMU_ERR_INT_PMPME_MES_NOT_EN_P |
929 FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
930 FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
931 FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P |
932 FO_PCI_IMU_ERR_INT_MSI_NOT_EN_P)) != 0) {
934 val = FIRE_PCI_READ_8(sc,
935 FO_PCI_IMU_RDS_ERR_LOG);
936 device_printf(dev, "RDS error log %#llx\n",
937 (unsigned long long)val);
/* MMU (IOMMU) errors: capture translation fault address/status first. */
940 if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_MMU) != 0) {
942 mmustat = FIRE_PCI_READ_8(sc, FO_PCI_MMU_INT_STAT);
943 mmutfar = FIRE_PCI_READ_8(sc,
944 FO_PCI_MMU_TRANS_FAULT_ADDR);
945 mmutfsr = FIRE_PCI_READ_8(sc,
946 FO_PCI_MMU_TRANS_FAULT_STAT);
947 if ((mmustat & (FO_PCI_MMU_ERR_INT_TBW_DPE_P |
948 FO_PCI_MMU_ERR_INT_TBW_ERR_P |
949 FO_PCI_MMU_ERR_INT_TBW_UDE_P |
950 FO_PCI_MMU_ERR_INT_TBW_DME_P |
951 FO_PCI_MMU_ERR_INT_TTC_CAE_P |
952 FIRE_PCI_MMU_ERR_INT_TTC_DPE_P |
953 OBERON_PCI_MMU_ERR_INT_TTC_DUE_P |
954 FO_PCI_MMU_ERR_INT_TRN_ERR_P)) != 0)
957 sc->sc_stats_mmu_err++;
958 FIRE_PCI_WRITE_8(sc, FO_PCI_MMU_ERR_STAT_CLR,
962 "MMU error %#llx: TFAR %#llx TFSR %#llx\n",
963 (unsigned long long)mmustat,
964 (unsigned long long)mmutfar,
965 (unsigned long long)mmutfsr);
/* PEC core block: TLU uncorrectable / correctable / other-event / ILU. */
968 if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_PEC) != 0) {
969 pecstat = FIRE_PCI_READ_8(sc, FO_PCI_PEC_CORE_BLOCK_INT_STAT);
970 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_UERR) != 0) {
972 uestat = FIRE_PCI_READ_8(sc,
973 FO_PCI_TLU_UERR_INT_STAT);
975 "DLU/TLU uncorrectable error %#llx\n",
976 (unsigned long long)uestat);
/*
 * NOTE(review): OBERON_PCI_TLU_UERR_INT_POIS_P appears twice in this
 * mask (lines 978 and 983); the second occurrence is redundant and one
 * of them may have been intended as a different bit — verify against
 * the Fire/Oberon register documentation.
 */
977 if ((uestat & (FO_PCI_TLU_UERR_INT_UR_P |
978 OBERON_PCI_TLU_UERR_INT_POIS_P |
979 FO_PCI_TLU_UERR_INT_MFP_P |
980 FO_PCI_TLU_UERR_INT_ROF_P |
981 FO_PCI_TLU_UERR_INT_UC_P |
982 FIRE_PCI_TLU_UERR_INT_PP_P |
983 OBERON_PCI_TLU_UERR_INT_POIS_P)) != 0) {
984 val = FIRE_PCI_READ_8(sc,
985 FO_PCI_TLU_RX_UERR_HDR1_LOG);
987 "receive header log %#llx\n",
988 (unsigned long long)val);
989 val = FIRE_PCI_READ_8(sc,
990 FO_PCI_TLU_RX_UERR_HDR2_LOG);
992 "receive header log 2 %#llx\n",
993 (unsigned long long)val);
995 if ((uestat & FO_PCI_TLU_UERR_INT_CTO_P) != 0) {
996 val = FIRE_PCI_READ_8(sc,
997 FO_PCI_TLU_TX_UERR_HDR1_LOG);
999 "transmit header log %#llx\n",
1000 (unsigned long long)val);
1001 val = FIRE_PCI_READ_8(sc,
1002 FO_PCI_TLU_TX_UERR_HDR2_LOG);
1004 "transmit header log 2 %#llx\n",
1005 (unsigned long long)val);
1007 if ((uestat & FO_PCI_TLU_UERR_INT_DLP_P) != 0) {
1008 val = FIRE_PCI_READ_8(sc,
1009 FO_PCI_LPU_LNK_LYR_INT_STAT);
1011 "link layer interrupt and status %#llx\n",
1012 (unsigned long long)val);
1014 if ((uestat & FO_PCI_TLU_UERR_INT_TE_P) != 0) {
1015 val = FIRE_PCI_READ_8(sc,
1016 FO_PCI_LPU_PHY_LYR_INT_STAT);
1018 "phy layer interrupt and status %#llx\n",
1019 (unsigned long long)val);
/* Correctable errors are counted, logged and then cleared. */
1022 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_CERR) != 0) {
1023 sc->sc_stats_tlu_ce++;
1024 cestat = FIRE_PCI_READ_8(sc,
1025 FO_PCI_TLU_CERR_INT_STAT);
1027 "DLU/TLU correctable error %#llx\n",
1028 (unsigned long long)cestat);
1029 val = FIRE_PCI_READ_8(sc,
1030 FO_PCI_LPU_LNK_LYR_INT_STAT);
1032 "link layer interrupt and status %#llx\n",
1033 (unsigned long long)val);
1034 if ((cestat & FO_PCI_TLU_CERR_INT_RE_P) != 0) {
1035 FIRE_PCI_WRITE_8(sc,
1036 FO_PCI_LPU_LNK_LYR_INT_STAT, val);
1037 val = FIRE_PCI_READ_8(sc,
1038 FO_PCI_LPU_PHY_LYR_INT_STAT);
1040 "phy layer interrupt and status %#llx\n",
1041 (unsigned long long)val);
1043 FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_CERR_STAT_CLR,
/* "Other event" class: some bits are fatal, some only logged. */
1046 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_OEVENT) != 0) {
1048 oestat = FIRE_PCI_READ_8(sc,
1049 FO_PCI_TLU_OEVENT_INT_STAT);
1050 device_printf(dev, "DLU/TLU other event %#llx\n",
1051 (unsigned long long)oestat);
1052 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1053 FO_PCI_TLU_OEVENT_MRC_P |
1054 FO_PCI_TLU_OEVENT_WUC_P |
1055 FO_PCI_TLU_OEVENT_RUC_P |
1056 FO_PCI_TLU_OEVENT_CRS_P)) != 0) {
1057 val = FIRE_PCI_READ_8(sc,
1058 FO_PCI_TLU_RX_OEVENT_HDR1_LOG);
1060 "receive header log %#llx\n",
1061 (unsigned long long)val);
1062 val = FIRE_PCI_READ_8(sc,
1063 FO_PCI_TLU_RX_OEVENT_HDR2_LOG);
1065 "receive header log 2 %#llx\n",
1066 (unsigned long long)val);
1067 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1068 FO_PCI_TLU_OEVENT_MRC_P |
1069 FO_PCI_TLU_OEVENT_WUC_P |
1070 FO_PCI_TLU_OEVENT_RUC_P)) != 0)
1073 sc->sc_stats_tlu_oe_rx_err++;
1077 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1078 FO_PCI_TLU_OEVENT_CTO_P |
1079 FO_PCI_TLU_OEVENT_WUC_P |
1080 FO_PCI_TLU_OEVENT_RUC_P)) != 0) {
1081 val = FIRE_PCI_READ_8(sc,
1082 FO_PCI_TLU_TX_OEVENT_HDR1_LOG);
1084 "transmit header log %#llx\n",
1085 (unsigned long long)val);
1086 val = FIRE_PCI_READ_8(sc,
1087 FO_PCI_TLU_TX_OEVENT_HDR2_LOG);
1089 "transmit header log 2 %#llx\n",
1090 (unsigned long long)val);
1091 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1092 FO_PCI_TLU_OEVENT_CTO_P |
1093 FO_PCI_TLU_OEVENT_WUC_P |
1094 FO_PCI_TLU_OEVENT_RUC_P)) != 0)
1097 sc->sc_stats_tlu_oe_tx_err++;
1101 if ((oestat & (FO_PCI_TLU_OEVENT_ERO_P |
1102 FO_PCI_TLU_OEVENT_EMP_P |
1103 FO_PCI_TLU_OEVENT_EPE_P |
1104 FIRE_PCI_TLU_OEVENT_ERP_P |
1105 OBERON_PCI_TLU_OEVENT_ERBU_P |
1106 FIRE_PCI_TLU_OEVENT_EIP_P |
1107 OBERON_PCI_TLU_OEVENT_EIUE_P)) != 0) {
1109 val = FIRE_PCI_READ_8(sc,
1110 FO_PCI_LPU_LNK_LYR_INT_STAT);
1112 "link layer interrupt and status %#llx\n",
1113 (unsigned long long)val);
1115 if ((oestat & (FO_PCI_TLU_OEVENT_IIP_P |
1116 FO_PCI_TLU_OEVENT_EDP_P |
1117 FIRE_PCI_TLU_OEVENT_EHP_P |
1118 OBERON_PCI_TLU_OEVENT_TLUEITMO_S |
1119 FO_PCI_TLU_OEVENT_ERU_P)) != 0)
1121 if ((oestat & (FO_PCI_TLU_OEVENT_NFP_P |
1122 FO_PCI_TLU_OEVENT_LWC_P |
1123 FO_PCI_TLU_OEVENT_LIN_P |
1124 FO_PCI_TLU_OEVENT_LRS_P |
1125 FO_PCI_TLU_OEVENT_LDN_P |
1126 FO_PCI_TLU_OEVENT_LUP_P)) != 0)
/* Non-fatal other events are cleared here instead of panicking. */
1128 if (oenfatal != 0) {
1129 sc->sc_stats_tlu_oe_non_fatal++;
1130 FIRE_PCI_WRITE_8(sc,
1131 FO_PCI_TLU_OEVENT_STAT_CLR, oestat);
1132 if ((oestat & FO_PCI_TLU_OEVENT_LIN_P) != 0)
1133 FIRE_PCI_WRITE_8(sc,
1134 FO_PCI_LPU_LNK_LYR_INT_STAT,
1136 FO_PCI_LPU_LNK_LYR_INT_STAT));
1139 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_ILU) != 0) {
1140 ilustat = FIRE_PCI_READ_8(sc, FO_PCI_ILU_INT_STAT);
1141 device_printf(dev, "ILU error %#llx\n",
1142 (unsigned long long)ilustat);
/*
 * NOTE(review): FIRE_PCI_ILU_ERR_INT_IHB_PE_P is OR'd with itself here
 * (lines 1143-1144) — the second operand is a no-op and was presumably
 * meant to be a different ILU error bit; confirm against the hardware
 * documentation before changing.
 */
1143 if ((ilustat & (FIRE_PCI_ILU_ERR_INT_IHB_PE_P |
1144 FIRE_PCI_ILU_ERR_INT_IHB_PE_P)) != 0)
1147 sc->sc_stats_ilu_err++;
1148 FIRE_PCI_WRITE_8(sc, FO_PCI_ILU_INT_STAT,
/* Fatal errors drop the spin lock before panicking. */
1153 mtx_unlock_spin(&sc->sc_pcib_mtx);
1155 panic("%s: fatal DMC/PEC error",
1156 device_get_nameunit(sc->sc_dev));
1157 return (FILTER_HANDLED);
/*
 * fire_xcb() fragment (signature elided from this view): interrupt filter
 * for the host-bus core block — the UBC (Uranus bus) on Oberon, the JBC
 * (JBus) on Fire.  Non-fatal conditions are counted and cleared; anything
 * else drops the spin lock and panics.  NOTE(review): elided extraction,
 * code text left byte-identical.
 */
1163 struct fire_softc *sc;
1165 uint64_t errstat, intstat, val;
1171 mtx_lock_spin(&sc->sc_pcib_mtx);
/* Oberon: only DMA read uncorrectable errors (banks A/B) are survivable. */
1172 if (sc->sc_mode == FIRE_MODE_OBERON) {
1173 intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
1174 device_printf(dev, "UBC error: interrupt status %#llx\n",
1175 (unsigned long long)intstat);
1176 if ((intstat & ~(OBERON_UBC_ERR_INT_DMARDUEB_P |
1177 OBERON_UBC_ERR_INT_DMARDUEA_P)) != 0)
1180 sc->sc_stats_ubc_dmardue++;
1182 mtx_unlock_spin(&sc->sc_pcib_mtx);
1183 panic("%s: fatal UBC core block error",
1184 device_get_nameunit(sc->sc_dev));
1186 FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
1187 mtx_unlock_spin(&sc->sc_pcib_mtx);
/* Fire: decode the JBC core block error status and dump the logs. */
1190 errstat = FIRE_CTRL_READ_8(sc, FIRE_JBC_CORE_BLOCK_ERR_STAT);
1191 if ((errstat & (FIRE_JBC_CORE_BLOCK_ERR_STAT_MERGE |
1192 FIRE_JBC_CORE_BLOCK_ERR_STAT_JBCINT |
1193 FIRE_JBC_CORE_BLOCK_ERR_STAT_DMCINT)) != 0) {
1194 intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
1195 device_printf(dev, "JBC interrupt status %#llx\n",
1196 (unsigned long long)intstat);
1197 if ((intstat & FIRE_JBC_ERR_INT_EBUS_TO_P) != 0) {
1198 val = FIRE_CTRL_READ_8(sc,
1199 FIRE_JBC_CSR_ERR_LOG);
1200 device_printf(dev, "CSR error log %#llx\n",
1201 (unsigned long long)val);
1203 if ((intstat & (FIRE_JBC_ERR_INT_UNSOL_RD_P |
1204 FIRE_JBC_ERR_INT_UNSOL_INT_P)) != 0) {
1206 FIRE_JBC_ERR_INT_UNSOL_RD_P) != 0)
1207 sc->sc_stats_jbc_unsol_rd++;
1209 FIRE_JBC_ERR_INT_UNSOL_INT_P) != 0)
1210 sc->sc_stats_jbc_unsol_int++;
1211 val = FIRE_CTRL_READ_8(sc,
1212 FIRE_DMCINT_IDC_ERR_LOG);
1214 "DMCINT IDC error log %#llx\n",
1215 (unsigned long long)val);
1217 if ((intstat & (FIRE_JBC_ERR_INT_MB_PER_P |
1218 FIRE_JBC_ERR_INT_MB_PEW_P)) != 0) {
1220 val = FIRE_CTRL_READ_8(sc,
1221 FIRE_MERGE_TRANS_ERR_LOG);
1223 "merge transaction error log %#llx\n",
1224 (unsigned long long)val);
1226 if ((intstat & FIRE_JBC_ERR_INT_IJP_P) != 0) {
1228 val = FIRE_CTRL_READ_8(sc,
1229 FIRE_JBCINT_OTRANS_ERR_LOG);
1231 "JBCINT out transaction error log "
1232 "%#llx\n", (unsigned long long)val);
1233 val = FIRE_CTRL_READ_8(sc,
1234 FIRE_JBCINT_OTRANS_ERR_LOG2);
1236 "JBCINT out transaction error log 2 "
1237 "%#llx\n", (unsigned long long)val);
1239 if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
1240 FIRE_JBC_ERR_INT_CE_ASYN_P |
1241 FIRE_JBC_ERR_INT_JTE_P | FIRE_JBC_ERR_INT_JBE_P |
1242 FIRE_JBC_ERR_INT_JUE_P |
1243 FIRE_JBC_ERR_INT_ICISE_P |
1244 FIRE_JBC_ERR_INT_WR_DPE_P |
1245 FIRE_JBC_ERR_INT_RD_DPE_P |
1246 FIRE_JBC_ERR_INT_ILL_BMW_P |
1247 FIRE_JBC_ERR_INT_ILL_BMR_P |
1248 FIRE_JBC_ERR_INT_BJC_P)) != 0) {
/* Same mask minus CE_ASYN: everything but correctable async errors. */
1249 if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
1250 FIRE_JBC_ERR_INT_JTE_P |
1251 FIRE_JBC_ERR_INT_JBE_P |
1252 FIRE_JBC_ERR_INT_JUE_P |
1253 FIRE_JBC_ERR_INT_ICISE_P |
1254 FIRE_JBC_ERR_INT_WR_DPE_P |
1255 FIRE_JBC_ERR_INT_RD_DPE_P |
1256 FIRE_JBC_ERR_INT_ILL_BMW_P |
1257 FIRE_JBC_ERR_INT_ILL_BMR_P |
1258 FIRE_JBC_ERR_INT_BJC_P)) != 0)
1261 sc->sc_stats_jbc_ce_async++;
1262 val = FIRE_CTRL_READ_8(sc,
1263 FIRE_JBCINT_ITRANS_ERR_LOG);
1265 "JBCINT in transaction error log %#llx\n",
1266 (unsigned long long)val);
1267 val = FIRE_CTRL_READ_8(sc,
1268 FIRE_JBCINT_ITRANS_ERR_LOG2);
1270 "JBCINT in transaction error log 2 "
1271 "%#llx\n", (unsigned long long)val);
1273 if ((intstat & (FIRE_JBC_ERR_INT_PIO_UNMAP_RD_P |
1274 FIRE_JBC_ERR_INT_ILL_ACC_RD_P |
1275 FIRE_JBC_ERR_INT_PIO_UNMAP_P |
1276 FIRE_JBC_ERR_INT_PIO_DPE_P |
1277 FIRE_JBC_ERR_INT_PIO_CPE_P |
1278 FIRE_JBC_ERR_INT_ILL_ACC_P)) != 0) {
1280 val = FIRE_CTRL_READ_8(sc,
1281 FIRE_JBC_CSR_ERR_LOG);
1283 "DMCINT ODCD error log %#llx\n",
1284 (unsigned long long)val);
1286 if ((intstat & (FIRE_JBC_ERR_INT_MB_PEA_P |
1287 FIRE_JBC_ERR_INT_CPE_P | FIRE_JBC_ERR_INT_APE_P |
1288 FIRE_JBC_ERR_INT_PIO_CPE_P |
1289 FIRE_JBC_ERR_INT_JTCEEW_P |
1290 FIRE_JBC_ERR_INT_JTCEEI_P |
1291 FIRE_JBC_ERR_INT_JTCEER_P)) != 0) {
1293 val = FIRE_CTRL_READ_8(sc,
1294 FIRE_FATAL_ERR_LOG);
1295 device_printf(dev, "fatal error log %#llx\n",
1296 (unsigned long long)val);
1297 val = FIRE_CTRL_READ_8(sc,
1298 FIRE_FATAL_ERR_LOG2);
1299 device_printf(dev, "fatal error log 2 "
1300 "%#llx\n", (unsigned long long)val);
1303 mtx_unlock_spin(&sc->sc_pcib_mtx);
1304 panic("%s: fatal JBC core block error",
1305 device_get_nameunit(sc->sc_dev));
1307 FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
1308 mtx_unlock_spin(&sc->sc_pcib_mtx);
1311 mtx_unlock_spin(&sc->sc_pcib_mtx);
/*
 * NOTE(review): "JCB" in the panic string below looks like a
 * transposition of "JBC" used everywhere else in this function —
 * runtime string, left untouched here.
 */
1312 panic("%s: unknown JCB core block error status %#llx",
1313 device_get_nameunit(sc->sc_dev),
1314 (unsigned long long)errstat);
1317 return (FILTER_HANDLED);
/*
 * fire_pcie() fragment: interrupt filter for the event queue dedicated to
 * PCIe error messages.  Drains message records from the queue head,
 * panics on (non)fatal error messages and on unknown message codes, then
 * advances the head pointer and handles queue overflow.  NOTE(review):
 * elided extraction, code text left byte-identical.
 */
1321 fire_pcie(void *arg)
1323 struct fire_msiqarg *fmqa;
1324 struct fire_softc *sc;
1325 struct fo_msiq_record *qrec;
1328 u_int head, msg, msiq;
1331 sc = fmqa->fmqa_fica.fica_sc;
1333 msiq = fmqa->fmqa_msiq;
1334 mtx_lock_spin(&fmqa->fmqa_mtx);
1335 head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
1337 qrec = &fmqa->fmqa_base[head];
1338 word0 = qrec->fomqr_word0;
1340 KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSG) != 0,
1341 ("%s: received non-PCIe message in event queue %d "
1342 "(word0 %#llx)", device_get_nameunit(dev), msiq,
1343 (unsigned long long)word0));
1344 msg = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
1345 FO_MQR_WORD0_DATA0_SHFT;
/* PCIe ERR_COR/ERR_NONFATAL/ERR_FATAL message codes (PCIe spec). */
1347 #define PCIE_MSG_CODE_ERR_COR 0x30
1348 #define PCIE_MSG_CODE_ERR_NONFATAL 0x31
1349 #define PCIE_MSG_CODE_ERR_FATAL 0x33
1351 if (msg == PCIE_MSG_CODE_ERR_COR)
1352 device_printf(dev, "correctable PCIe error\n");
1353 else if (msg == PCIE_MSG_CODE_ERR_NONFATAL ||
1354 msg == PCIE_MSG_CODE_ERR_FATAL)
1355 panic("%s: %sfatal PCIe error",
1356 device_get_nameunit(dev),
1357 msg == PCIE_MSG_CODE_ERR_NONFATAL ? "non-" : "");
1359 panic("%s: received unknown PCIe message %#x",
1360 device_get_nameunit(dev), msg);
/* Mark the record consumed and advance to the next queue slot. */
1361 qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
1362 head = (head + 1) % sc->sc_msiq_size;
1363 qrec = &fmqa->fmqa_base[head];
1364 word0 = qrec->fomqr_word0;
1365 if (__predict_true((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
1368 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
1370 if ((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
1371 FO_PCI_EQ_TL_OVERR) != 0) {
1372 device_printf(dev, "event queue %d overflow\n", msiq);
1374 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1375 FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
1376 FO_PCI_EQ_CTRL_CLR_COVERR);
1378 mtx_unlock_spin(&fmqa->fmqa_mtx);
1379 return (FILTER_HANDLED);
/* fire_maxslots() fragment: pcib maxslots method (body elided from view). */
1383 fire_maxslots(device_t dev)
/*
 * fire_read_config() fragment: pcib config-space read.  Validates the
 * bus/slot/func/reg tuple against the bridge's bus range, then uses the
 * fault-tolerant bus_space_peek_*() accessors sized by width; on a peek
 * failure a diagnostic is printed.  NOTE(review): elided extraction,
 * code text left byte-identical.
 */
1390 fire_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
1393 struct fire_softc *sc;
1394 bus_space_handle_t bh;
1401 sc = device_get_softc(dev);
1402 if (bus < sc->sc_pci_secbus || bus > sc->sc_pci_subbus ||
1403 slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX)
1406 offset = FO_CONF_OFF(bus, slot, func, reg);
1407 bh = sc->sc_pci_bh[OFW_PCI_CS_CONFIG];
1410 i = bus_space_peek_1(sc->sc_pci_cfgt, bh, offset, &byte);
1414 i = bus_space_peek_2(sc->sc_pci_cfgt, bh, offset, &shrt);
1418 i = bus_space_peek_4(sc->sc_pci_cfgt, bh, offset, &wrd);
1422 panic("%s: bad width", __func__);
1428 printf("%s: read data error reading: %d.%d.%d: 0x%x\n",
1429 __func__, bus, slot, func, reg);
/*
 * fire_write_config() fragment: pcib config-space write, mirror of
 * fire_read_config() using plain bus_space_write_*() sized by width.
 * NOTE(review): elided extraction, code text left byte-identical.
 */
1437 fire_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
1438 uint32_t val, int width)
1440 struct fire_softc *sc;
1441 bus_space_handle_t bh;
1444 sc = device_get_softc(dev);
1445 if (bus < sc->sc_pci_secbus || bus > sc->sc_pci_subbus ||
1446 slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX)
1449 offset = FO_CONF_OFF(bus, slot, func, reg);
1450 bh = sc->sc_pci_bh[OFW_PCI_CS_CONFIG];
1453 bus_space_write_1(sc->sc_pci_cfgt, bh, offset, val);
1456 bus_space_write_2(sc->sc_pci_cfgt, bh, offset, val);
1459 bus_space_write_4(sc->sc_pci_cfgt, bh, offset, val);
1462 panic("%s: bad width", __func__);
/*
 * fire_route_interrupt() fragment: resolve an INTx pin to an interrupt
 * via the OFW interrupt-map; returns PCI_INVALID_IRQ when no mapping is
 * found.  NOTE(review): elided extraction, code text left byte-identical.
 */
1468 fire_route_interrupt(device_t bridge, device_t dev, int pin)
1470 struct fire_softc *sc;
1471 struct ofw_pci_register reg;
1472 ofw_pci_intr_t pintr, mintr;
1473 uint8_t maskbuf[sizeof(reg) + sizeof(pintr)];
1475 sc = device_get_softc(bridge);
/*
 * NOTE(review): the "®," below is mojibake — an HTML-entity collapse of
 * "&reg," (address-of the local ofw_pci_register).  Restore "&reg," when
 * repairing the extraction; left byte-identical here.
 */
1477 if (ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo,
1478 ®, sizeof(reg), &pintr, sizeof(pintr), &mintr, sizeof(mintr),
1482 device_printf(bridge, "could not route pin %d for device %d.%d\n",
1483 pin, pci_get_slot(dev), pci_get_function(dev));
1484 return (PCI_INVALID_IRQ);
/*
 * fire_read_ivar() fragment: pcib instance-variable accessor; the visible
 * cases return the domain (device unit) and the secondary bus number.
 * NOTE(review): elided extraction (switch scaffolding missing), code text
 * left byte-identical.
 */
1488 fire_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
1490 struct fire_softc *sc;
1492 sc = device_get_softc(dev);
1494 case PCIB_IVAR_DOMAIN:
1495 *result = device_get_unit(dev);
1498 *result = sc->sc_pci_secbus;
1504 #define VIS_BLOCKSIZE 64
/*
 * fire_dmamap_sync() fragment: DMA map sync hook that flushes the CPU
 * write buffers with a VIS block-commit store to a static aligned buffer
 * (enabling the FPU via FPRS_FEF first).  NOTE(review): elided
 * extraction, code text left byte-identical.
 */
1507 fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
1508 bus_dmasync_op_t op)
1510 static u_char buf[VIS_BLOCKSIZE] __aligned(VIS_BLOCKSIZE);
1513 if ((map->dm_flags & DMF_LOADED) == 0 ||
1514 (op & ~BUS_DMASYNC_POSTWRITE) == 0)
1519 wr(fprs, reg | FPRS_FEF, 0);
1520 __asm __volatile("stda %%f0, [%0] %1"
1521 : : "r" (buf), "n" (ASI_BLK_COMMIT_S));
/*
 * fire_intr_enable() fragment: program an INO mapping register with the
 * target module ID (Oberon DESTID vs. Fire JPID field) and an interrupt
 * controller number derived from the target CPU ID, then write it back.
 * NOTE(review): elided extraction, code text left byte-identical.
 */
1528 fire_intr_enable(void *arg)
1530 struct intr_vector *iv;
1531 struct fire_icarg *fica;
1532 struct fire_softc *sc;
1538 fica = iv->iv_icarg;
1541 if (sc->sc_mode == FIRE_MODE_OBERON)
1542 mr |= (iv->iv_mid << OBERON_PCI_IMAP_T_DESTID_SHFT) &
1543 OBERON_PCI_IMAP_T_DESTID_MASK;
1545 mr |= (iv->iv_mid << FIRE_PCI_IMAP_T_JPID_SHFT) &
1546 FIRE_PCI_IMAP_T_JPID_MASK;
1548 * Given that all mondos for the same target are required to use the
1549 * same interrupt controller we just use the CPU ID for indexing the
1553 for (i = 0; i < mp_ncpus; ++i) {
1555 if (pc == NULL || iv->iv_mid != pc->pc_mid)
1557 ctrl = pc->pc_cpuid % 4;
1560 mr |= (1ULL << ctrl) << FO_PCI_IMAP_INT_CTRL_NUM_SHFT &
1561 FO_PCI_IMAP_INT_CTRL_NUM_MASK;
1562 FIRE_PCI_WRITE_8(sc, fica->fica_map, mr);
/*
 * fire_intr_disable() fragment: clear the valid bit (FO_PCI_IMAP_V) in
 * the INO mapping register via read-modify-write.  NOTE(review): elided
 * extraction, code text left byte-identical.
 */
1566 fire_intr_disable(void *arg)
1568 struct intr_vector *iv;
1569 struct fire_icarg *fica;
1570 struct fire_softc *sc;
1573 fica = iv->iv_icarg;
1575 FIRE_PCI_WRITE_8(sc, fica->fica_map,
1576 FIRE_PCI_READ_8(sc, fica->fica_map) & ~FO_PCI_IMAP_V);
/*
 * fire_intr_assign() fragment: retarget an interrupt — if the mapping was
 * valid, invalidate it, wait for the clear register to go idle, then
 * re-enable with the (new) target via fire_intr_enable().  NOTE(review):
 * elided extraction, code text left byte-identical.
 */
1580 fire_intr_assign(void *arg)
1582 struct intr_vector *iv;
1583 struct fire_icarg *fica;
1584 struct fire_softc *sc;
1588 fica = iv->iv_icarg;
1590 mr = FIRE_PCI_READ_8(sc, fica->fica_map);
1591 if ((mr & FO_PCI_IMAP_V) != 0) {
1592 FIRE_PCI_WRITE_8(sc, fica->fica_map, mr & ~FO_PCI_IMAP_V);
1593 FIRE_PCI_BARRIER(sc, fica->fica_map, 8,
1594 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
/* Spin until the interrupt is no longer pending before re-enabling. */
1596 while (FIRE_PCI_READ_8(sc, fica->fica_clr) != INTCLR_IDLE)
1598 if ((mr & FO_PCI_IMAP_V) != 0)
1599 fire_intr_enable(arg);
/*
 * fire_intr_clear() fragment: acknowledge an INO by writing INTCLR_IDLE
 * to its clear register.  NOTE(review): elided extraction, code text
 * left byte-identical.
 */
1603 fire_intr_clear(void *arg)
1605 struct intr_vector *iv;
1606 struct fire_icarg *fica;
1609 fica = iv->iv_icarg;
1610 FIRE_PCI_WRITE_8(fica->fica_sc, fica->fica_clr, INTCLR_IDLE);
1614 * Given that the event queue implementation matches our current MD and MI
1615 * interrupt frameworks like square pegs fit into round holes we are generous
1616 * and use one event queue per MSI for now, which limits us to 35 MSIs/MSI-Xs
1617 * per Host-PCIe-bridge (we use one event queue for the PCIe error messages).
1618 * This seems tolerable as long as most devices just use one MSI/MSI-X anyway.
1619 * Adding knowledge about MSIs/MSI-Xs to the MD interrupt code should allow us
1620 * to decouple the 1:1 mapping at the cost of no longer being able to bind
1621 * MSIs/MSI-Xs to specific CPUs as we currently have no reliable way to
1622 * quiesce a device while we move its MSIs/MSI-Xs to another event queue.
/*
 * fire_alloc_msi() fragment: allocate `count` (a power of two, <= 32)
 * MSIs.  First finds a run of `count` free event queues, then a suitably
 * aligned run of free MSIs, records the 1:1 MSI->MSIQ mapping and hands
 * back the IRQ numbers.  NOTE(review): elided extraction, code text left
 * byte-identical.
 */
1626 fire_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused,
1629 struct fire_softc *sc;
1630 u_int i, j, msiqrun;
1632 if (powerof2(count) == 0 || count > 32)
1635 sc = device_get_softc(dev);
1636 mtx_lock(&sc->sc_msi_mtx);
/* Pass 1: find a run of `count` consecutive free event queues. */
1638 for (i = 0; i < sc->sc_msiq_count; i++) {
1639 for (j = i; j < i + count; j++) {
1640 if (isclr(sc->sc_msiq_bitmap, j) == 0)
1643 if (j == i + count) {
1648 if (i == sc->sc_msiq_count) {
1649 mtx_unlock(&sc->sc_msi_mtx);
/* Pass 2: find `count` consecutive free MSIs, aligned to `count`. */
1652 for (i = 0; i + count < sc->sc_msi_count; i += count) {
1653 for (j = i; j < i + count; j++)
1654 if (isclr(sc->sc_msi_bitmap, j) == 0)
1656 if (j == i + count) {
1657 for (j = 0; j < count; j++) {
1658 setbit(sc->sc_msiq_bitmap, msiqrun + j);
1659 setbit(sc->sc_msi_bitmap, i + j);
1660 sc->sc_msi_msiq_table[i + j] = msiqrun + j;
1661 irqs[j] = sc->sc_msi_first + i + j;
1663 mtx_unlock(&sc->sc_msi_mtx);
1667 mtx_unlock(&sc->sc_msi_mtx);
/*
 * fire_release_msi() fragment: free the event queues and MSIs previously
 * handed out by fire_alloc_msi() by clearing both bitmaps under the MSI
 * mutex.  NOTE(review): elided extraction, code text left byte-identical.
 */
1672 fire_release_msi(device_t dev, device_t child, int count, int *irqs)
1674 struct fire_softc *sc;
1677 sc = device_get_softc(dev);
1678 mtx_lock(&sc->sc_msi_mtx);
1679 for (i = 0; i < count; i++) {
1680 clrbit(sc->sc_msiq_bitmap,
1681 sc->sc_msi_msiq_table[irqs[i] - sc->sc_msi_first]);
1682 clrbit(sc->sc_msi_bitmap, irqs[i] - sc->sc_msi_first);
1684 mtx_unlock(&sc->sc_msi_mtx);
/*
 * fire_alloc_msix() fragment: allocate a single MSI-X.  Requires MSI-X
 * support (FIRE_MSIX flag); picks the first free event queue, then the
 * highest free MSI (MSI-X numbers grow downward here so they do not
 * collide with MSI allocations growing upward).  NOTE(review): elided
 * extraction, code text left byte-identical.
 */
1689 fire_alloc_msix(device_t dev, device_t child, int *irq)
1691 struct fire_softc *sc;
1694 sc = device_get_softc(dev);
1695 if ((sc->sc_flags & FIRE_MSIX) == 0)
1697 mtx_lock(&sc->sc_msi_mtx);
1699 for (i = 0; i < sc->sc_msiq_count; i++) {
1700 if (isclr(sc->sc_msiq_bitmap, i) != 0) {
1705 if (i == sc->sc_msiq_count) {
1706 mtx_unlock(&sc->sc_msi_mtx);
1709 for (i = sc->sc_msi_count - 1; i >= 0; i--) {
1710 if (isclr(sc->sc_msi_bitmap, i) != 0) {
1711 setbit(sc->sc_msiq_bitmap, msiq);
1712 setbit(sc->sc_msi_bitmap, i);
1713 sc->sc_msi_msiq_table[i] = msiq;
1714 *irq = sc->sc_msi_first + i;
1715 mtx_unlock(&sc->sc_msi_mtx);
1719 mtx_unlock(&sc->sc_msi_mtx);
/*
 * fire_release_msix() fragment: free a single MSI-X and its event queue;
 * mirror of fire_release_msi() for the MSI-X case.  NOTE(review): elided
 * extraction, code text left byte-identical.
 */
1724 fire_release_msix(device_t dev, device_t child, int irq)
1726 struct fire_softc *sc;
1728 sc = device_get_softc(dev);
1729 if ((sc->sc_flags & FIRE_MSIX) == 0)
1731 mtx_lock(&sc->sc_msi_mtx);
1732 clrbit(sc->sc_msiq_bitmap,
1733 sc->sc_msi_msiq_table[irq - sc->sc_msi_first]);
1734 clrbit(sc->sc_msi_bitmap, irq - sc->sc_msi_first);
1735 mtx_unlock(&sc->sc_msi_mtx);
/*
 * fire_map_msi() fragment: pcib MSI mapping method — validate the MSI
 * (against the data mask) or MSI-X (against the data width) number and
 * return the 32-bit or 64-bit doorbell address depending on the child's
 * MSI capability.  NOTE(review): elided extraction, code text left
 * byte-identical.
 */
1740 fire_map_msi(device_t dev, device_t child, int irq, uint64_t *addr,
1743 struct fire_softc *sc;
1744 struct pci_devinfo *dinfo;
1746 sc = device_get_softc(dev);
1747 dinfo = device_get_ivars(child);
1748 if (dinfo->cfg.msi.msi_alloc > 0) {
1749 if ((irq & ~sc->sc_msi_data_mask) != 0) {
1750 device_printf(dev, "invalid MSI 0x%x\n", irq);
1754 if ((sc->sc_flags & FIRE_MSIX) == 0)
1756 if (fls(irq) > sc->sc_msix_data_width) {
1757 device_printf(dev, "invalid MSI-X 0x%x\n", irq);
/* 32-bit-only MSI capability gets the 32-bit doorbell address. */
1761 if (dinfo->cfg.msi.msi_alloc > 0 &&
1762 (dinfo->cfg.msi.msi_ctrl & PCIM_MSICTRL_64BIT) == 0)
1763 *addr = sc->sc_msi_addr32;
1765 *addr = sc->sc_msi_addr64;
/*
 * fire_msiq_handler() fragment: interrupt-thread entry for an event
 * queue; serializes access to the queue with the per-queue spin mutex
 * and defers the real work to fire_msiq_common().
 */
1771 fire_msiq_handler(void *cookie)
1773 struct intr_vector *iv;
1774 struct fire_msiqarg *fmqa;
1777 fmqa = iv->iv_icarg;
1779 * Note that since fire_intr_clear() will clear the event queue
1780 * interrupt after the handler associated with the MSI [sic] has
1781 * been executed we have to protect the access to the event queue as
1782 * otherwise nested event queue interrupts cause corruption of the
1783 * event queue on MP machines. Obviously especially when abandoning
1784 * the 1:1 mapping it would be better to not clear the event queue
1785 * interrupt after each handler invocation but only once when the
1786 * outstanding MSIs have been processed but unfortunately that
1787 * doesn't work well and leads to interrupt storms with controllers/
1788 * drivers which don't mask interrupts while the handler is executed.
1789 * Maybe delaying clearing the MSI until after the handler has been
1790 * executed could be used to work around this but that's not the
1791 * intended usage and might in turn cause lost MSIs.
1793 mtx_lock_spin(&fmqa->fmqa_mtx);
1794 fire_msiq_common(iv, fmqa);
1795 mtx_unlock_spin(&fmqa->fmqa_mtx);
/*
 * fire_msiq_filter() fragment: filter (primary interrupt context) entry
 * for an event queue; processes the queue lock-free (see comment below)
 * and acknowledges the event queue interrupt exactly once afterwards.
 */
1799 fire_msiq_filter(void *cookie)
1801 struct intr_vector *iv;
1802 struct fire_msiqarg *fmqa;
1805 fmqa = iv->iv_icarg;
1807 * For filters we don't use fire_intr_clear() since it would clear
1808 * the event queue interrupt while we're still processing the event
1809 * queue as filters and associated post-filter handler are executed
1810 * directly, which in turn would lead to lost MSIs. So we clear the
1811 * event queue interrupt only once after processing the event queue.
1812 * Given that this still guarantees the filters to not be executed
1813 * concurrently and no other CPU can clear the event queue interrupt
1814 * while the event queue is still processed, we don't even need to
1815 * interlock the access to the event queue in this case.
1818 fire_msiq_common(iv, fmqa);
1819 FIRE_PCI_WRITE_8(fmqa->fmqa_fica.fica_sc, fmqa->fmqa_fica.fica_clr,
/*
 * fire_msiq_common() fragment: drain one event queue — for each MSI/MSI-X
 * record, sanity-check it, clear the MSI in hardware, dispatch the MI
 * interrupt event, mark the record consumed and advance the head; finally
 * write back the head pointer and recover from queue overflow.
 * NOTE(review): elided extraction, code text left byte-identical.
 */
1825 fire_msiq_common(struct intr_vector *iv, struct fire_msiqarg *fmqa)
1827 struct fire_softc *sc;
1828 struct fo_msiq_record *qrec;
1831 u_int head, msi, msiq;
1833 sc = fmqa->fmqa_fica.fica_sc;
1835 msiq = fmqa->fmqa_msiq;
1836 head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
1838 qrec = &fmqa->fmqa_base[head];
1839 word0 = qrec->fomqr_word0;
1841 if (__predict_false((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
1843 KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSI64) != 0 ||
1844 (word0 & FO_MQR_WORD0_FMT_TYPE_MSI32) != 0,
1845 ("%s: received non-MSI/MSI-X message in event queue %d "
1846 "(word0 %#llx)", device_get_nameunit(dev), msiq,
1847 (unsigned long long)word0));
1848 msi = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
1849 FO_MQR_WORD0_DATA0_SHFT;
1851 * Sanity check the MSI/MSI-X as long as we use a 1:1 mapping.
1853 KASSERT(msi == fmqa->fmqa_msi,
1854 ("%s: received non-matching MSI/MSI-X in event queue %d "
1855 "(%d versus %d)", device_get_nameunit(dev), msiq, msi,
/* Clear the MSI before dispatch so a new edge is not lost. */
1857 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + (msi << 3),
1858 FO_PCI_MSI_CLR_EQWR_N);
1859 if (__predict_false(intr_event_handle(iv->iv_event,
1861 printf("stray MSI/MSI-X in event queue %d\n", msiq);
1862 qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
1863 head = (head + 1) % sc->sc_msiq_size;
1864 qrec = &fmqa->fmqa_base[head];
1865 word0 = qrec->fomqr_word0;
1867 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
1869 if (__predict_false((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
1870 FO_PCI_EQ_TL_OVERR) != 0)) {
1871 device_printf(dev, "event queue %d overflow\n", msiq);
1873 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1874 FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
1875 FO_PCI_EQ_CTRL_CLR_COVERR);
/*
 * fire_setup_intr() fragment: bus setup_intr method.  For MSI/MSI-X
 * resources (rid != 0) it translates the MSI to its event-queue vector,
 * registers the handler via bus_generic_setup_intr() on the vector,
 * injects the event-queue dispatch function (filter or ithread variant),
 * enables the queue and routes/unmasks the MSI.  For INTx it validates
 * the vector's IGN and controller and defers to the generic method.
 * NOTE(review): elided extraction, code text left byte-identical.
 */
1880 fire_setup_intr(device_t dev, device_t child, struct resource *ires,
1881 int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg,
1884 struct fire_softc *sc;
1885 struct fire_msiqarg *fmqa;
1890 sc = device_get_softc(dev);
1892 * XXX this assumes that a device only has one INTx, while in fact
1893 * Cassini+ and Saturn can use all four the firmware has assigned
1894 * to them, but so does pci(4).
1896 if (rman_get_rid(ires) != 0) {
1897 msi = rman_get_start(ires);
1898 msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
1899 vec = INTMAP_VEC(sc->sc_ign, sc->sc_msiq_ino_first + msiq);
1900 msiq += sc->sc_msiq_first;
1901 if (intr_vectors[vec].iv_ic != &fire_ic) {
1903 "invalid interrupt controller for vector 0x%lx\n",
1908 * The MD interrupt code needs the vector rather than the MSI.
1910 rman_set_start(ires, vec);
1911 rman_set_end(ires, vec);
1912 error = bus_generic_setup_intr(dev, child, ires, flags, filt,
1913 intr, arg, cookiep);
1914 rman_set_start(ires, msi);
1915 rman_set_end(ires, msi);
1918 fmqa = intr_vectors[vec].iv_icarg;
1920 * XXX inject our event queue handler.
1923 intr_vectors[vec].iv_func = fire_msiq_filter;
1924 intr_vectors[vec].iv_ic = &fire_msiqc_filter;
1926 * Ensure the event queue interrupt is cleared, it
1927 * might have triggered before. Given we supply NULL
1928 * as ic_clear, inthand_add() won't do this for us.
1930 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_fica.fica_clr,
1933 intr_vectors[vec].iv_func = fire_msiq_handler;
1934 /* Record the MSI/MSI-X as long as we use a 1:1 mapping. */
1935 fmqa->fmqa_msi = msi;
1936 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (msiq << 3),
1937 FO_PCI_EQ_CTRL_SET_EN);
/* Point the MSI at its event queue, clear it and set the valid bit. */
1939 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1940 (FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
1941 ~FO_PCI_MSI_MAP_EQNUM_MASK) |
1942 ((msiq << FO_PCI_MSI_MAP_EQNUM_SHFT) &
1943 FO_PCI_MSI_MAP_EQNUM_MASK));
1944 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + msi,
1945 FO_PCI_MSI_CLR_EQWR_N);
1946 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1947 FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) |
1953 * Make sure the vector is fully specified and we registered
1954 * our interrupt controller for it.
1956 vec = rman_get_start(ires);
1957 if (INTIGN(vec) != sc->sc_ign) {
1958 device_printf(dev, "invalid interrupt vector 0x%lx\n", vec);
1961 if (intr_vectors[vec].iv_ic != &fire_ic) {
1963 "invalid interrupt controller for vector 0x%lx\n", vec);
1966 return (bus_generic_setup_intr(dev, child, ires, flags, filt, intr,
/*
 * fire_teardown_intr() fragment: inverse of fire_setup_intr().  For MSI/
 * MSI-X it unmaps the MSI, disables and resets its event queue, restores
 * the plain fire_ic controller for the vector and tears down via the
 * generic method using the vector; INTx goes straight to the generic
 * method.  NOTE(review): elided extraction, code text left byte-identical.
 */
1971 fire_teardown_intr(device_t dev, device_t child, struct resource *ires,
1974 struct fire_softc *sc;
1979 sc = device_get_softc(dev);
1980 if (rman_get_rid(ires) != 0) {
1981 msi = rman_get_start(ires);
1982 msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
1983 vec = INTMAP_VEC(sc->sc_ign, msiq + sc->sc_msiq_ino_first);
1984 msiq += sc->sc_msiq_first;
1986 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1987 FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
1990 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1991 FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
1992 FO_PCI_EQ_CTRL_CLR_DIS);
1993 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_TL_BASE + msiq,
1994 (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
1995 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_HD_BASE + msiq,
1996 (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
1997 intr_vectors[vec].iv_ic = &fire_ic;
1999 * The MD interrupt code needs the vector rather than the MSI.
2001 rman_set_start(ires, vec);
2002 rman_set_end(ires, vec);
2003 error = bus_generic_teardown_intr(dev, child, ires, cookie);
2005 rman_set_start(ires, msi);
2006 rman_set_end(ires, msi);
2009 return (bus_generic_teardown_intr(dev, child, ires, cookie));
/*
 * fire_alloc_resource() fragment: bus alloc_resource method.  IRQs are
 * translated to interrupt vectors and passed up to the parent; memory
 * and I/O-port requests are served from the bridge's rman pools with
 * the PCI bus handle as offset, optionally activating the resource.
 * NOTE(review): elided extraction, code text left byte-identical.
 */
static struct resource *
2013 fire_alloc_resource(device_t bus, device_t child, int type, int *rid,
2014 u_long start, u_long end, u_long count, u_int flags)
2016 struct fire_softc *sc;
2017 struct resource *rv;
2020 bus_space_handle_t bh;
2021 int needactivate = flags & RF_ACTIVE;
2023 flags &= ~RF_ACTIVE;
2025 sc = device_get_softc(bus);
2026 if (type == SYS_RES_IRQ) {
2028 * XXX: Don't accept blank ranges for now, only single
2029 * interrupts. The other case should not happen with
2030 * the MI PCI code...
2031 * XXX: This may return a resource that is out of the
2032 * range that was specified. Is this correct...?
2035 panic("%s: XXX: interrupt range", __func__);
2037 start = end = INTMAP_VEC(sc->sc_ign, end);
2038 return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
2039 type, rid, start, end, count, flags));
2042 case SYS_RES_MEMORY:
2043 rm = &sc->sc_pci_mem_rman;
2044 bt = sc->sc_pci_memt;
2045 bh = sc->sc_pci_bh[OFW_PCI_CS_MEM32];
2047 case SYS_RES_IOPORT:
2048 rm = &sc->sc_pci_io_rman;
2049 bt = sc->sc_pci_iot;
2050 bh = sc->sc_pci_bh[OFW_PCI_CS_IO];
2057 rv = rman_reserve_resource(rm, start, end, count, flags, child);
2060 rman_set_rid(rv, *rid);
2061 bh += rman_get_start(rv);
2062 rman_set_bustag(rv, bt);
2063 rman_set_bushandle(rv, bh);
2066 if (bus_activate_resource(child, type, *rid, rv)) {
2067 rman_release_resource(rv);
/*
 * fire_activate_resource() fragment: IRQs go to the parent bus; memory
 * resources are mapped into KVA (some drivers rely on the virtual
 * address) before rman activation.  NOTE(review): elided extraction,
 * code text left byte-identical.
 */
2075 fire_activate_resource(device_t bus, device_t child, int type, int rid,
2081 if (type == SYS_RES_IRQ)
2082 return (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child,
2084 if (type == SYS_RES_MEMORY) {
2086 * Need to memory-map the device space, as some drivers
2087 * depend on the virtual address being set and usable.
2089 error = sparc64_bus_mem_map(rman_get_bustag(r),
2090 rman_get_bushandle(r), rman_get_size(r), 0, 0, &p);
2093 rman_set_virtual(r, p);
2095 return (rman_activate_resource(r));
/*
 * fire_deactivate_resource() fragment: inverse of
 * fire_activate_resource() — unmap memory resources and clear the
 * virtual address before rman deactivation.  NOTE(review): elided
 * extraction, code text left byte-identical.
 */
2099 fire_deactivate_resource(device_t bus, device_t child, int type, int rid,
2103 if (type == SYS_RES_IRQ)
2104 return (BUS_DEACTIVATE_RESOURCE(device_get_parent(bus), child,
2106 if (type == SYS_RES_MEMORY) {
2107 sparc64_bus_mem_unmap(rman_get_virtual(r), rman_get_size(r));
2108 rman_set_virtual(r, NULL);
2110 return (rman_deactivate_resource(r));
/*
 * fire_release_resource() fragment: IRQs go to the parent; active
 * memory/I/O resources are deactivated first, then released to the rman
 * pool.  NOTE(review): elided extraction, code text left byte-identical.
 */
2114 fire_release_resource(device_t bus, device_t child, int type, int rid,
2119 if (type == SYS_RES_IRQ)
2120 return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child,
2122 if (rman_get_flags(r) & RF_ACTIVE) {
2123 error = bus_deactivate_resource(child, type, rid, r);
2127 return (rman_release_resource(r));
/*
 * fire_get_dma_tag() fragment: hand the bridge's PCI DMA tag to children
 * (braces elided from this view).
 */
static bus_dma_tag_t
2131 fire_get_dma_tag(device_t bus, device_t child)
2133 struct fire_softc *sc;
2135 sc = device_get_softc(bus);
2136 return (sc->sc_pci_dmat);
/*
 * fire_get_node() fragment: OFW bus method returning the bridge's own
 * firmware node for its sole child, the PCI bus (braces elided from
 * this view).
 */
2140 fire_get_node(device_t bus, device_t dev)
2142 struct fire_softc *sc;
2144 sc = device_get_softc(bus);
2145 /* We only have one child, the PCI bus, which needs our own node. */
2146 return (sc->sc_node);
/*
 * fire_alloc_bus_tag() fragment: allocate and initialize a derived
 * bus_space tag for PCI accesses, parented on the FIRE_PCI register
 * tag; panics on allocation failure.  NOTE(review): elided extraction,
 * code text left byte-identical.
 */
static bus_space_tag_t
2150 fire_alloc_bus_tag(struct fire_softc *sc, int type)
2154 bt = malloc(sizeof(struct bus_space_tag), M_DEVBUF,
2157 panic("%s: out of memory", __func__);
2159 bt->bst_cookie = sc;
2160 bt->bst_parent = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]);
2161 bt->bst_type = type;
/*
 * fire_get_timecount() fragment (end of function past this view):
 * timecounter read hook backed by the XBC performance counter 0,
 * masked to the counter width.
 */
2166 fire_get_timecount(struct timecounter *tc)
2168 struct fire_softc *sc;
2171 return (FIRE_CTRL_READ_8(sc, FO_XBC_PRF_CNT0) & TC_COUNTER_MAX_MASK);