2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
6 * This software was developed by SRI International and the University of
7 * Cambridge Computer Laboratory (Department of Computer Science and
8 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
9 * DARPA SSITH research programme.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include "opt_platform.h"
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/malloc.h>
41 #include <sys/types.h>
42 #include <sys/sysctl.h>
43 #include <sys/kernel.h>
45 #include <sys/module.h>
47 #include <sys/endian.h>
48 #include <sys/cpuset.h>
49 #include <sys/mutex.h>
52 #include <machine/intr.h>
53 #include <machine/bus.h>
56 #include <vm/vm_extern.h>
57 #include <vm/vm_kern.h>
60 #include <dev/ofw/openfirm.h>
61 #include <dev/ofw/ofw_bus.h>
62 #include <dev/ofw/ofw_bus_subr.h>
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
66 #include <dev/pci/pci_host_generic.h>
67 #include <dev/pci/pci_host_generic_fdt.h>
68 #include <dev/pci/pcib_private.h>
70 #include "xlnx_pcib.h"
72 #include "ofw_bus_if.h"
77 #define XLNX_PCIB_MAX_MSI 64
79 static int xlnx_pcib_fdt_attach(device_t);
80 static int xlnx_pcib_fdt_probe(device_t);
81 static int xlnx_pcib_fdt_get_id(device_t, device_t, enum pci_id_type,
83 static void xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc,
/*
 * Per-device software context.  The generic FDT PCIe host softc is
 * embedded first so a pointer to this softc can be used wherever the
 * pci_host_generic(_fdt) core expects its own softc.
 */
86 struct xlnx_pcib_softc {
87 struct generic_pcie_fdt_softc fdt_sc;	/* core driver state; must be first */
88 struct resource *res[4];	/* 1 memory window + 3 IRQs, see xlnx_pcib_spec */
91 struct xlnx_pcib_irqsrc *isrcs;	/* per-vector MSI sources (XLNX_PCIB_MAX_MSI) */
/*
 * Resources claimed at attach time: the bridge register window plus
 * three interrupt lines (misc/error, MSI0, MSI1 — wired up in
 * xlnx_pcib_fdt_attach()).
 */
96 static struct resource_spec xlnx_pcib_spec[] = {
97 { SYS_RES_MEMORY, 0, RF_ACTIVE },	/* bridge registers */
98 { SYS_RES_IRQ, 0, RF_ACTIVE },	/* misc/error interrupt */
99 { SYS_RES_IRQ, 1, RF_ACTIVE },	/* MSI decode register 1 */
100 { SYS_RES_IRQ, 2, RF_ACTIVE },	/* MSI decode register 2 */
/*
 * One interrupt source per MSI vector; wraps the INTRNG irqsrc so the
 * framework object and our per-vector state travel together.
 */
104 struct xlnx_pcib_irqsrc {
105 struct intr_irqsrc isrc;	/* INTRNG interrupt source; must be first */
107 #define XLNX_IRQ_FLAG_USED (1 << 0)	/* vector has been handed out */
/*
 * Drain the root-port error FIFO.  If a valid entry is present, log the
 * requester ID that caused it, then clear the FIFO by writing all-ones
 * back to the register (write-one-to-clear).
 */
112 xlnx_pcib_clear_err_interrupts(struct generic_pcie_core_softc *sc)
116 reg = bus_read_4(sc->res, XLNX_PCIE_RPERRFRR);
118 if (reg & RPERRFRR_VALID) {
119 device_printf(sc->dev, "Requested ID: %x\n",
120 reg & RPERRFRR_REQ_ID_M);
121 bus_write_4(sc->res, XLNX_PCIE_RPERRFRR, ~0U);	/* clear the FIFO entry */
/*
 * Misc/error interrupt filter.  Reads the interrupt decode (IDR) and
 * mask (IMR) registers, reports each asserted condition, drains the
 * error FIFO for error classes, and acknowledges everything by writing
 * the decoded value back to IDR (write-one-to-clear).  Most conditions
 * are log-only; FIFO-mode MSI and legacy INTx are not implemented.
 */
126 xlnx_pcib_intr(void *arg)
128 struct generic_pcie_fdt_softc *fdt_sc;
129 struct generic_pcie_core_softc *sc;
130 struct xlnx_pcib_softc *xlnx_sc;
131 uint32_t val, mask, status;
134 fdt_sc = &xlnx_sc->fdt_sc;
137 val = bus_read_4(sc->res, XLNX_PCIE_IDR);
138 mask = bus_read_4(sc->res, XLNX_PCIE_IMR);
142 return (FILTER_HANDLED);	/* nothing of interest pending */
144 if (status & IMR_LINK_DOWN)
145 device_printf(sc->dev, "Link down");
147 if (status & IMR_HOT_RESET)
148 device_printf(sc->dev, "Hot reset");
/* For every error class, report and drain the error FIFO. */
150 if (status & IMR_CORRECTABLE)
151 xlnx_pcib_clear_err_interrupts(sc);
153 if (status & IMR_FATAL)
154 xlnx_pcib_clear_err_interrupts(sc);
156 if (status & IMR_NON_FATAL)
157 xlnx_pcib_clear_err_interrupts(sc);
159 if (status & IMR_MSI) {
160 device_printf(sc->dev, "MSI interrupt");
162 /* FIFO mode MSI not implemented. */
165 if (status & IMR_INTX) {
166 device_printf(sc->dev, "INTx received");
168 /* Not implemented. */
171 if (status & IMR_SLAVE_UNSUPP_REQ)
172 device_printf(sc->dev, "Slave unsupported request");
174 if (status & IMR_SLAVE_UNEXP_COMPL)
175 device_printf(sc->dev, "Slave unexpected completion");
177 if (status & IMR_SLAVE_COMPL_TIMOUT)
178 device_printf(sc->dev, "Slave completion timeout");
180 if (status & IMR_SLAVE_ERROR_POISON)
181 device_printf(sc->dev, "Slave error poison");
183 if (status & IMR_SLAVE_COMPL_ABORT)
184 device_printf(sc->dev, "Slave completion abort");
186 if (status & IMR_SLAVE_ILLEG_BURST)
187 device_printf(sc->dev, "Slave illegal burst");
189 if (status & IMR_MASTER_DECERR)
190 device_printf(sc->dev, "Master decode error");
192 if (status & IMR_MASTER_SLVERR)
193 device_printf(sc->dev, "Master slave error");
/* Acknowledge all handled conditions (write-one-to-clear). */
195 bus_write_4(sc->res, XLNX_PCIE_IDR, val);
197 return (FILTER_HANDLED);
/*
 * Common MSI demultiplexer for both 32-bit MSI decode registers.
 * For each bit set in 'msireg', acknowledge that bit, map it to an MSI
 * vector number (vectors from the second register are offset past the
 * first register's 32), and dispatch the corresponding irqsrc.  A
 * vector with no registered handler is treated as stray: it is masked
 * and reported so it cannot storm.
 */
201 xlnx_pcib_handle_msi_intr(void *arg, int msireg)
203 struct generic_pcie_fdt_softc *fdt_sc;
204 struct generic_pcie_core_softc *sc;
205 struct xlnx_pcib_softc *xlnx_sc;
206 struct xlnx_pcib_irqsrc *xi;
207 struct trapframe *tf;
213 fdt_sc = &xlnx_sc->fdt_sc;
215 tf = curthread->td_intr_frame;
218 reg = bus_read_4(sc->res, msireg);
220 for (i = 0; i < sizeof(uint32_t) * 8; i++) {
221 if (reg & (1 << i)) {
222 bus_write_4(sc->res, msireg, (1 << i));	/* ack this vector */
/* Second decode register covers vectors 32..63. */
225 if (msireg == XLNX_PCIE_RPMSIID2)
228 xi = &xlnx_sc->isrcs[irq];
229 if (intr_isrc_dispatch(&xi->isrc, tf) != 0) {
/* No handler registered: mask the stray vector. */
231 xlnx_pcib_msi_mask(sc->dev,
233 device_printf(sc->dev,
234 "Stray irq %u disabled\n", irq);
/* Filter for the first MSI line: demux vectors 0..31 via RPMSIID1. */
242 xlnx_pcib_msi0_intr(void *arg)
245 xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID1);
247 return (FILTER_HANDLED);
/* Filter for the second MSI line: demux vectors 32..63 via RPMSIID2. */
251 xlnx_pcib_msi1_intr(void *arg)
254 xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID2);
256 return (FILTER_HANDLED);
/*
 * Allocate and register the fixed pool of XLNX_PCIB_MAX_MSI interrupt
 * sources with INTRNG, then advertise this device as an MSI controller
 * under its own OFW xref so child devices can route MSIs to it.
 */
260 xlnx_pcib_register_msi(struct xlnx_pcib_softc *sc)
266 sc->isrcs = malloc(sizeof(*sc->isrcs) * XLNX_PCIB_MAX_MSI, M_DEVBUF,
269 name = device_get_nameunit(sc->dev);
271 for (irq = 0; irq < XLNX_PCIB_MAX_MSI; irq++) {
272 sc->isrcs[irq].irq = irq;
273 error = intr_isrc_register(&sc->isrcs[irq].isrc,
274 sc->dev, 0, "%s,%u", name, irq);
276 return (error); /* XXX deregister ISRCs */
279 if (intr_msi_register(sc->dev,
280 OF_xref_from_node(ofw_bus_get_node(sc->dev))) != 0)
/*
 * Bring up the bridge: quiesce and clear interrupts, program the MSI
 * doorbell page, enable the root port, then unmask the interrupt
 * conditions the driver handles.
 */
287 xlnx_pcib_init(struct xlnx_pcib_softc *sc)
292 /* Disable interrupts. */
293 bus_write_4(sc->res[0], XLNX_PCIE_IMR, 0);
295 /* Clear pending interrupts.*/
296 reg = bus_read_4(sc->res[0], XLNX_PCIE_IDR);
297 bus_write_4(sc->res[0], XLNX_PCIE_IDR, reg);	/* ack everything seen */
299 /* Setup an MSI page. */
/*
 * A page of wired, contiguous memory whose physical address acts as
 * the MSI doorbell target; its address is handed out in map_msi().
 */
300 sc->msi_page = kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
301 BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
302 addr = vtophys(sc->msi_page);
303 bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR1, (addr >> 32));	/* high 32 bits */
304 bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR2, (addr >> 0));	/* low 32 bits */
306 /* Enable the bridge. */
307 reg = bus_read_4(sc->res[0], XLNX_PCIE_RPSCR);
309 bus_write_4(sc->res[0], XLNX_PCIE_RPSCR, reg);
311 /* Enable interrupts. */
314 | IMR_CFG_COMPL_STATUS_M
321 | IMR_SLAVE_UNSUPP_REQ
322 | IMR_SLAVE_UNEXP_COMPL
323 | IMR_SLAVE_COMPL_TIMOUT
324 | IMR_SLAVE_ERROR_POISON
325 | IMR_SLAVE_COMPL_ABORT
326 | IMR_SLAVE_ILLEG_BURST
329 bus_write_4(sc->res[0], XLNX_PCIE_IMR, reg);
/*
 * Probe: match an enabled FDT node compatible with the Xilinx XDMA
 * PCIe host bridge.
 */
333 xlnx_pcib_fdt_probe(device_t dev)
336 if (!ofw_bus_status_okay(dev))
339 if (ofw_bus_is_compatible(dev, "xlnx,xdma-host-3.00")) {
340 device_set_desc(dev, "Xilinx XDMA PCIe Controller");
341 return (BUS_PROBE_DEFAULT);
/*
 * Attach: allocate the register window and three IRQ lines, hook the
 * misc/error filter plus the two MSI demux filters, then release the
 * locally-held resources so the generic host driver can map the
 * registers itself, register the MSI pool, and finally hand off to
 * pci_host_generic_attach() for bus enumeration.
 */
348 xlnx_pcib_fdt_attach(device_t dev)
350 struct xlnx_pcib_softc *sc;
353 sc = device_get_softc(dev);
356 mtx_init(&sc->mtx, "msi_mtx", NULL, MTX_DEF);	/* protects MSI vector pool */
358 if (bus_alloc_resources(dev, xlnx_pcib_spec, sc->res)) {
359 device_printf(dev, "could not allocate resources\n");
363 /* Setup MISC interrupt handler. */
364 error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
365 xlnx_pcib_intr, NULL, sc, &sc->intr_cookie[0]);
367 device_printf(dev, "could not setup interrupt handler.\n");
371 /* Setup MSI0 interrupt handler. */
372 error = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
373 xlnx_pcib_msi0_intr, NULL, sc, &sc->intr_cookie[1]);
375 device_printf(dev, "could not setup interrupt handler.\n");
379 /* Setup MSI1 interrupt handler. */
380 error = bus_setup_intr(dev, sc->res[3], INTR_TYPE_MISC | INTR_MPSAFE,
381 xlnx_pcib_msi1_intr, NULL, sc, &sc->intr_cookie[2]);
383 device_printf(dev, "could not setup interrupt handler.\n");
390 * Allow the core driver to map registers.
391 * We will be accessing the device memory using core_softc.
393 bus_release_resources(dev, xlnx_pcib_spec, sc->res);
395 error = xlnx_pcib_register_msi(sc);
399 return (pci_host_generic_attach(dev));
/*
 * Resolve a child device's ID.  Non-MSI requests go to the standard
 * pcib path; if the FDT node carries an "msi-map" property the generic
 * PCIe translation is used; otherwise the MSI ID is built directly
 * from the child's PCI domain and requester ID (bus/slot/function).
 */
403 xlnx_pcib_fdt_get_id(device_t pci, device_t child, enum pci_id_type type,
409 if (type != PCI_ID_MSI)
410 return (pcib_get_id(pci, child, type, id));
412 node = ofw_bus_get_node(pci);
413 if (OF_hasprop(node, "msi-map"))
414 return (generic_pcie_get_id(pci, child, type, id));
416 bsf = pci_get_rid(child);
417 *id = (pci_get_domain(child) << PCI_RID_DOMAIN_SHIFT) | bsf;
/*
 * Validate a config-space request before touching the ECAM window:
 * the bus must be inside the decoded range, slot/function/register
 * within PCI limits, only slot 0 is valid on the root bus, and the
 * PCIe link must be up (checked via the PHY status register) —
 * otherwise a config access could hang or fault.
 */
423 xlnx_pcib_req_valid(struct generic_pcie_core_softc *sc,
424 u_int bus, u_int slot, u_int func, u_int reg)
426 bus_space_handle_t h;
433 if ((bus < sc->bus_start) || (bus > sc->bus_end))
435 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
439 if (bus == 0 && slot > 0)	/* root port: only device 0 exists */
442 val = bus_space_read_4(t, h, XLNX_PCIE_PHYSCR);
443 if ((val & PHYSCR_LINK_UP) == 0) {
/*
 * Config-space read.  Always performs an aligned 32-bit read of the
 * ECAM window, then shifts and narrows the result for 1- and 2-byte
 * accesses, converting from little-endian register layout.
 */
454 xlnx_pcib_read_config(device_t dev, u_int bus, u_int slot,
455 u_int func, u_int reg, int bytes)
457 struct generic_pcie_fdt_softc *fdt_sc;
458 struct xlnx_pcib_softc *xlnx_sc;
459 struct generic_pcie_core_softc *sc;
460 bus_space_handle_t h;
465 xlnx_sc = device_get_softc(dev);
466 fdt_sc = &xlnx_sc->fdt_sc;
469 if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
472 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
/* Aligned 32-bit read; sub-word data is extracted below. */
476 data = bus_space_read_4(t, h, offset & ~3);
480 data >>= (offset & 3) * 8;	/* byte access */
484 data >>= (offset & 3) * 8;	/* 16-bit access */
485 data = le16toh(data);
488 data = le32toh(data);	/* full 32-bit access */
/*
 * Config-space write.  Sub-word writes are done as an aligned 32-bit
 * read-modify-write of the ECAM window (see the hardware note below);
 * full-word writes go straight through.
 */
498 xlnx_pcib_write_config(device_t dev, u_int bus, u_int slot,
499 u_int func, u_int reg, uint32_t val, int bytes)
501 struct generic_pcie_fdt_softc *fdt_sc;
502 struct xlnx_pcib_softc *xlnx_sc;
503 struct generic_pcie_core_softc *sc;
504 bus_space_handle_t h;
509 xlnx_sc = device_get_softc(dev);
510 fdt_sc = &xlnx_sc->fdt_sc;
513 if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
516 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
522 * 32-bit access used due to a bug in the Xilinx bridge that
523 * requires to write primary and secondary buses in one blast.
525 * TODO: This is probably wrong on big-endian.
/* 1-byte write: RMW the containing 32-bit word. */
529 data = bus_space_read_4(t, h, offset & ~3);
530 data &= ~(0xff << ((offset & 3) * 8));
531 data |= (val & 0xff) << ((offset & 3) * 8);
532 bus_space_write_4(t, h, offset & ~3, htole32(data));
/* 2-byte write: RMW the containing 32-bit word. */
535 data = bus_space_read_4(t, h, offset & ~3);
536 data &= ~(0xffff << ((offset & 3) * 8));
537 data |= (val & 0xffff) << ((offset & 3) * 8);
538 bus_space_write_4(t, h, offset & ~3, htole32(data));
/* 4-byte write: direct. */
541 bus_space_write_4(t, h, offset, htole32(val));
/*
 * pcib MSI allocation shim: resolve the MSI parent (this bridge's own
 * OFW xref) and forward to the INTRNG MSI allocator.
 * NOTE(review): msi_parent is overwritten with our own xref right
 * after the ofw_bus_msimap() lookup — confirm against the full source
 * whether the msimap result is intentionally ignored here.
 */
549 xlnx_pcib_alloc_msi(device_t pci, device_t child, int count, int maxcount,
552 phandle_t msi_parent;
554 ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
556 msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
557 return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
/*
 * pcib MSI release shim: resolve the MSI parent (this bridge's own
 * OFW xref) and forward to the INTRNG MSI release path.
 */
562 xlnx_pcib_release_msi(device_t pci, device_t child, int count, int *irqs)
564 phandle_t msi_parent;
566 ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
568 msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
569 return (intr_release_msi(pci, child, msi_parent, count, irqs));
/*
 * pcib MSI map shim: resolve the MSI parent (this bridge's own OFW
 * xref) and forward to INTRNG to obtain the doorbell address/data pair.
 */
573 xlnx_pcib_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
576 phandle_t msi_parent;
578 ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
580 msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
581 return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
/*
 * MSI controller allocation: linear scan of the fixed vector pool for
 * 'count' contiguous unused vectors, performed under the softc mutex.
 * On success the vectors are flagged used and returned as irqsrcs;
 * this device's parent is reported as the PIC.
 */
585 xlnx_pcib_msi_alloc_msi(device_t dev, device_t child, int count, int maxcount,
586 device_t *pic, struct intr_irqsrc **srcs)
588 struct xlnx_pcib_softc *sc;
592 sc = device_get_softc(dev);
598 for (irq = 0; (irq + count - 1) < XLNX_PCIB_MAX_MSI; irq++) {
599 /* Assume the range is valid. */
602 /* Check this range is valid. */
603 for (end_irq = irq; end_irq < irq + count; end_irq++) {
604 if (sc->isrcs[end_irq].flags & XLNX_IRQ_FLAG_USED) {
605 /* This is already used. */
615 if (!found || irq == (XLNX_PCIB_MAX_MSI - 1)) {
616 /* Not enough interrupts were found. */
617 mtx_unlock(&sc->mtx);
621 /* Mark the interrupt as used. */
622 for (i = 0; i < count; i++)
623 sc->isrcs[irq + i].flags |= XLNX_IRQ_FLAG_USED;
625 mtx_unlock(&sc->mtx);
627 for (i = 0; i < count; i++)
628 srcs[i] = (struct intr_irqsrc *)&sc->isrcs[irq + i];
630 *pic = device_get_parent(dev);
/*
 * MSI controller release: return each vector to the pool by clearing
 * its USED flag (under the softc mutex).  A release of an unused
 * vector indicates a framework/driver bug and trips the KASSERT.
 */
636 xlnx_pcib_msi_release_msi(device_t dev, device_t child, int count,
637 struct intr_irqsrc **isrc)
639 struct xlnx_pcib_softc *sc;
640 struct xlnx_pcib_irqsrc *xi;
643 sc = device_get_softc(dev);
645 for (i = 0; i < count; i++) {
646 xi = (struct xlnx_pcib_irqsrc *)isrc[i];
648 KASSERT(xi->flags & XLNX_IRQ_FLAG_USED,
649 ("%s: Releasing an unused MSI interrupt", __func__));
651 xi->flags &= ~XLNX_IRQ_FLAG_USED;
654 mtx_unlock(&sc->mtx);
/*
 * MSI controller map: hand out the doorbell address for a vector —
 * the physical address of the MSI page programmed into RPMSIBR1/2 in
 * xlnx_pcib_init().
 */
659 xlnx_pcib_msi_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
660 uint64_t *addr, uint32_t *data)
662 struct xlnx_pcib_softc *sc;
663 struct xlnx_pcib_irqsrc *xi;
665 sc = device_get_softc(dev);
666 xi = (struct xlnx_pcib_irqsrc *)isrc;
668 *addr = vtophys(sc->msi_page);	/* doorbell target programmed at init */
/*
 * Mask or unmask a single MSI vector by read-modify-writing the mask
 * register that covers it: RPMSIID1_MASK for vectors 0..31,
 * RPMSIID2_MASK for vectors 32..63.
 */
675 xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc, bool mask)
677 struct generic_pcie_fdt_softc *fdt_sc;
678 struct generic_pcie_core_softc *sc;
679 struct xlnx_pcib_softc *xlnx_sc;
680 struct xlnx_pcib_irqsrc *xi;
681 uint32_t msireg, irq;
684 xlnx_sc = device_get_softc(dev);
685 fdt_sc = &xlnx_sc->fdt_sc;
688 xi = (struct xlnx_pcib_irqsrc *)isrc;
/* Select the mask register covering this vector. */
692 msireg = XLNX_PCIE_RPMSIID1_MASK;
694 msireg = XLNX_PCIE_RPMSIID2_MASK;
696 reg = bus_read_4(sc->res, msireg);
701 bus_write_4(sc->res, msireg, reg);	/* write back updated mask */
/* PIC disable: mask the vector. */
705 xlnx_pcib_msi_disable_intr(device_t dev, struct intr_irqsrc *isrc)
708 xlnx_pcib_msi_mask(dev, isrc, true);
/* PIC enable: unmask the vector. */
712 xlnx_pcib_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc)
715 xlnx_pcib_msi_mask(dev, isrc, false);
/* PIC post-filter hook: nothing extra to do after the filter runs. */
719 xlnx_pcib_msi_post_filter(device_t dev, struct intr_irqsrc *isrc)
/* PIC post-ithread hook: re-enable the vector after the ithread runs. */
725 xlnx_pcib_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc)
728 xlnx_pcib_msi_mask(dev, isrc, false);
/* PIC pre-ithread hook: mask the vector while the ithread is pending. */
732 xlnx_pcib_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
735 xlnx_pcib_msi_mask(dev, isrc, true);
/* PIC setup hook: no per-handler setup required for MSI vectors. */
739 xlnx_pcib_msi_setup_intr(device_t dev, struct intr_irqsrc *isrc,
740 struct resource *res, struct intr_map_data *data)
/* PIC teardown hook: no per-handler teardown required. */
747 xlnx_pcib_msi_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
748 struct resource *res, struct intr_map_data *data)
/*
 * Method table: overrides the generic FDT PCIe host driver with
 * Xilinx-specific config accessors, ID resolution, the pcib MSI shims,
 * and the msi/pic interfaces backing this bridge's built-in MSI
 * controller.
 */
754 static device_method_t xlnx_pcib_fdt_methods[] = {
755 /* Device interface */
756 DEVMETHOD(device_probe, xlnx_pcib_fdt_probe),
757 DEVMETHOD(device_attach, xlnx_pcib_fdt_attach),
/* pcib interface */
760 DEVMETHOD(pcib_get_id, xlnx_pcib_fdt_get_id),
761 DEVMETHOD(pcib_read_config, xlnx_pcib_read_config),
762 DEVMETHOD(pcib_write_config, xlnx_pcib_write_config),
763 DEVMETHOD(pcib_alloc_msi, xlnx_pcib_alloc_msi),
764 DEVMETHOD(pcib_release_msi, xlnx_pcib_release_msi),
765 DEVMETHOD(pcib_map_msi, xlnx_pcib_map_msi),
/* MSI interface */
768 DEVMETHOD(msi_alloc_msi, xlnx_pcib_msi_alloc_msi),
769 DEVMETHOD(msi_release_msi, xlnx_pcib_msi_release_msi),
770 DEVMETHOD(msi_map_msi, xlnx_pcib_msi_map_msi),
772 /* Interrupt controller interface */
773 DEVMETHOD(pic_disable_intr, xlnx_pcib_msi_disable_intr),
774 DEVMETHOD(pic_enable_intr, xlnx_pcib_msi_enable_intr),
775 DEVMETHOD(pic_setup_intr, xlnx_pcib_msi_setup_intr),
776 DEVMETHOD(pic_teardown_intr, xlnx_pcib_msi_teardown_intr),
777 DEVMETHOD(pic_post_filter, xlnx_pcib_msi_post_filter),
778 DEVMETHOD(pic_post_ithread, xlnx_pcib_msi_post_ithread),
779 DEVMETHOD(pic_pre_ithread, xlnx_pcib_msi_pre_ithread),
/*
 * Register the driver as a subclass of the generic FDT PCIe host
 * driver, attaching under both simplebus and ofwbus so the bridge is
 * found regardless of how the FDT node is parented.
 */
785 DEFINE_CLASS_1(pcib, xlnx_pcib_fdt_driver, xlnx_pcib_fdt_methods,
786 sizeof(struct xlnx_pcib_softc), generic_pcie_fdt_driver);
788 static devclass_t xlnx_pcib_fdt_devclass;
790 DRIVER_MODULE(xlnx_pcib, simplebus, xlnx_pcib_fdt_driver,
791 xlnx_pcib_fdt_devclass, 0, 0);
792 DRIVER_MODULE(xlnx_pcib, ofwbus, xlnx_pcib_fdt_driver,
793 xlnx_pcib_fdt_devclass, 0, 0);