2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright 2019 Cisco Systems, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/types.h>
33 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/module.h>
38 #include <sys/systm.h>
39 #include <sys/malloc.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
45 #include <sys/mutex.h>
46 #include <sys/taskqueue.h>
48 #include <sys/pciio.h>
49 #include <dev/pci/pcivar.h>
50 #include <dev/pci/pcireg.h>
51 #include <dev/pci/pci_private.h>
52 #include <dev/pci/pcib_private.h>
54 #define TASK_QUEUE_INTR 1
55 #include <dev/vmd/vmd.h>
/* vmd_type.flags value: device constrains the starting child bus number. */
#define BUS_RESTRICT 1

#define INTEL_VENDOR_ID 0x8086
#define INTEL_DEVICE_ID_201d 0x201d
#define INTEL_DEVICE_ID_28c0 0x28c0
#define INTEL_DEVICE_ID_467f 0x467f
#define INTEL_DEVICE_ID_4c3d 0x4c3d
#define INTEL_DEVICE_ID_9a0b 0x9a0b

/* Capability bit: VMD_CONFIG carries a restricted bus-start selector. */
#define VMD_BUS_RESTRICT 0x1

#define VMD_CONFIG 0x44
/*
 * Extract the 2-bit bus-start selector from the VMD_CONFIG register.
 * The macro argument is parenthesized so expressions such as
 * VMD_BUS_START(a | b) group correctly ('>>' binds tighter than '|').
 */
#define VMD_BUS_START(x) (((x) >> 8) & 0x3)
/*
 * Table of supported VMD devices, matched by vmd_probe()/vmd_attach().
 * The flags column marks parts whose child-bus numbering is restricted
 * (BUS_RESTRICT).  NOTE(review): the terminating sentinel entry and the
 * closing brace are elided in this excerpt of the file.
 */
static struct vmd_type vmd_devs[] = {
	{ INTEL_VENDOR_ID, INTEL_DEVICE_ID_201d, "Intel Volume Management Device", 0 },
	{ INTEL_VENDOR_ID, INTEL_DEVICE_ID_28c0, "Intel Volume Management Device", BUS_RESTRICT },
	{ INTEL_VENDOR_ID, INTEL_DEVICE_ID_467f, "Intel Volume Management Device", BUS_RESTRICT },
	{ INTEL_VENDOR_ID, INTEL_DEVICE_ID_4c3d, "Intel Volume Management Device", BUS_RESTRICT },
	{ INTEL_VENDOR_ID, INTEL_DEVICE_ID_9a0b, "Intel Volume Management Device", BUS_RESTRICT },
/*
 * device_probe method: match this PCI device's vendor/device IDs against
 * the vmd_devs table; on a hit, set the description and claim the device.
 * NOTE(review): the declarations of vid/did/t, the table-advance step, and
 * the fall-through return are elided in this excerpt.
 */
vmd_probe(device_t dev)
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	/* Walk the name-terminated vmd_devs table. */
	while (t->vmd_name != NULL) {
		if (vid == t->vmd_vid &&
			/* Matched: advertise the human-readable name. */
			device_set_desc(dev, t->vmd_name);
			return (BUS_PROBE_DEFAULT);
/*
 * Tear down everything vmd_attach() acquired: the child-bus rman, the
 * interrupt taskqueue (TASK_QUEUE_INTR build), each MSI-X interrupt
 * handler and IRQ resource, the registered-handler list, the MSI vectors,
 * the BAR memory resources, the I/O window, and (non-taskqueue build)
 * the IRQ mutex.  Several closing braces are elided in this excerpt.
 */
vmd_free(struct vmd_softc *sc)
	struct vmd_irq_handler *elm, *tmp;
	/* rm_end is left 0 by vmd_attach() when rman_init() failed. */
	if (sc->vmd_bus.rman.rm_end != 0)
		rman_fini(&sc->vmd_bus.rman);
#ifdef TASK_QUEUE_INTR
	if (sc->vmd_irq_tq != NULL) {
		/* Wait for any queued dispatch to finish before freeing. */
		taskqueue_drain(sc->vmd_irq_tq, &sc->vmd_irq_task);
		taskqueue_free(sc->vmd_irq_tq);
		sc->vmd_irq_tq = NULL;
	if (sc->vmd_irq != NULL) {
		/* Undo bus_setup_intr()/bus_alloc_resource_any() per vector. */
		for (i = 0; i < sc->vmd_msix_count; i++) {
			if (sc->vmd_irq[i].vmd_res != NULL) {
				bus_teardown_intr(sc->vmd_dev,
				    sc->vmd_irq[i].vmd_res,
				    sc->vmd_irq[i].vmd_handle);
				bus_release_resource(sc->vmd_dev, SYS_RES_IRQ,
				    sc->vmd_irq[i].vmd_rid,
				    sc->vmd_irq[i].vmd_res);
	/* Drain the handler list built by vmd_setup_intr(). */
	TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list ,vmd_link,
		TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link);
	free(sc->vmd_irq, M_DEVBUF);
	pci_release_msi(sc->vmd_dev);
	for (i = 0; i < VMD_MAX_BAR; i++) {
		if (sc->vmd_regs_resource[i] != NULL)
			bus_release_resource(sc->vmd_dev, SYS_RES_MEMORY,
			    sc->vmd_regs_resource[i]);
	if (sc->vmd_io_resource)
		bus_release_resource(device_get_parent(sc->vmd_dev),
		    SYS_RES_IOPORT, sc->vmd_io_rid, sc->vmd_io_resource);
#ifndef TASK_QUEUE_INTR
	if (mtx_initialized(&sc->vmd_irq_lock)) {
		mtx_destroy(&sc->vmd_irq_lock);
/* The hidden PCI root's config space is memory-mapped through BAR(0). */
/*
 * pcib_read_config method: read a register of a child device.  Child
 * config space is memory-mapped behind BAR(0); the byte offset packs
 * (bus - vmd_bus_start) << 20 | slot << 15 | func << 12 | reg.
 * Buses below vmd_bus_start are rejected (early-return path elided here).
 * NOTE(review): the switch(width) skeleton is elided in this excerpt, and
 * KASSERT(1, ...) can never fire — KASSERT panics when the expression is
 * false, so this was presumably meant to be KASSERT(0, ...); confirm
 * against the full source.
 */
vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
	struct vmd_softc *sc;
	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start)
	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;
	/* 4-, 2-, and 1-byte accesses through the BAR(0) mapping. */
	return (bus_space_read_4(sc->vmd_btag, sc->vmd_bhandle,
	return (bus_space_read_2(sc->vmd_btag, sc->vmd_bhandle,
	return (bus_space_read_1(sc->vmd_btag, sc->vmd_bhandle,
	KASSERT(1, ("Invalid width requested"));
/*
 * pcib_write_config method: write a register of a child device through
 * the BAR(0) config-space window, mirroring vmd_read_config()'s offset
 * encoding.  The switch(width) skeleton is elided in this excerpt.
 * NOTE(review): the panic message "Failed to specific width" reads as a
 * typo (likely "Invalid width specified"); confirm before changing.
 */
vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
    uint32_t val, int width)
	struct vmd_softc *sc;
	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start)
	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;
	/* 4-, 2-, and 1-byte accesses through the BAR(0) mapping. */
	return (bus_space_write_4(sc->vmd_btag, sc->vmd_bhandle,
	return (bus_space_write_2(sc->vmd_btag, sc->vmd_bhandle,
	return (bus_space_write_1(sc->vmd_btag, sc->vmd_bhandle,
	panic("Failed to specific width");
/*
 * pci_read_config method: translate the child's devinfo into the
 * bus/slot/func triple and delegate to vmd_read_config().
 */
vmd_pci_read_config(device_t dev, device_t child, int reg, int width)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	return vmd_read_config(dev, cfg->bus, cfg->slot, cfg->func, reg, width);
/*
 * pci_write_config method: translate the child's devinfo into the
 * bus/slot/func triple and delegate to vmd_write_config().
 */
vmd_pci_write_config(device_t dev, device_t child, int reg, uint32_t val,
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	vmd_write_config(dev, cfg->bus, cfg->slot, cfg->func, reg, val, width);
/*
 * pci_alloc_devinfo method: hand out a zeroed pci_devinfo for a child.
 * M_WAITOK means this can sleep and never returns NULL.
 * (The return statement is elided in this excerpt.)
 */
static struct pci_devinfo *
vmd_alloc_devinfo(device_t dev)
	struct pci_devinfo *dinfo;
	dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * Body of the MSI-X interrupt handler registered via
	 * bus_setup_intr() in vmd_attach() (the signature line is elided in
	 * this excerpt).  TASK_QUEUE_INTR builds defer dispatch to the
	 * taskqueue; otherwise every registered child handler is invoked
	 * directly under vmd_irq_lock.
	 */
	struct vmd_softc *sc;
#ifndef TASK_QUEUE_INTR
	struct vmd_irq_handler *elm, *tmp_elm;
	/* The softc is reached through the per-vector vmd_irq argument. */
	irq = (struct vmd_irq *)arg;
#ifdef TASK_QUEUE_INTR
	taskqueue_enqueue(sc->vmd_irq_tq, &sc->vmd_irq_task);
	mtx_lock(&sc->vmd_irq_lock);
	/* All handlers hang off vector 0's list; fan out to each child. */
	TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) {
		(elm->vmd_intr)(elm->vmd_arg);
	mtx_unlock(&sc->vmd_irq_lock);
#ifdef TASK_QUEUE_INTR
/*
 * Taskqueue task enqueued by vmd_intr(): invoke every handler that
 * children registered through vmd_setup_intr().  The derivation of sc
 * from context is elided in this excerpt.
 */
vmd_handle_irq(void *context, int pending)
	struct vmd_irq_handler *elm, *tmp_elm;
	struct vmd_softc *sc;
	TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) {
		(elm->vmd_intr)(elm->vmd_arg);
/*
 * device_attach method.  Large parts (declarations, error paths, closing
 * braces, switch labels) are elided in this excerpt.  Visible sequence:
 * set up the interrupt taskqueue, map the BARs, claim the I/O window,
 * program the primary bus, compute the restricted bus start for
 * BUS_RESTRICT parts, initialize the child-bus rman and bus-number
 * resource, allocate and wire up MSI-X vectors, and attach the child
 * PCI bus.
 */
vmd_attach(device_t dev)
	struct vmd_softc *sc;
	struct pcib_secbus *bus;
	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	pci_enable_busmaster(dev);
#ifdef TASK_QUEUE_INTR
	/*
	 * NOTE(review): taskqueue_create_fast(..., M_NOWAIT, ...) may return
	 * NULL, but no check is visible before taskqueue_start_threads();
	 * confirm against the full source.
	 */
	sc->vmd_irq_tq = taskqueue_create_fast("vmd_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vmd_irq_tq);
	taskqueue_start_threads(&sc->vmd_irq_tq, 1, PI_DISK, "%s taskq",
	    device_get_nameunit(sc->vmd_dev));
	TASK_INIT(&sc->vmd_irq_task, 0, vmd_handle_irq, sc);
	mtx_init(&sc->vmd_irq_lock, "VMD IRQ lock", NULL, MTX_DEF);
	/*
	 * Map the memory BARs.  NOTE(review): the rid uses PCIR_BAR(j) but
	 * the header read always uses PCIR_BAR(0) — looks like it should be
	 * PCIR_BAR(j); confirm against the full source.
	 */
	for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++ ) {
		sc->vmd_regs_rid[i] = PCIR_BAR(j);
		bar = pci_read_config(dev, PCIR_BAR(0), 4);
		if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) ==
		if ((sc->vmd_regs_resource[i] = bus_alloc_resource_any(
		    sc->vmd_dev, SYS_RES_MEMORY, &sc->vmd_regs_rid[i],
		    RF_ACTIVE)) == NULL) {
			device_printf(dev, "Cannot allocate resources\n");
	/* Claim the bridge I/O window from the parent. */
	sc->vmd_io_rid = PCIR_IOBASEL_1;
	sc->vmd_io_resource = bus_alloc_resource_any(
	    device_get_parent(sc->vmd_dev), SYS_RES_IOPORT, &sc->vmd_io_rid,
	if (sc->vmd_io_resource == NULL) {
		device_printf(dev, "Cannot allocate IO\n");
	/* Child config space is accessed through BAR(0). */
	sc->vmd_btag = rman_get_bustag(sc->vmd_regs_resource[0]);
	sc->vmd_bhandle = rman_get_bushandle(sc->vmd_regs_resource[0]);
	pci_write_config(dev, PCIR_PRIBUS_2,
	    pcib_get_bus(device_get_parent(dev)), 1);
	/*
	 * Restricted parts encode the first usable child bus number in
	 * VMD_CONFIG; map the 2-bit selector to a bus offset.
	 */
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	sc->vmd_bus_start = 0;
	while (t->vmd_name != NULL) {
		if (vid == t->vmd_vid &&
			if (t->flags == BUS_RESTRICT) {
				if (pci_read_config(dev, VMD_CAP, 2) &
					switch (VMD_BUS_START(pci_read_config(
					    dev, VMD_CONFIG, 2))) {
						sc->vmd_bus_start = 128;
						sc->vmd_bus_start = 224;
						/*
						 * NOTE(review): message below
						 * likely meant "bus offset".
						 */
						    "Unknown bug offset\n");
	device_printf(dev, "VMD bus starts at %d\n", sc->vmd_bus_start);
	/* Publish the secondary/subordinate bus registers for pcib code. */
	sec_reg = PCIR_SECBUS_1;
	bus->sub_reg = PCIR_SUBBUS_1;
	bus->sec = vmd_read_config(dev, b, s, f, sec_reg, 1);
	bus->sub = vmd_read_config(dev, b, s, f, bus->sub_reg, 1);
	/* rman covering the child bus number range. */
	bus->rman.rm_start = sc->vmd_bus_start;
	bus->rman.rm_end = PCI_BUSMAX;
	bus->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&bus->rman);
		device_printf(dev, "Failed to initialize %s bus number rman\n",
		    device_get_nameunit(dev));
		/* rm_end == 0 tells vmd_free() to skip rman_fini(). */
		bus->rman.rm_end = 0;
	/*
	 * Allocate a bus range. This will return an existing bus range
	 * if one exists, or a new bus range if one does not.
	 */
	bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
	if (bus->res == NULL) {
		/*
		 * Fall back to just allocating a range of a single bus
		 */
		bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
	} else if (rman_get_size(bus->res) < min_count) {
		/*
		 * Attempt to grow the existing range to satisfy the
		 * minimum desired count.
		 */
		(void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res,
		    rman_get_start(bus->res), rman_get_start(bus->res) +
	/*
	 * Add the initial resource to the rman.
	 */
	if (bus->res != NULL) {
		error = rman_manage_region(&bus->rman, rman_get_start(bus->res),
		    rman_get_end(bus->res));
			device_printf(dev, "Failed to add resource to rman\n");
		bus->sec = rman_get_start(bus->res);
		bus->sub = rman_get_end(bus->res);
	/* Allocate one MSI-X vector per message the device exposes. */
	sc->vmd_msix_count = pci_msix_count(dev);
	if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) {
		sc->vmd_irq = malloc(sizeof(struct vmd_irq) *
		    M_DEVBUF, M_WAITOK | M_ZERO);
		for (i = 0; i < sc->vmd_msix_count; i++) {
			/* IRQ rids are 1-based. */
			sc->vmd_irq[i].vmd_rid = i + 1;
			sc->vmd_irq[i].vmd_sc = sc;
			sc->vmd_irq[i].vmd_instance = i;
			sc->vmd_irq[i].vmd_res = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &sc->vmd_irq[i].vmd_rid,
			if (sc->vmd_irq[i].vmd_res == NULL) {
				device_printf(dev,"Failed to alloc irq\n");
			TAILQ_INIT(&sc->vmd_irq[i].vmd_list);
			if (bus_setup_intr(dev, sc->vmd_irq[i].vmd_res,
			    INTR_TYPE_MISC | INTR_MPSAFE, NULL, vmd_intr,
			    &sc->vmd_irq[i], &sc->vmd_irq[i].vmd_handle)) {
				device_printf(sc->vmd_dev,
				    "Cannot set up interrupt\n");
				sc->vmd_irq[i].vmd_res = NULL;
	/* Attach the child PCI bus that enumerates devices behind VMD. */
	sc->vmd_child = device_add_child(dev, NULL, -1);
	if (sc->vmd_child == NULL) {
		device_printf(dev, "Failed to attach child\n");
	error = device_probe_and_attach(sc->vmd_child);
		device_printf(dev, "Failed to add probe child: %d\n", error);
		(void)device_delete_child(dev, sc->vmd_child);
/*
 * device_detach method: detach and delete the child PCI bus, then
 * (presumably, in elided lines) release resources via vmd_free().
 * Error-check branches between the calls are elided in this excerpt.
 */
vmd_detach(device_t dev)
	struct vmd_softc *sc;
	sc = device_get_softc(dev);
	if (sc->vmd_child != NULL) {
		err = bus_generic_detach(sc->vmd_child);
		err = device_delete_child(dev, sc->vmd_child);
/* Pass request to alloc an MSI-X message up to the parent bridge. */
/*
 * Fails with an error (elided early return) when MSI-X is disabled via
 * vmd_flags; otherwise forwards the allocation to the grandparent PCIB.
 */
vmd_alloc_msix(device_t pcib, device_t dev, int *irq)
	struct vmd_softc *sc = device_get_softc(pcib);
	if (sc->vmd_flags & PCIB_DISABLE_MSIX)
	bus = device_get_parent(pcib);
	ret = PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq);
/*
 * bus_alloc_resource method: PCI bus numbers come from a per-VMD PCI
 * domain (PCI_DOMAINMAX minus the unit number keys the domain); every
 * other resource type falls through to the generic pcib allocator.
 */
static struct resource *
vmd_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
	/* Start at max PCI vmd_domain and work down */
	if (type == PCI_RES_BUS) {
		return (pci_domain_alloc_bus(PCI_DOMAINMAX -
		    device_get_unit(dev), child, rid, start, end,
	return (pcib_alloc_resource(dev, child, type, rid, start, end,
/*
 * bus_adjust_resource method: bus numbers are adjusted in the per-VMD
 * PCI domain (same PCI_DOMAINMAX - unit key as vmd_alloc_resource);
 * everything else goes to the generic pcib implementation.
 */
vmd_adjust_resource(device_t dev, device_t child, int type,
    struct resource *r, rman_res_t start, rman_res_t end)
	struct resource *res = r;

	if (type == PCI_RES_BUS)
		return (pci_domain_adjust_bus(PCI_DOMAINMAX -
		    device_get_unit(dev), child, res, start, end));
	return (pcib_adjust_resource(dev, child, type, res, start, end));
/*
 * bus_release_resource method: releases bus numbers back to the per-VMD
 * PCI domain; other resource types go to the generic pcib release path.
 */
vmd_release_resource(device_t dev, device_t child, int type, int rid,
	if (type == PCI_RES_BUS)
		return (pci_domain_release_bus(PCI_DOMAINMAX -
		    device_get_unit(dev), child, rid, r));
	return (pcib_release_resource(dev, child, type, rid, r));
564 vmd_shutdown(device_t dev)
/* pcib_route_interrupt method: straight delegation to the pcib helper. */
vmd_pcib_route_interrupt(device_t pcib, device_t dev, int pin)
	return (pcib_route_interrupt(pcib, dev, pin));
/* pcib_alloc_msi method: straight delegation to the pcib helper. */
vmd_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount,
	return (pcib_alloc_msi(pcib, dev, count, maxcount, irqs));
/* pcib_release_msi method: straight delegation to the pcib helper. */
vmd_pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs)
	return (pcib_release_msi(pcib, dev, count, irqs));
/* pcib_release_msix method: straight delegation to the pcib helper. */
vmd_pcib_release_msix(device_t pcib, device_t dev, int irq) {
	return pcib_release_msix(pcib, dev, irq);
/*
 * bus_setup_intr method: record the child's handler on the shared
 * handler list (consumed by vmd_intr()/vmd_handle_irq()), then fall
 * through to the generic setup.  The derivation of index i and the
 * locking around the insert are elided in this excerpt.
 */
vmd_setup_intr(device_t dev, device_t child, struct resource *irq,
    int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg,
	struct vmd_irq_handler *elm;
	struct vmd_softc *sc;
	sc = device_get_softc(dev);

	/*
	 * There appears to be no steering of VMD interrupts from device
	 */
	/*
	 * NOTE(review): M_NOWAIT malloc can return NULL but elm is
	 * dereferenced immediately below with no check; confirm against the
	 * full source.
	 */
	elm = malloc(sizeof(*elm), M_DEVBUF, M_NOWAIT|M_ZERO);
	elm->vmd_child = child;
	elm->vmd_intr = intr;
	/* The rid identifies this registration for vmd_teardown_intr(). */
	elm->vmd_rid = rman_get_rid(irq);
	TAILQ_INSERT_TAIL(&sc->vmd_irq[i].vmd_list, elm, vmd_link);

	return (bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
/*
 * bus_teardown_intr method: remove the matching (child, rid) entry that
 * vmd_setup_intr() queued (the free of elm is elided in this excerpt),
 * then fall through to the generic teardown.
 * NOTE(review): stray double semicolon after *tmp is harmless but should
 * be cleaned up.
 */
vmd_teardown_intr(device_t dev, device_t child, struct resource *irq,
	struct vmd_irq_handler *elm, *tmp;;
	struct vmd_softc *sc;
	sc = device_get_softc(dev);
	TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp) {
		if (elm->vmd_child == child &&
		    elm->vmd_rid == rman_get_rid(irq)) {
			TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link);
	return (bus_generic_teardown_intr(dev, child, irq, cookie));
/*
 * newbus method table wiring this driver's device, bus, pci, and pcib
 * interface implementations.  NOTE(review): the DEVMETHOD_END terminator
 * is elided in this excerpt of the file.
 */
static device_method_t vmd_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, vmd_probe),
	DEVMETHOD(device_attach, vmd_attach),
	DEVMETHOD(device_detach, vmd_detach),
	DEVMETHOD(device_shutdown, vmd_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar, pcib_read_ivar),
	DEVMETHOD(bus_write_ivar, pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource, vmd_alloc_resource),
	DEVMETHOD(bus_adjust_resource, vmd_adjust_resource),
	DEVMETHOD(bus_release_resource, vmd_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr, vmd_setup_intr),
	DEVMETHOD(bus_teardown_intr, vmd_teardown_intr),

	/* pci interface */
	DEVMETHOD(pci_read_config, vmd_pci_read_config),
	DEVMETHOD(pci_write_config, vmd_pci_write_config),
	DEVMETHOD(pci_alloc_devinfo, vmd_alloc_devinfo),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots, pcib_maxslots),
	DEVMETHOD(pcib_read_config, vmd_read_config),
	DEVMETHOD(pcib_write_config, vmd_write_config),
	DEVMETHOD(pcib_route_interrupt, vmd_pcib_route_interrupt),
	DEVMETHOD(pcib_alloc_msi, vmd_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi, vmd_pcib_release_msi),
	DEVMETHOD(pcib_alloc_msix, vmd_alloc_msix),
	DEVMETHOD(pcib_release_msix, vmd_pcib_release_msix),
	DEVMETHOD(pcib_map_msi, pcib_map_msi),
/* Driver class, module registration, and PnP match data. */
static devclass_t vmd_devclass;

DEFINE_CLASS_0(vmd, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
DRIVER_MODULE(vmd, pci, vmd_pci_driver, vmd_devclass, NULL, NULL);
/* nitems - 1 excludes the table's terminating sentinel entry. */
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd,
    vmd_devs, nitems(vmd_devs) - 1);
MODULE_DEPEND(vmd, vmd_bus, 1, 1, 1);