2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright 2019 Cisco Systems, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/types.h>
33 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/module.h>
38 #include <sys/systm.h>
39 #include <sys/malloc.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
45 #include <sys/mutex.h>
46 #include <sys/taskqueue.h>
48 #include <sys/pciio.h>
49 #include <dev/pci/pcivar.h>
50 #include <dev/pci/pcireg.h>
51 #include <dev/pci/pci_private.h>
52 #include <dev/pci/pcib_private.h>
54 #define TASK_QUEUE_INTR 1
55 #include <dev/vmd/vmd.h>
66 #define INTEL_VENDOR_ID 0x8086
67 #define INTEL_DEVICE_ID_VMD 0x201d
68 #define INTEL_DEVICE_ID_VMD2 0x28c0
/*
 * PCI vendor/device IDs this driver attaches to.
 * NOTE(review): the probe loop below walks until vmd_name == NULL, so
 * this table needs a { 0, 0, NULL } terminator row — it is not visible
 * in this extract; confirm it exists in the full file.
 */
70 static struct vmd_type vmd_devs[] = {
71 { INTEL_VENDOR_ID, INTEL_DEVICE_ID_VMD, "Intel Volume Management Device" },
72 { INTEL_VENDOR_ID, INTEL_DEVICE_ID_VMD2, "Intel Volume Management Device" },
/*
 * Device probe method: match this PCI function's vendor/device IDs
 * against the vmd_devs[] table; on a hit, set the human-readable
 * description and claim the device at default priority.
 * (Several lines — declarations of t/vid/did, loop increment, closing
 * braces, and the failure return — are missing from this extract.)
 */
77 vmd_probe(device_t dev)
83 vid = pci_get_vendor(dev);
84 did = pci_get_device(dev);
86 while (t->vmd_name != NULL) {
87 if (vid == t->vmd_vid &&
89 device_set_desc(dev, t->vmd_name);
90 return (BUS_PROBE_DEFAULT);
/*
 * Tear down everything vmd_attach() set up, in reverse order:
 *   1. drain and free the interrupt taskqueue (TASK_QUEUE_INTR build),
 *   2. tear down and release each MSI-X interrupt resource,
 *   3. drain the shared interrupt-handler list hanging off vmd_irq[0],
 *   4. release MSI vectors, memory BARs, and the I/O window,
 *   5. destroy the IRQ mutex (non-taskqueue build only).
 * Safe to call on a partially initialized softc: every step checks for
 * NULL before releasing.
 */
99 vmd_free(struct vmd_softc *sc)
102 struct vmd_irq_handler *elm, *tmp;
104 #ifdef TASK_QUEUE_INTR
105 if (sc->vmd_irq_tq != NULL) {
/* taskqueue_drain() waits for any in-flight vmd_irq_task to finish
 * before the queue is freed, so no handler can run on freed memory. */
106 taskqueue_drain(sc->vmd_irq_tq, &sc->vmd_irq_task);
107 taskqueue_free(sc->vmd_irq_tq);
108 sc->vmd_irq_tq = NULL;
111 if (sc->vmd_irq != NULL) {
112 for (i = 0; i < sc->vmd_msix_count; i++) {
113 if (sc->vmd_irq[i].vmd_res != NULL) {
114 bus_teardown_intr(sc->vmd_dev,
115 sc->vmd_irq[i].vmd_res,
116 sc->vmd_irq[i].vmd_handle);
117 bus_release_resource(sc->vmd_dev, SYS_RES_IRQ,
118 sc->vmd_irq[i].vmd_rid,
119 sc->vmd_irq[i].vmd_res);
/* All child handlers are chained on vmd_irq[0]'s list (see
 * vmd_setup_intr), so only that list needs draining. */
122 TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list ,vmd_link,
124 TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link);
128 free(sc->vmd_irq, M_DEVBUF);
130 pci_release_msi(sc->vmd_dev);
131 for (i = 0; i < VMD_MAX_BAR; i++) {
132 if (sc->vmd_regs_resource[i] != NULL)
133 bus_release_resource(sc->vmd_dev, SYS_RES_MEMORY,
135 sc->vmd_regs_resource[i]);
/* The I/O window was allocated from the parent bus in vmd_attach(),
 * so it must be released against the parent as well. */
137 if (sc->vmd_io_resource)
138 bus_release_resource(device_get_parent(sc->vmd_dev),
139 SYS_RES_IOPORT, sc->vmd_io_rid, sc->vmd_io_resource);
141 #ifndef TASK_QUEUE_INTR
142 if (mtx_initialized(&sc->vmd_irq_lock)) {
143 mtx_destroy(&sc->vmd_irq_lock);
148 /* Hidden PCI Roots are hidden in BAR(0). */
/*
 * pcib_read_config method: read a child device's PCI config register.
 * VMD exposes the config space of devices behind it as a flat window
 * in BAR 0; the register lives at
 *   offset = (bus << 20) | (slot << 15) | (func << 12) | reg
 * i.e. 1 MiB per bus, 32 KiB per slot, 4 KiB per function.
 * Width selects a 4-, 2-, or 1-byte bus-space read (switch arms are
 * partially elided in this extract).
 *
 * NOTE(review): KASSERT(1, ...) can never fire — KASSERT panics when
 * the condition is FALSE, so the "invalid width" guard is inert as
 * written. It should be KASSERT(0, ...) (cannot change code in this
 * documentation pass).
 */
151 vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
154 struct vmd_softc *sc;
157 offset = (b << 20) + (s << 15) + (f << 12) + reg;
158 sc = device_get_softc(dev);
161 return (bus_space_read_4(sc->vmd_btag, sc->vmd_bhandle,
164 return (bus_space_read_2(sc->vmd_btag, sc->vmd_bhandle,
167 return (bus_space_read_1(sc->vmd_btag, sc->vmd_bhandle,
170 KASSERT(1, ("Invalid width requested"));
/*
 * pcib_write_config method: write a child device's PCI config register
 * through the BAR 0 config window, using the same
 * (bus << 20) | (slot << 15) | (func << 12) | reg addressing as
 * vmd_read_config(). Width selects the 4-/2-/1-byte bus-space write.
 *
 * NOTE(review): the panic string "Failed to specific width" is garbled
 * English — presumably meant "invalid width specified" (string left
 * untouched in this documentation pass).
 */
176 vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
177 uint32_t val, int width)
180 struct vmd_softc *sc;
183 offset = (b << 20) + (s << 15) + (f << 12) + reg;
184 sc = device_get_softc(dev);
188 return (bus_space_write_4(sc->vmd_btag, sc->vmd_bhandle,
191 return (bus_space_write_2(sc->vmd_btag, sc->vmd_bhandle,
194 return (bus_space_write_1(sc->vmd_btag, sc->vmd_bhandle,
197 panic("Failed to specific width");
/*
 * pci_read_config method: translate a child devinfo's bus/slot/func
 * into a vmd_read_config() call on the BAR 0 window.
 */
202 vmd_pci_read_config(device_t dev, device_t child, int reg, int width)
204 struct pci_devinfo *dinfo = device_get_ivars(child);
205 pcicfgregs *cfg = &dinfo->cfg;
207 return vmd_read_config(dev, cfg->bus, cfg->slot, cfg->func, reg, width);
/*
 * pci_write_config method: counterpart of vmd_pci_read_config() —
 * forwards the child's bus/slot/func to vmd_write_config().
 */
211 vmd_pci_write_config(device_t dev, device_t child, int reg, uint32_t val,
214 struct pci_devinfo *dinfo = device_get_ivars(child);
215 pcicfgregs *cfg = &dinfo->cfg;
217 vmd_write_config(dev, cfg->bus, cfg->slot, cfg->func, reg, val, width);
/*
 * pci_alloc_devinfo method: allocate a zeroed pci_devinfo for a child
 * device. M_WAITOK means this cannot return NULL; the return statement
 * is outside this extract.
 */
220 static struct pci_devinfo *
221 vmd_alloc_devinfo(device_t dev)
223 struct pci_devinfo *dinfo;
225 dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO);
/*
 * (Interrupt filter/handler body — its signature line is missing from
 * this extract; `arg` is the per-vector struct vmd_irq.)
 * TASK_QUEUE_INTR build: defer all work to the taskqueue so child
 * handlers run in thread context. Otherwise: walk the shared handler
 * list on vmd_irq[0] under vmd_irq_lock and invoke every registered
 * child handler — VMD gives no per-device interrupt steering, so every
 * handler is called on every vector.
 */
233 struct vmd_softc *sc;
234 #ifndef TASK_QUEUE_INTR
235 struct vmd_irq_handler *elm, *tmp_elm;
238 irq = (struct vmd_irq *)arg;
240 #ifdef TASK_QUEUE_INTR
241 taskqueue_enqueue(sc->vmd_irq_tq, &sc->vmd_irq_task);
243 mtx_lock(&sc->vmd_irq_lock);
244 TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) {
245 (elm->vmd_intr)(elm->vmd_arg);
247 mtx_unlock(&sc->vmd_irq_lock);
251 #ifdef TASK_QUEUE_INTR
/*
 * Taskqueue handler (TASK_QUEUE_INTR build): runs in thread context
 * and calls every child interrupt handler chained on vmd_irq[0]'s
 * list. `context` is the softc (set up via TASK_INIT in vmd_attach);
 * `pending` is the standard taskqueue argument, unused here.
 * NOTE(review): unlike the inline path, no lock is visible around the
 * list walk in this extract — confirm list mutation is serialized.
 */
253 vmd_handle_irq(void *context, int pending)
255 struct vmd_irq_handler *elm, *tmp_elm;
256 struct vmd_softc *sc;
260 TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) {
261 (elm->vmd_intr)(elm->vmd_arg);
/*
 * Device attach: bring up the VMD host bridge.
 *   1. zero the softc, enable bus mastering;
 *   2. create the interrupt taskqueue (or the IRQ mutex);
 *   3. map the memory BARs (config window, memory windows) and the
 *      parent's I/O window;
 *   4. set up the secondary-bus range and its rman;
 *   5. allocate and wire up all MSI-X vectors to vmd_intr();
 *   6. add and attach the child PCI bus.
 * Error paths (elided here) unwind through vmd_free().
 */
267 vmd_attach(device_t dev)
269 struct vmd_softc *sc;
270 struct pcib_secbus *bus;
280 sc = device_get_softc(dev);
281 bzero(sc, sizeof(*sc));
285 pci_enable_busmaster(dev);
287 #ifdef TASK_QUEUE_INTR
288 sc->vmd_irq_tq = taskqueue_create_fast("vmd_taskq", M_NOWAIT,
289 taskqueue_thread_enqueue, &sc->vmd_irq_tq);
290 taskqueue_start_threads(&sc->vmd_irq_tq, 1, PI_DISK, "%s taskq",
291 device_get_nameunit(sc->vmd_dev));
292 TASK_INIT(&sc->vmd_irq_task, 0, vmd_handle_irq, sc);
294 mtx_init(&sc->vmd_irq_lock, "VMD IRQ lock", NULL, MTX_DEF);
/* Map each BAR. NOTE(review): the config read below uses PCIR_BAR(0)
 * on every iteration while the rid is PCIR_BAR(j) — looks like it
 * should be PCIR_BAR(j); confirm against the full file before relying
 * on the bar-type test for BARs other than 0. */
296 for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++ ) {
297 sc->vmd_regs_rid[i] = PCIR_BAR(j);
298 bar = pci_read_config(dev, PCIR_BAR(0), 4);
299 if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) ==
302 if ((sc->vmd_regs_resource[i] = bus_alloc_resource_any(
303 sc->vmd_dev, SYS_RES_MEMORY, &sc->vmd_regs_rid[i],
304 RF_ACTIVE)) == NULL) {
305 device_printf(dev, "Cannot allocate resources\n");
/* I/O window is allocated from the parent bus, not from the VMD
 * device itself (released the same way in vmd_free()). */
310 sc->vmd_io_rid = PCIR_IOBASEL_1;
311 sc->vmd_io_resource = bus_alloc_resource_any(
312 device_get_parent(sc->vmd_dev), SYS_RES_IOPORT, &sc->vmd_io_rid,
314 if (sc->vmd_io_resource == NULL) {
315 device_printf(dev, "Cannot allocate IO\n");
/* BAR 0 is the child config-space window used by vmd_read_config()/
 * vmd_write_config(); cache its tag/handle. */
320 sc->vmd_btag = rman_get_bustag(sc->vmd_regs_resource[0]);
321 sc->vmd_bhandle = rman_get_bushandle(sc->vmd_regs_resource[0]);
323 pci_write_config(dev, PCIR_PRIBUS_2,
324 pcib_get_bus(device_get_parent(dev)), 1);
/* Secondary/subordinate bus bookkeeping, mirroring pcib(4): create a
 * private rman covering [0, PCI_BUSMAX] for child bus numbers. */
326 sec_reg = PCIR_SECBUS_1;
328 bus->sub_reg = PCIR_SUBBUS_1;
329 bus->sec = vmd_read_config(dev, b, s, f, sec_reg, 1);
330 bus->sub = vmd_read_config(dev, b, s, f, bus->sub_reg, 1);
332 bus->rman.rm_start = 0;
333 bus->rman.rm_end = PCI_BUSMAX;
334 bus->rman.rm_type = RMAN_ARRAY;
335 snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
336 bus->rman.rm_descr = strdup(buf, M_DEVBUF);
337 error = rman_init(&bus->rman);
340 device_printf(dev, "Failed to initialize %s bus number rman\n",
341 device_get_nameunit(dev));
346 * Allocate a bus range. This will return an existing bus range
347 * if one exists, or a new bus range if one does not.
350 bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
352 if (bus->res == NULL) {
354 * Fall back to just allocating a range of a single bus
357 bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
359 } else if (rman_get_size(bus->res) < min_count) {
361 * Attempt to grow the existing range to satisfy the
362 * minimum desired count.
364 (void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res,
365 rman_get_start(bus->res), rman_get_start(bus->res) +
371 * Add the initial resource to the rman.
373 if (bus->res != NULL) {
374 error = rman_manage_region(&bus->rman, rman_get_start(bus->res),
375 rman_get_end(bus->res));
377 device_printf(dev, "Failed to add resource to rman\n");
380 bus->sec = rman_get_start(bus->res);
381 bus->sub = rman_get_end(bus->res);
/* Allocate every advertised MSI-X vector and hook each one to
 * vmd_intr(); rids are 1-based. A setup failure NULLs vmd_res so
 * vmd_free() skips the teardown for that vector. */
384 sc->vmd_msix_count = pci_msix_count(dev);
385 if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) {
386 sc->vmd_irq = malloc(sizeof(struct vmd_irq) *
388 M_DEVBUF, M_WAITOK | M_ZERO);
390 for (i = 0; i < sc->vmd_msix_count; i++) {
391 sc->vmd_irq[i].vmd_rid = i + 1;
392 sc->vmd_irq[i].vmd_sc = sc;
393 sc->vmd_irq[i].vmd_instance = i;
394 sc->vmd_irq[i].vmd_res = bus_alloc_resource_any(dev,
395 SYS_RES_IRQ, &sc->vmd_irq[i].vmd_rid,
397 if (sc->vmd_irq[i].vmd_res == NULL) {
398 device_printf(dev,"Failed to alloc irq\n");
402 TAILQ_INIT(&sc->vmd_irq[i].vmd_list);
403 if (bus_setup_intr(dev, sc->vmd_irq[i].vmd_res,
404 INTR_TYPE_MISC | INTR_MPSAFE, NULL, vmd_intr,
405 &sc->vmd_irq[i], &sc->vmd_irq[i].vmd_handle)) {
406 device_printf(sc->vmd_dev,
407 "Cannot set up interrupt\n");
408 sc->vmd_irq[i].vmd_res = NULL;
/* Finally, add and probe the child PCI bus device. */
414 sc->vmd_child = device_add_child(dev, NULL, -1);
416 if (sc->vmd_child == NULL) {
417 device_printf(dev, "Failed to attach child\n");
421 error = device_probe_and_attach(sc->vmd_child);
423 device_printf(dev, "Failed to add probe child\n");
/*
 * Device detach: detach and delete the child PCI bus, tear down the
 * bus-number rman if it was initialized (rm_end != 0 is used as the
 * "was initialized" sentinel), then — presumably via vmd_free(), not
 * visible in this extract — release the remaining resources.
 */
436 vmd_detach(device_t dev)
438 struct vmd_softc *sc;
441 sc = device_get_softc(dev);
442 if (sc->vmd_child != NULL) {
443 err = bus_generic_detach(sc->vmd_child);
446 err = device_delete_child(dev, sc->vmd_child);
450 if (sc->vmd_bus.rman.rm_end != 0)
451 rman_fini(&sc->vmd_bus.rman);
457 /* Pass request to alloc an MSI-X message up to the parent bridge. */
/*
 * pcib_alloc_msix method: VMD cannot allocate MSI-X for children
 * itself, so unless MSI-X is administratively disabled
 * (PCIB_DISABLE_MSIX), forward the request two levels up — to the
 * bridge above the PCI bus that VMD sits on.
 */
459 vmd_alloc_msix(device_t pcib, device_t dev, int *irq)
461 struct vmd_softc *sc = device_get_softc(pcib);
465 if (sc->vmd_flags & PCIB_DISABLE_MSIX)
467 bus = device_get_parent(pcib);
468 ret = PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq);
/*
 * bus_alloc_resource method: bus-number requests (PCI_RES_BUS) are
 * served from a synthetic per-unit PCI domain (PCI_DOMAINMAX minus the
 * unit number, so each VMD instance gets its own domain); everything
 * else falls through to the generic pcib allocator.
 */
472 static struct resource *
473 vmd_alloc_resource(device_t dev, device_t child, int type, int *rid,
474 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
476 /* Start at max PCI vmd_domain and work down */
477 if (type == PCI_RES_BUS) {
478 return (pci_domain_alloc_bus(PCI_DOMAINMAX -
479 device_get_unit(dev), child, rid, start, end,
483 return (pcib_alloc_resource(dev, child, type, rid, start, end,
/*
 * bus_adjust_resource method: same split as vmd_alloc_resource() —
 * bus numbers go to the synthetic per-unit domain, everything else to
 * the generic pcib implementation.
 */
488 vmd_adjust_resource(device_t dev, device_t child, int type,
489 struct resource *r, rman_res_t start, rman_res_t end)
491 struct resource *res = r;
493 if (type == PCI_RES_BUS)
494 return (pci_domain_adjust_bus(PCI_DOMAINMAX -
495 device_get_unit(dev), child, res, start, end));
496 return (pcib_adjust_resource(dev, child, type, res, start, end));
/*
 * bus_release_resource method: release bus numbers back to the
 * synthetic per-unit domain; delegate all other resource types to the
 * generic pcib implementation.
 */
500 vmd_release_resource(device_t dev, device_t child, int type, int rid,
503 if (type == PCI_RES_BUS)
504 return (pci_domain_release_bus(PCI_DOMAINMAX -
505 device_get_unit(dev), child, rid, r));
506 return (pcib_release_resource(dev, child, type, rid, r));
/* device_shutdown method (body not visible in this extract). */
510 vmd_shutdown(device_t dev)
/* pcib_route_interrupt method: plain pass-through to the generic
 * pcib INTx routing. */
516 vmd_pcib_route_interrupt(device_t pcib, device_t dev, int pin)
518 return (pcib_route_interrupt(pcib, dev, pin));
/* pcib_alloc_msi method: pass-through to the generic pcib MSI
 * allocator. */
523 vmd_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount,
526 return (pcib_alloc_msi(pcib, dev, count, maxcount, irqs));
/* pcib_release_msi method: pass-through to the generic pcib MSI
 * release. */
530 vmd_pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs)
533 return (pcib_release_msi(pcib, dev, count, irqs));
/* pcib_release_msix method: pass-through to the generic pcib MSI-X
 * release. (Style nit: brace on the signature line and no parens on
 * the return value, unlike the sibling wrappers above.) */
537 vmd_pcib_release_msix(device_t pcib, device_t dev, int irq) {
538 return pcib_release_msix(pcib, dev, irq);
/*
 * bus_setup_intr method: because VMD cannot steer interrupts to a
 * specific child, record the child's handler (function, argument, rid)
 * on the shared list so vmd_intr()/vmd_handle_irq() can invoke every
 * registered handler on each vector, then defer the actual wiring to
 * bus_generic_setup_intr().
 * NOTE(review): the list index `i` and the M_NOWAIT malloc's NULL
 * check are not visible in this extract — handlers are drained from
 * vmd_irq[0]'s list elsewhere, so presumably i selects that list;
 * confirm, and confirm elm != NULL is handled.
 */
542 vmd_setup_intr(device_t dev, device_t child, struct resource *irq,
543 int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg,
546 struct vmd_irq_handler *elm;
547 struct vmd_softc *sc;
550 sc = device_get_softc(dev);
553 * There appears to be no steering of VMD interrupts from device
558 elm = malloc(sizeof(*elm), M_DEVBUF, M_NOWAIT|M_ZERO);
559 elm->vmd_child = child;
560 elm->vmd_intr = intr;
561 elm->vmd_rid = rman_get_rid(irq);
563 TAILQ_INSERT_TAIL(&sc->vmd_irq[i].vmd_list, elm, vmd_link);
565 return (bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
/*
 * bus_teardown_intr method: remove the child's handler entry
 * (matched by child pointer and resource rid) from the shared list
 * registered in vmd_setup_intr(), then let bus_generic_teardown_intr()
 * undo the wiring. The free(elm) after removal is elided from this
 * extract.
 * NOTE(review): stray double semicolon after "*tmp" — harmless but
 * should be cleaned up (code left byte-identical in this pass).
 */
570 vmd_teardown_intr(device_t dev, device_t child, struct resource *irq,
573 struct vmd_irq_handler *elm, *tmp;;
574 struct vmd_softc *sc;
576 sc = device_get_softc(dev);
577 TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp) {
578 if (elm->vmd_child == child &&
579 elm->vmd_rid == rman_get_rid(irq)) {
580 TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link);
585 return (bus_generic_teardown_intr(dev, child, irq, cookie));
/*
 * Method table: VMD presents itself as a PCI-PCI bridge (pcib) with
 * config-space access redirected through the BAR 0 window and
 * bus-number allocation served from a private per-unit domain.
 * The DEVMETHOD_END terminator is not visible in this extract.
 */
588 static device_method_t vmd_pci_methods[] = {
589 /* Device interface */
590 DEVMETHOD(device_probe, vmd_probe),
591 DEVMETHOD(device_attach, vmd_attach),
592 DEVMETHOD(device_detach, vmd_detach),
593 DEVMETHOD(device_shutdown, vmd_shutdown),
/* Bus interface */
596 DEVMETHOD(bus_read_ivar, pcib_read_ivar),
597 DEVMETHOD(bus_write_ivar, pcib_write_ivar),
598 DEVMETHOD(bus_alloc_resource, vmd_alloc_resource),
599 DEVMETHOD(bus_adjust_resource, vmd_adjust_resource),
600 DEVMETHOD(bus_release_resource, vmd_release_resource),
601 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
602 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
603 DEVMETHOD(bus_setup_intr, vmd_setup_intr),
604 DEVMETHOD(bus_teardown_intr, vmd_teardown_intr),
/* PCI interface (child config access via the BAR 0 window) */
607 DEVMETHOD(pci_read_config, vmd_pci_read_config),
608 DEVMETHOD(pci_write_config, vmd_pci_write_config),
609 DEVMETHOD(pci_alloc_devinfo, vmd_alloc_devinfo),
/* pcib interface */
612 DEVMETHOD(pcib_maxslots, pcib_maxslots),
613 DEVMETHOD(pcib_read_config, vmd_read_config),
614 DEVMETHOD(pcib_write_config, vmd_write_config),
615 DEVMETHOD(pcib_route_interrupt, vmd_pcib_route_interrupt),
616 DEVMETHOD(pcib_alloc_msi, vmd_pcib_alloc_msi),
617 DEVMETHOD(pcib_release_msi, vmd_pcib_release_msi),
618 DEVMETHOD(pcib_alloc_msix, vmd_alloc_msix),
619 DEVMETHOD(pcib_release_msix, vmd_pcib_release_msix),
620 DEVMETHOD(pcib_map_msi, pcib_map_msi),
/*
 * Driver registration: attach to the pci bus, export PNP match data so
 * devmatch(8) can autoload the module. nitems(vmd_devs) - 1 excludes
 * the NULL terminator row from the PNP table.
 */
625 static devclass_t vmd_devclass;
627 DEFINE_CLASS_0(vmd, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
628 DRIVER_MODULE(vmd, pci, vmd_pci_driver, vmd_devclass, NULL, NULL);
629 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd,
630 vmd_devs, nitems(vmd_devs) - 1);
631 MODULE_DEPEND(vmd, vmd_bus, 1, 1, 1);