/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>
#include <linux/backlight.h>

#include "backlight_if.h"
/* Undef the linux function macro defined in linux/pci.h. */
#undef pci_get_class
static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* Backlight interface. */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),

	DEVMETHOD_END
};
struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};

#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)
static bool
linux_is_drm(struct pci_driver *pdrv)
{
	return (pdrv->name != NULL && strcmp(pdrv->name, "drmn") == 0);
}
static void
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
}
static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;
	/* Create the default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent memory is limited to the lower 32 bits by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (0);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}
int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}
int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}
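
/*
 * Illustrative only (not compiled): a minimal sketch of how a LinuxKPI
 * consumer driver ends up in the two tag-init routines above.  In Linux,
 * dma_set_mask() and dma_set_coherent_mask() are the public entry points;
 * under LinuxKPI they replace the default 64-bit streaming tag and the
 * default 32-bit coherent tag created by linux_pdev_dma_init().  The
 * "foo" driver and its 40-bit limit are hypothetical.
 */
#if 0
static int
foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int error;

	/* Hypothetical device that can only address 40 bits. */
	error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
	if (error == 0)
		error = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(40));
	return (error);
}
#endif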
static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}
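
/*
 * Illustrative only (not compiled): the matching loop above walks each
 * driver's id_table until it hits an all-zero sentinel entry, so tables
 * must be zero-terminated.  A hypothetical table using PCI_VDEVICE()
 * and PCI_ANY_ID wildcards might look like this; the vendor/device IDs
 * are made up.
 */
#if 0
static const struct pci_device_id foo_pci_ids[] = {
	{ PCI_VDEVICE(MELLANOX, 0x1013) },	/* exact device match */
	{ PCI_DEVICE(0x8086, PCI_ANY_ID) },	/* any device of one vendor */
	{ 0, }					/* sentinel: id->vendor == 0 */
};
#endif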
static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}
static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * This should be the upstream bridge; pci_upstream_bridge()
	 * handles that case on demand as otherwise we'll shadow the
	 * entire PCI hierarchy.
	 */
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
}
static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	if (pdev->bus->self != pdev)
		pci_dev_put(pdev->bus->self);
	free(pdev->bus, M_DEVBUF);
	free(pdev, M_DEVBUF);
}
struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.release = lkpinew_pci_dev_release;
	return (pdev);
}
struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}
struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}
static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);

	/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
	if (pdrv->bsd_probe_return == 0)
		return (BUS_PROBE_DEFAULT);
	else
		return (pdrv->bsd_probe_return);
}
static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}
static int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);
	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && linux_is_drm(pdrv);

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	lkpifill_pci_dev(dev, pdev);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}
static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}
int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}
static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}
struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}
void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}
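
/*
 * Illustrative only (not compiled): the devres release above is what makes
 * the managed ("pcim_") API work.  A driver that enables the device through
 * pcim_enable_device() gets its MSI/MSI-X state, I/O enable, and requested
 * regions torn down automatically on detach, instead of undoing each step by
 * hand.  The "foo" driver below is a hypothetical sketch of that pattern.
 */
#if 0
static int
foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int error;

	error = pcim_enable_device(pdev);	/* managed: no explicit disable */
	if (error != 0)
		return (error);
	return (pci_request_region(pdev, 0, "foo-bar0"));
}
#endif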
static void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}
static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}
static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}
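
/*
 * Illustrative only (not compiled): the suspend/resume glue above prefers
 * the legacy pci_driver suspend/resume hooks and falls back to the driver's
 * dev_pm_ops.  A hypothetical driver wiring up the pm ops consumed here:
 */
#if 0
static const struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,		/* called first */
	.suspend_late	= foo_suspend_late,	/* then this, if suspend succeeded */
	.resume_early	= foo_resume_early,
	.resume		= foo_resume,
};

static struct pci_driver foo_driver = {
	.driver.pm	= &foo_pm_ops,
};
#endif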
static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}
static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}
static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}
static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}
static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	return (-error);
}
int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	return (_linux_pci_register_driver(pdrv, dc));
}
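
/*
 * Illustrative only (not compiled): drivers do not call
 * _linux_pci_register_driver() directly; they use the Linux-style
 * pci_register_driver() entry point, which LinuxKPI routes to
 * linux_pci_register_driver() above.  The "foo" driver and its callbacks
 * are hypothetical.
 */
#if 0
static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_pci_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
};

static int
foo_init(void)
{
	return (pci_register_driver(&foo_driver));
}
#endif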
struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && linux_is_drm(pdev->pdrv) ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}
unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && linux_is_drm(pdev->pdrv) ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
		device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
		    (uintmax_t)rle->start);
		return (0);
	}
	return (newstart);
}
unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}
int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-ENODEV);
	}

	/*
	 * It seems there is an implicit devres tracking on these if the device
	 * is managed; otherwise the resources are not automatically freed on
	 * FreeBSD/LinuxKPI though they should be/are expected to be by Linux
	 * drivers.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		dr->region_mask |= (1 << bar);
		dr->region_table[bar] = res;
	}

	/* Even if the device is not managed we need to track it for iomap. */
	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = res;
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (0);
}
static void *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
{
	struct pci_mmio_region *mmio, *p;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0) {
		device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
		    __func__, bar, type);
		return (NULL);
	}

	/*
	 * Check for duplicate mappings.
	 * This can happen if a driver calls pci_request_region() first.
	 */
	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
			return ((void *)rman_get_bushandle(mmio->res));
		}
	}

	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
	    &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
	if (mmio->res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		free(mmio, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return ((void *)rman_get_bushandle(mmio->res));
}
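
/*
 * Illustrative only (not compiled): the duplicate-mapping check above exists
 * because both of the usual BAR setup orders are legal.  A hypothetical
 * driver mapping BAR 0 either way ends up with a single tracked
 * pci_mmio_region:
 */
#if 0
static int
foo_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	if (pci_request_region(pdev, 0, "foo-regs") != 0)
		return (-EBUSY);
	regs = pci_iomap(pdev, 0, 0);	/* reuses the region just requested */
	if (regs == NULL)
		return (-ENOMEM);
	return (0);
}
#endif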
int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	return (_linux_pci_register_driver(pdrv, dc));
}
void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);

	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
}
void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);

	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
}
int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		error = pci_enable_msi(pdev);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_LEGACY) {
		if (pdev->irq)
			return (1);
	}
out:
	return (-EINVAL);
}
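
/*
 * Illustrative only (not compiled): callers typically OR the flags together
 * so the function can fall back from MSI-X to MSI to the legacy INTx line,
 * exactly as the cascaded checks above implement.  Vector counts are
 * hypothetical.
 */
#if 0
static int
foo_setup_irqs(struct pci_dev *pdev)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
	    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return (nvec);	/* negative errno */
	return (0);
}
#endif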
CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;
static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);
static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);
static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL)
		return (0);
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{

	return (phys);
}
#endif
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;
	return (linux_dma_map_phys_common(dev, phys, len, priv->dmat));
}
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif
void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent memory is limited to the lower 32 bits by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}
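
/*
 * Illustrative only (not compiled): a typical consumer allocates a
 * descriptor ring with dma_alloc_coherent() (which lands in the function
 * above), keeps both the CPU and bus addresses, and frees the pair with
 * dma_free_coherent().  Sizes and names are hypothetical.
 */
#if 0
static int
foo_alloc_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);
	if (ring == NULL)
		return (-ENOMEM);
	/* ... hand ring_dma to the device, access ring from the CPU ... */
	dma_free_coherent(dev, 4096, ring, ring_dma);
	return (0);
}
#endif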
void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}

	bus_dmamap_sync(obj->dmat, obj->dmamap, op);
	DMA_PRIV_UNLOCK(priv);
}
int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	default:
		break;
	}

	DMA_PRIV_UNLOCK(priv);

	return (nents);
}
void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}

	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}
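
/*
 * Illustrative only (not compiled): the map/unmap pair above backs the
 * Linux dma_map_sg()/dma_unmap_sg() API.  The whole scatterlist shares the
 * one dma_map created in the first entry, so unmap must be called with the
 * same list head.  Sketch:
 */
#if 0
static void
foo_xfer(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return;		/* mapping failed */
	/* ... program sg_dma_address(sg) of each entry into the device ... */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
}
#endif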
struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)
static int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0)
		return (error);
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}
static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}
static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}
static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}
struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}
void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}
void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}
void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}
void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}
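
/*
 * Illustrative only (not compiled): the pool functions above implement the
 * Linux dma_pool API, with a UMA cache recycling pre-loaded DMA buffers.
 * A typical lifecycle for fixed-size descriptors (sizes are hypothetical):
 */
#if 0
static void
foo_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t desc_dma;
	void *desc;

	pool = dma_pool_create("foo-desc", dev, 64, 64, 0);
	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
	if (desc != NULL)
		dma_pool_free(pool, desc, desc_dma);
	dma_pool_destroy(pool);
}
#endif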
static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}
static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}
static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}
struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}
void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}
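
/*
 * Illustrative only (not compiled): a DRM driver registers its backlight
 * through the Linux-style API, which the glue above bridges to FreeBSD's
 * backlight(9).  The ops structure supplies the update_status callback
 * invoked by linux_backlight_update_status().  Names are hypothetical.
 */
#if 0
static const struct backlight_ops foo_bl_ops = {
	.update_status	= foo_bl_update_status,
};

static void
foo_bl_register(struct device *dev)
{
	struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.max_brightness = 255,
		.brightness = 255,
	};

	backlight_device_register("foo_bl", dev, NULL, &foo_bl_ops, &props);
}
#endif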