/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
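
/*
 * This file provides the Linux PCI driver API on top of FreeBSD's newbus
 * and bus_dma(9) frameworks: driver registration, device probe/attach/
 * detach/suspend/resume glue, SR-IOV hooks, and the DMA mapping and
 * dma_pool compatibility routines used by LinuxKPI consumers.
 */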

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;

static device_method_t pci_methods[] = {
        DEVMETHOD(device_probe, linux_pci_probe),
        DEVMETHOD(device_attach, linux_pci_attach),
        DEVMETHOD(device_detach, linux_pci_detach),
        DEVMETHOD(device_suspend, linux_pci_suspend),
        DEVMETHOD(device_resume, linux_pci_resume),
        DEVMETHOD(device_shutdown, linux_pci_shutdown),
        DEVMETHOD(pci_iov_init, linux_pci_iov_init),
        DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
        DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),
        DEVMETHOD_END
};
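
/*
 * Per-device DMA state: a default bus_dma tag, a mutex, and a pctrie
 * tracking those DMA mappings that are not 1:1 with their physical
 * address, keyed by bus address.
 */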

struct linux_dma_priv {
        uint64_t        dma_mask;
        struct mtx      lock;
        bus_dma_tag_t   dmat;
        struct pctrie   ptree;
};
#define DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
        struct linux_dma_priv *priv;
        int error;

        priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
        pdev->dev.dma_priv = priv;

        mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
        pctrie_init(&priv->ptree);

        /* create a default DMA tag */
        error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
        if (error != 0) {
                mtx_destroy(&priv->lock);
                free(priv, M_DEVBUF);
                pdev->dev.dma_priv = NULL;
        }
        return (error);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
        struct linux_dma_priv *priv;

        priv = pdev->dev.dma_priv;
        bus_dma_tag_destroy(priv->dmat);
        mtx_destroy(&priv->lock);
        free(priv, M_DEVBUF);
        pdev->dev.dma_priv = NULL;
        return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
        struct linux_dma_priv *priv;
        int error;

        priv = dev->dma_priv;
        if (priv->dmat) {
                if (priv->dma_mask == dma_mask)
                        return (0);
                bus_dma_tag_destroy(priv->dmat);
        }
        priv->dma_mask = dma_mask;

        error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
            1, 0,                       /* alignment, boundary */
            dma_mask,                   /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filtfunc, filtfuncarg */
            BUS_SPACE_MAXSIZE,          /* maxsize */
            1,                          /* nsegments */
            BUS_SPACE_MAXSIZE,          /* maxsegsz */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockfuncarg */
            &priv->dmat);
        return (-error);
}
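
/*
 * Match a newbus PCI device against the ID tables of all registered
 * Linux drivers.  On a hit, the matching driver is returned and *idp
 * points at the winning table entry.
 */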

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
        const struct pci_device_id *id;
        struct pci_driver *pdrv;
        uint16_t vendor;
        uint16_t device;
        uint16_t subvendor;
        uint16_t subdevice;

        vendor = pci_get_vendor(dev);
        device = pci_get_device(dev);
        subvendor = pci_get_subvendor(dev);
        subdevice = pci_get_subdevice(dev);

        spin_lock(&pci_lock);
        list_for_each_entry(pdrv, &pci_drivers, links) {
                for (id = pdrv->id_table; id->vendor != 0; id++) {
                        if (vendor == id->vendor &&
                            (PCI_ANY_ID == id->device || device == id->device) &&
                            (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
                            (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
                                *idp = id;
                                spin_unlock(&pci_lock);
                                return (pdrv);
                        }
                }
        }
        spin_unlock(&pci_lock);
        return (NULL);
}

static int
linux_pci_probe(device_t dev)
{
        const struct pci_device_id *id;
        struct pci_driver *pdrv;

        if ((pdrv = linux_pci_find(dev, &id)) == NULL)
                return (ENXIO);
        if (device_get_driver(dev) != &pdrv->bsddriver)
                return (ENXIO);
        device_set_desc(dev, pdrv->name);
        return (0);
}

static int
linux_pci_attach(device_t dev)
{
        const struct pci_device_id *id;
        struct pci_driver *pdrv;
        struct pci_dev *pdev;

        pdrv = linux_pci_find(dev, &id);
        pdev = device_get_softc(dev);

        MPASS(pdrv != NULL);
        MPASS(pdev != NULL);

        return (linux_pci_attach_device(dev, pdrv, id, pdev));
}
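
/*
 * Shared attach path: populate the Linux pci_dev from the FreeBSD device,
 * hook it into the kobject hierarchy, set up the default DMA tag and the
 * pci_bus, link the device onto pci_devices, and finally call the driver's
 * probe() routine.  The out_* labels unwind these steps in reverse order.
 */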

static int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
        struct resource_list_entry *rle;
        struct pci_bus *pbus;
        struct pci_devinfo *dinfo;
        device_t parent;
        int error;

        linux_set_current(curthread);

        if (pdrv != NULL && pdrv->isdrm) {
                parent = device_get_parent(dev);
                dinfo = device_get_ivars(parent);
                device_set_ivars(dev, dinfo);
        } else {
                dinfo = device_get_ivars(dev);
        }

        pdev->dev.parent = &linux_root_device;
        pdev->dev.bsddev = dev;
        INIT_LIST_HEAD(&pdev->dev.irqents);
        pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
        pdev->device = dinfo->cfg.device;
        pdev->vendor = dinfo->cfg.vendor;
        pdev->subsystem_vendor = dinfo->cfg.subvendor;
        pdev->subsystem_device = dinfo->cfg.subdevice;
        pdev->class = pci_get_class(dev);
        pdev->revision = pci_get_revid(dev);
        pdev->pdrv = pdrv;
        kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
        kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
        kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
            kobject_name(&pdev->dev.kobj));
        rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
        if (rle != NULL)
                pdev->dev.irq = rle->start;
        else
                pdev->dev.irq = LINUX_IRQ_INVALID;
        pdev->irq = pdev->dev.irq;
        error = linux_pdev_dma_init(pdev);
        if (error)
                goto out_dma_init;

        TAILQ_INIT(&pdev->mmio);
        pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
        pbus->self = pdev;
        pbus->number = pci_get_bus(dev);
        pbus->domain = pci_get_domain(dev);
        pdev->bus = pbus;

        spin_lock(&pci_lock);
        list_add(&pdev->links, &pci_devices);
        spin_unlock(&pci_lock);

        if (pdrv != NULL) {
                error = pdrv->probe(pdev, id);
                if (error)
                        goto out_probe;
        }
        return (0);

out_probe:
        free(pdev->bus, M_DEVBUF);
        linux_pdev_dma_uninit(pdev);
out_dma_init:
        spin_lock(&pci_lock);
        list_del(&pdev->links);
        spin_unlock(&pci_lock);
        put_device(&pdev->dev);
        return (-error);
}

static int
linux_pci_detach(device_t dev)
{
        struct pci_dev *pdev;

        pdev = device_get_softc(dev);

        MPASS(pdev != NULL);

        device_set_desc(dev, NULL);

        return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

        linux_set_current(curthread);

        if (pdev->pdrv != NULL)
                pdev->pdrv->remove(pdev);

        free(pdev->bus, M_DEVBUF);
        linux_pdev_dma_uninit(pdev);

        spin_lock(&pci_lock);
        list_del(&pdev->links);
        spin_unlock(&pci_lock);
        put_device(&pdev->dev);

        return (0);
}

static int
linux_pci_suspend(device_t dev)
{
        const struct dev_pm_ops *pmops;
        struct pm_message pm = { };
        struct pci_dev *pdev;
        int error;

        error = 0;
        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        pmops = pdev->pdrv->driver.pm;

        if (pdev->pdrv->suspend != NULL)
                error = -pdev->pdrv->suspend(pdev, pm);
        else if (pmops != NULL && pmops->suspend != NULL) {
                error = -pmops->suspend(&pdev->dev);
                if (error == 0 && pmops->suspend_late != NULL)
                        error = -pmops->suspend_late(&pdev->dev);
        }
        return (error);
}

static int
linux_pci_resume(device_t dev)
{
        const struct dev_pm_ops *pmops;
        struct pci_dev *pdev;
        int error;

        error = 0;
        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        pmops = pdev->pdrv->driver.pm;

        if (pdev->pdrv->resume != NULL)
                error = -pdev->pdrv->resume(pdev);
        else if (pmops != NULL && pmops->resume != NULL) {
                if (pmops->resume_early != NULL)
                        error = -pmops->resume_early(&pdev->dev);
                if (error == 0 && pmops->resume != NULL)
                        error = -pmops->resume(&pdev->dev);
        }
        return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->shutdown != NULL)
                pdev->pdrv->shutdown(pdev);
        return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
        struct pci_dev *pdev;
        int error;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->bsd_iov_init != NULL)
                error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
        else
                error = EINVAL;
        return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->bsd_iov_uninit != NULL)
                pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
        struct pci_dev *pdev;
        int error;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->bsd_iov_add_vf != NULL)
                error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
        else
                error = EINVAL;
        return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
        int error;

        linux_set_current(curthread);
        spin_lock(&pci_lock);
        list_add(&pdrv->links, &pci_drivers);
        spin_unlock(&pci_lock);
        pdrv->bsddriver.name = pdrv->name;
        pdrv->bsddriver.methods = pci_methods;
        pdrv->bsddriver.size = sizeof(struct pci_dev);

        mtx_lock(&Giant);
        error = devclass_add_driver(dc, &pdrv->bsddriver,
            BUS_PASS_DEFAULT, &pdrv->bsdclass);
        mtx_unlock(&Giant);
        return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
        devclass_t dc;

        dc = devclass_find("pci");
        if (dc == NULL)
                return (-ENXIO);
        pdrv->isdrm = false;
        return (_linux_pci_register_driver(pdrv, dc));
}
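
/*
 * A minimal registration sketch (hypothetical driver, for illustration
 * only; the mydrv_* names are not part of this file):
 *
 *	static const struct pci_device_id mydrv_ids[] = {
 *		{ .vendor = 0x1234, .device = 0x5678,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ }
 *	};
 *	static struct pci_driver mydrv_driver = {
 *		.name = "mydrv",
 *		.id_table = mydrv_ids,
 *		.probe = mydrv_probe,
 *		.remove = mydrv_remove,
 *	};
 *	...
 *	error = linux_pci_register_driver(&mydrv_driver);
 */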

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
        struct resource_list_entry *rle;
        rman_res_t newstart;
        device_t dev;

        if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
                return (0);
        dev = pci_find_dbsf(pdev->bus->domain, pdev->bus->number,
            PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
        MPASS(dev != NULL);
        if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
                device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
                    (uintmax_t)rle->start);
                return (0);
        }
        return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
        struct resource_list_entry *rle;

        if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
                return (0);
        return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
        devclass_t dc;

        dc = devclass_create("vgapci");
        if (dc == NULL)
                return (-ENXIO);
        pdrv->isdrm = true;
        pdrv->name = "drmn";
        return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
        devclass_t bus;

        bus = devclass_find("pci");

        spin_lock(&pci_lock);
        list_del(&pdrv->links);
        spin_unlock(&pci_lock);
        mtx_lock(&Giant);
        if (bus != NULL)
                devclass_delete_driver(bus, &pdrv->bsddriver);
        mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
        devclass_t bus;

        bus = devclass_find("vgapci");

        spin_lock(&pci_lock);
        list_del(&pdrv->links);
        spin_unlock(&pci_lock);
        mtx_lock(&Giant);
        if (bus != NULL)
                devclass_delete_driver(bus, &pdrv->bsddriver);
        mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));
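
/*
 * Each DMA mapping that is not 1:1 with its physical address is tracked
 * by a linux_dma_obj, allocated from a UMA zone and indexed in the owning
 * device's pctrie by bus address so that linux_dma_unmap() can find it
 * again.
 */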

struct linux_dma_obj {
        void            *vaddr;
        uint64_t        dma_addr;
        bus_dmamap_t    dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

        linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
            pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
            UMA_ALIGN_PTR, 0);
        linux_dma_obj_zone = uma_zcreate("linux_dma_object",
            sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

        uma_zdestroy(linux_dma_obj_zone);
        uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

        return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

        uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
        struct linux_dma_priv *priv;
        vm_paddr_t high;
        size_t align;
        void *mem;

        if (dev == NULL || dev->dma_priv == NULL) {
                *dma_handle = 0;
                return (NULL);
        }
        priv = dev->dma_priv;
        if (priv->dma_mask)
                high = priv->dma_mask;
        else if (flag & GFP_DMA32)
                high = BUS_SPACE_MAXADDR_32BIT;
        else
                high = BUS_SPACE_MAXADDR;
        align = PAGE_SIZE << get_order(size);
        mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
            VM_MEMATTR_DEFAULT);
        if (mem != NULL) {
                *dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
                if (*dma_handle == 0) {
                        kmem_free((vm_offset_t)mem, size);
                        mem = NULL;
                }
        } else {
                *dma_handle = 0;
        }
        return (mem);
}
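
/*
 * On i386/amd64/aarch64, bus_dma may have to bounce or translate a
 * mapping, so only mappings that are not 1:1 with the physical address
 * are tracked in the pctrie.  On other architectures the bus address is
 * assumed to equal the physical address.
 */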

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;
        int error, nseg;
        bus_dma_segment_t seg;

        priv = dev->dma_priv;

        /*
         * If the resultant mapping will be entirely 1:1 with the
         * physical address, short-circuit the remainder of the
         * bus_dma API.  This avoids tracking collisions in the pctrie
         * with the additional benefit of reducing overhead.
         */
        if (bus_dma_id_mapped(priv->dmat, phys, len))
                return (phys);

        obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
        if (obj == NULL)
                return (0);

        DMA_PRIV_LOCK(priv);
        if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
                DMA_PRIV_UNLOCK(priv);
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }

        nseg = -1;
        if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
            BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
                bus_dmamap_destroy(priv->dmat, obj->dmamap);
                DMA_PRIV_UNLOCK(priv);
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }

        KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
        obj->dma_addr = seg.ds_addr;

        error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
        if (error != 0) {
                bus_dmamap_unload(priv->dmat, obj->dmamap);
                bus_dmamap_destroy(priv->dmat, obj->dmamap);
                DMA_PRIV_UNLOCK(priv);
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }
        DMA_PRIV_UNLOCK(priv);
        return (obj->dma_addr);
}
#else
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{

        return (phys);
}
#endif
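
/*
 * Unmapping reverses linux_dma_map_phys(): a bus address that was never
 * inserted into the pctrie was a 1:1 mapping and needs no teardown.
 */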

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;

        priv = dev->dma_priv;

        if (pctrie_is_empty(&priv->ptree))
                return;

        DMA_PRIV_LOCK(priv);
        obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
        if (obj == NULL) {
                DMA_PRIV_UNLOCK(priv);
                return;
        }
        LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
        bus_dmamap_unload(priv->dmat, obj->dmamap);
        bus_dmamap_destroy(priv->dmat, obj->dmamap);
        DMA_PRIV_UNLOCK(priv);

        uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct linux_dma_priv *priv;
        struct scatterlist *sg;
        int i, nseg;
        bus_dma_segment_t seg;

        priv = dev->dma_priv;

        DMA_PRIV_LOCK(priv);

        /* create common DMA map in the first S/G entry */
        if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
                DMA_PRIV_UNLOCK(priv);
                return (0);
        }

        /* load all S/G list entries */
        for_each_sg(sgl, sg, nents, i) {
                nseg = -1;
                if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
                    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
                    &seg, &nseg) != 0) {
                        bus_dmamap_unload(priv->dmat, sgl->dma_map);
                        bus_dmamap_destroy(priv->dmat, sgl->dma_map);
                        DMA_PRIV_UNLOCK(priv);
                        return (0);
                }
                KASSERT(nseg == 0,
                    ("More than one segment (nseg=%d)", nseg + 1));

                sg_dma_address(sg) = seg.ds_addr;
        }
        DMA_PRIV_UNLOCK(priv);

        return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct linux_dma_priv *priv;

        priv = dev->dma_priv;

        DMA_PRIV_LOCK(priv);
        bus_dmamap_unload(priv->dmat, sgl->dma_map);
        bus_dmamap_destroy(priv->dmat, sgl->dma_map);
        DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
        struct device   *pool_device;
        uma_zone_t      pool_zone;
        struct mtx      pool_lock;
        bus_dma_tag_t   pool_dmat;
        size_t          pool_entry_size;
        struct pctrie   pool_ptree;
};

#define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)
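
/*
 * The dma_pool emulation is built on a UMA cache zone: the import/release
 * callbacks allocate and free backing memory with bus_dmamem_alloc() and
 * bus_dmamem_free(), while the ctor/dtor load and unload the bus address
 * as objects move between the cache and its consumers.  Outstanding
 * objects are additionally indexed in pool_ptree so that
 * linux_dma_pool_free() can map a bus address back to its object.
 */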

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
        struct linux_dma_obj *obj = mem;
        struct dma_pool *pool = arg;
        int error, nseg;
        bus_dma_segment_t seg;

        nseg = -1;
        DMA_POOL_LOCK(pool);
        error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
            vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
            &seg, &nseg);
        DMA_POOL_UNLOCK(pool);
        if (error != 0)
                return (-error);

        KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
        obj->dma_addr = seg.ds_addr;

        return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
        struct linux_dma_obj *obj = mem;
        struct dma_pool *pool = arg;

        DMA_POOL_LOCK(pool);
        bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
        DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
        struct dma_pool *pool = arg;
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;
        int error, i;

        priv = pool->pool_device->dma_priv;
        for (i = 0; i < count; i++) {
                obj = uma_zalloc(linux_dma_obj_zone, flags);
                if (obj == NULL)
                        break;

                error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
                    BUS_DMA_NOWAIT, &obj->dmamap);
                if (error != 0) {
                        uma_zfree(linux_dma_obj_zone, obj);
                        break;
                }

                store[i] = obj;
        }

        return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
        struct dma_pool *pool = arg;
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;
        int i;

        priv = pool->pool_device->dma_priv;
        for (i = 0; i < count; i++) {
                obj = store[i];
                bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
                uma_zfree(linux_dma_obj_zone, obj);
        }
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
        struct linux_dma_priv *priv;
        struct dma_pool *pool;

        priv = dev->dma_priv;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        pool->pool_device = dev;
        pool->pool_entry_size = size;

        if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
            align, boundary,            /* alignment, boundary */
            priv->dma_mask,             /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filtfunc, filtfuncarg */
            size,                       /* maxsize */
            1,                          /* nsegments */
            size,                       /* maxsegsz */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockfuncarg */
            &pool->pool_dmat)) {
                kfree(pool);
                return (NULL);
        }

        pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
            dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
            dma_pool_obj_release, pool, 0);

        mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
        pctrie_init(&pool->pool_ptree);

        return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

        uma_zdestroy(pool->pool_zone);
        bus_dma_tag_destroy(pool->pool_dmat);
        mtx_destroy(&pool->pool_lock);
        kfree(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
        struct linux_dma_obj *obj;

        obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
        if (obj == NULL)
                return (NULL);

        DMA_POOL_LOCK(pool);
        if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
                DMA_POOL_UNLOCK(pool);
                uma_zfree_arg(pool->pool_zone, obj, pool);
                return (NULL);
        }
        DMA_POOL_UNLOCK(pool);

        *handle = obj->dma_addr;
        return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
        struct linux_dma_obj *obj;

        DMA_POOL_LOCK(pool);
        obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
        if (obj == NULL) {
                DMA_POOL_UNLOCK(pool);
                return;
        }
        LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
        DMA_POOL_UNLOCK(pool);

        uma_zfree_arg(pool->pool_zone, obj, pool);
}
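
/*
 * A minimal usage sketch, assuming the linux/dmapool.h wrappers that
 * normally front these functions (hypothetical sizes, for illustration
 * only):
 *
 *	pool = dma_pool_create("mydrv_desc", &pdev->dev, 64, 64, 0);
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &busaddr);
 *	...use desc, hand busaddr to the device...
 *	dma_pool_free(pool, desc, busaddr);
 *	dma_pool_destroy(pool);
 */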