2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * From amd64/busdma_machdep.c, r204214
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/malloc.h>
40 #include <sys/interrupt.h>
41 #include <sys/kernel.h>
45 #include <sys/memdesc.h>
46 #include <sys/mutex.h>
47 #include <sys/sysctl.h>
51 #include <vm/vm_extern.h>
52 #include <vm/vm_kern.h>
53 #include <vm/vm_page.h>
54 #include <vm/vm_map.h>
56 #include <machine/atomic.h>
57 #include <machine/bus.h>
58 #include <machine/cpufunc.h>
59 #include <machine/md_var.h>
/*
 * NOTE(review): this chunk is a lossy line sample of the original file;
 * struct headers, braces, and many declarations are missing between the
 * surviving lines.  Only comments are added below — confirm against the
 * full sys/powerpc/powerpc/busdma_machdep.c before editing code.
 */
/* Cap on bounce pages per zone: 8192 pages or 2.5% of physical memory. */
63 #define MAX_BPAGES MIN(8192, physmem/40)
/*
 * Fragments of the bus_dma_tag / bus_dmamap structure definitions
 * (the enclosing "struct ... {" lines are not visible here).
 */
74 bus_dma_filter_t *filter;
82 bus_dma_lock_t *lockfunc;
84 struct bounce_zone *bounce_zone;
/* sysctl tree hw.busdma for bounce-page statistics. */
89 static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
/* Per-map list of bounce pages currently assigned to the map. */
93 STAILQ_HEAD(, bounce_page) bpages;
98 bus_dma_segment_t *segments;
100 bus_dmamap_callback_t *callback;
102 STAILQ_ENTRY(bus_dmamap) links;
106 static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
108 static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
/*
 * Accessors required by the shared bounce-buffer code included below.
 */
110 #define dmat_alignment(dmat) ((dmat)->alignment)
111 #define dmat_flags(dmat) ((dmat)->flags)
112 #define dmat_lowaddr(dmat) ((dmat)->lowaddr)
113 #define dmat_lockfunc(dmat) ((dmat)->lockfunc)
114 #define dmat_lockfuncarg(dmat) ((dmat)->lockfuncarg)
/* Pull in the MI bounce-page implementation (alloc_bounce_zone() etc.). */
116 #include "../../kern/subr_busdma_bounce.c"
119 	 * Return true if a match is made.
121 	 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
123 	 * If paddr is within the bounds of the dma tag then call the filter callback
124 	 * to check for a match, if there is no filter callback then assume a match.
/*
 * Decides whether 'paddr' must be bounced for this tag chain.
 * NOTE(review): the retval initialization, the "do {" loop head, and the
 * parent-walk assignment are missing from this sampled chunk.
 */
127 run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
/* Bounce when no filter/iommu and paddr lies in the exclusion window. */
134 if (dmat->filter == NULL && dmat->iommu == NULL &&
135 paddr > dmat->lowaddr && paddr <= dmat->highaddr)
/* Bounce when the address violates the tag's alignment constraint. */
137 if (dmat->filter == NULL &&
138 !vm_addr_align_ok(paddr, dmat->alignment))
/* Otherwise defer the decision to the client-supplied filter callback. */
140 if (dmat->filter != NULL &&
141 (*dmat->filter)(dmat->filterarg, paddr) != 0)
145 } while (retval == 0 && dmat != NULL);
/* Private tag flags, overlaid on the bus-reserved BUS_DMA_BUS* bits. */
149 #define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3
150 #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
152  * Allocate a device specific dma_tag.
/*
 * Creates a new DMA tag, inheriting and tightening restrictions from
 * 'parent'.  Returns the tag through *dmat; returns 0 or an errno.
 * NOTE(review): several lines (error init, early returns, closing braces)
 * are missing from this sampled chunk.
 */
155 bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
156 bus_addr_t boundary, bus_addr_t lowaddr,
157 bus_addr_t highaddr, bus_dma_filter_t *filter,
158 void *filterarg, bus_size_t maxsize, int nsegments,
159 bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
160 void *lockfuncarg, bus_dma_tag_t *dmat)
162 bus_dma_tag_t newtag;
165 /* Basic sanity checking */
166 if (boundary != 0 && boundary < maxsegsz)
173 /* Return a NULL tag on failure */
176 newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
178 if (newtag == NULL) {
179 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
180 __func__, newtag, 0, error);
184 newtag->parent = parent;
185 newtag->alignment = alignment;
186 newtag->boundary = boundary;
/* Round the exclusion window edges up to the end of their page. */
187 newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
188 newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
189 newtag->filter = filter;
190 newtag->filterarg = filterarg;
191 newtag->maxsize = maxsize;
192 newtag->nsegments = nsegments;
193 newtag->maxsegsz = maxsegsz;
194 newtag->flags = flags;
195 newtag->ref_count = 1; /* Count ourself */
196 newtag->map_count = 0;
/* Use the caller's lock function, or the busdma default (an "else" line
 * is missing here in the sampled chunk). */
197 if (lockfunc != NULL) {
198 newtag->lockfunc = lockfunc;
199 newtag->lockfuncarg = lockfuncarg;
201 newtag->lockfunc = _busdma_dflt_lock;
202 newtag->lockfuncarg = NULL;
205 /* Take into account any restrictions imposed by our parent tag */
206 if (parent != NULL) {
207 newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
208 newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
209 if (newtag->boundary == 0)
210 newtag->boundary = parent->boundary;
211 else if (parent->boundary != 0)
212 newtag->boundary = MIN(parent->boundary,
214 if (newtag->filter == NULL) {
216 * Short circuit looking at our parent directly
217 * since we have encapsulated all of its information
219 newtag->filter = parent->filter;
220 newtag->filterarg = parent->filterarg;
221 newtag->parent = parent->parent;
/* Keep the (grand)parent alive while this tag references it. */
223 if (newtag->parent != NULL)
224 atomic_add_int(&parent->ref_count, 1);
225 newtag->iommu = parent->iommu;
226 newtag->iommu_cookie = parent->iommu_cookie;
/* Without an IOMMU, any reachable memory above lowaddr may need bouncing. */
229 if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
230 newtag->flags |= BUS_DMA_COULD_BOUNCE;
232 if (newtag->alignment > 1)
233 newtag->flags |= BUS_DMA_COULD_BOUNCE;
/* BUS_DMA_ALLOCNOW: pre-populate the bounce zone up to maxsize now. */
235 if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
236 (flags & BUS_DMA_ALLOCNOW) != 0) {
237 struct bounce_zone *bz;
241 if ((error = alloc_bounce_zone(newtag)) != 0) {
242 free(newtag, M_DEVBUF);
245 bz = newtag->bounce_zone;
247 if (ptoa(bz->total_bpages) < maxsize) {
250 pages = atop(maxsize) - bz->total_bpages;
252 /* Add pages to our bounce pool */
253 if (alloc_bounce_pages(newtag, pages) < pages)
256 /* Performed initial allocation */
257 newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
261 free(newtag, M_DEVBUF);
265 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
266 __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
/*
 * Copy an existing tag's parameters into a template, field by field,
 * so the template can later create an equivalent tag.  No-op when either
 * argument is NULL.
 */
271 bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat)
274 if (t == NULL || dmat == NULL)
277 t->parent = dmat->parent;
278 t->alignment = dmat->alignment;
279 t->boundary = dmat->boundary;
280 t->lowaddr = dmat->lowaddr;
281 t->highaddr = dmat->highaddr;
282 t->maxsize = dmat->maxsize;
283 t->nsegments = dmat->nsegments;
/* Template field is spelled "maxsegsize"; the tag field is "maxsegsz". */
284 t->maxsegsize = dmat->maxsegsz;
285 t->flags = dmat->flags;
286 t->lockfunc = dmat->lockfunc;
287 t->lockfuncarg = dmat->lockfuncarg;
/* NUMA domain binding is not supported on this platform (body not
 * visible in this sampled chunk -- presumably empty; confirm). */
291 bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
/*
 * Release a reference on 'dmat', freeing it (and walking up the parent
 * chain releasing inherited references) once the count reaches zero.
 * Fails with EBUSY while maps created from the tag still exist.
 * NOTE(review): error init, loop exit, and return lines are missing from
 * this sampled chunk.
 */
298 bus_dma_tag_destroy(bus_dma_tag_t dmat)
300 bus_dma_tag_t dmat_copy __unused;
/* Refuse to destroy a tag that still has outstanding maps. */
307 if (dmat->map_count != 0) {
312 while (dmat != NULL) {
313 bus_dma_tag_t parent;
315 parent = dmat->parent;
316 atomic_subtract_int(&dmat->ref_count, 1);
317 if (dmat->ref_count == 0) {
318 free(dmat, M_DEVBUF);
320 * Last reference count, so
321 * release our reference
322 * count on our parent.
330 CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
335  * Allocate a handle for mapping from kva/uva/physical
336  * address space into bus device space.
/*
 * Returns the new map through *mapp; 0 or errno.  For tags that may
 * bounce, also ensures a bounce zone exists and grows its page pool.
 */
339 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
345 *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
348 CTR3(KTR_BUSDMA, "%s: tag %p error %d",
349 __func__, dmat, ENOMEM);
354 * Bouncing might be required if the driver asks for an active
355 * exclusion region, a data alignment that is stricter than 1, and/or
356 * an active address boundary.
358 if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
360 struct bounce_zone *bz;
/* Lazily create the bounce zone on first map creation. */
363 if (dmat->bounce_zone == NULL) {
364 if ((error = alloc_bounce_zone(dmat)) != 0)
367 bz = dmat->bounce_zone;
369 /* Initialize the new map */
370 STAILQ_INIT(&((*mapp)->bpages));
373 * Attempt to add pages to our pool on a per-instance
374 * basis up to a sane limit.
/* Strict alignment can force bouncing anywhere, so allow the full
 * pool; otherwise cap by the pages above the exclusion threshold. */
376 if (dmat->alignment > 1)
377 maxpages = MAX_BPAGES;
379 maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr));
380 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
381 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
/* Grow the pool by roughly maxsize worth of pages, at least one. */
384 pages = MAX(atop(dmat->maxsize), 1);
385 pages = MIN(maxpages - bz->total_bpages, pages);
386 pages = MAX(pages, 1);
387 if (alloc_bounce_pages(dmat, pages) < pages)
390 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
392 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
/* Per-map segment array sized to the tag's nsegments. */
401 (*mapp)->segments = (bus_dma_segment_t *)malloc(
402 sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
404 if ((*mapp)->segments == NULL) {
405 CTR3(KTR_BUSDMA, "%s: tag %p error %d",
406 __func__, dmat, ENOMEM);
412 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
413 __func__, dmat, dmat->flags, error);
418  * Destroy a handle for mapping from kva/uva/physical
419  * address space into bus device space.
/*
 * Fails with EBUSY if the map still holds bounce pages (i.e. it is
 * still loaded); otherwise releases the bounce-zone reference and the
 * per-map segment array.
 */
422 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
424 if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
425 if (STAILQ_FIRST(&map->bpages) != NULL) {
426 CTR3(KTR_BUSDMA, "%s: tag %p error %d",
427 __func__, dmat, EBUSY);
430 if (dmat->bounce_zone)
431 dmat->bounce_zone->map_count--;
433 free(map->segments, M_DEVBUF);
436 CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
441  * Allocate a piece of memory that can be efficiently mapped into
442  * bus device space based on the constraints listed in the dma tag.
443  * A dmamap for use with dmamap_load is also allocated.
/*
 * Returns the allocation through *vaddr and the map through *mapp.
 * Small, unconstrained allocations use malloc(); everything else uses
 * kmem_alloc_contig() and is flagged so bus_dmamem_free() can tell the
 * two apart.
 */
446 bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
/* Translate busdma wait semantics into malloc/kmem M_* flags. */
452 if (flags & BUS_DMA_NOWAIT)
457 bus_dmamap_create(dmat, flags, mapp);
459 if (flags & BUS_DMA_ZERO)
461 if (flags & BUS_DMA_NOCACHE)
462 attr = VM_MEMATTR_UNCACHEABLE;
464 attr = VM_MEMATTR_DEFAULT;
468 * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
469 * alignment guarantees of malloc need to be nailed down, and the
470 * code below should be rewritten to take that into account.
472 * In the meantime, we'll warn the user if malloc gets it wrong.
474 if ((dmat->maxsize <= PAGE_SIZE) &&
475 (dmat->alignment <= dmat->maxsize) &&
476 dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
477 attr == VM_MEMATTR_DEFAULT) {
478 *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
481 * XXX Use Contigmalloc until it is merged into this facility
482 * and handles multi-seg allocations. Nobody is doing
483 * multi-seg allocations yet though.
484 * XXX Certain AGP hardware does.
/* alignment==0 must be passed to kmem_alloc_contig() as 1. */
486 *vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
487 dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
488 dmat->boundary, attr);
489 (*mapp)->contigalloc = 1;
491 if (*vaddr == NULL) {
492 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
493 __func__, dmat, dmat->flags, ENOMEM);
/* Sanity check: warn if the allocator failed to honor alignment. */
495 } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alignment)) {
496 printf("bus_dmamem_alloc failed to align memory properly.\n");
498 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
499 __func__, dmat, dmat->flags, 0);
504  * Free a piece of memory and its associated dmamap, that was allocated
505  * via bus_dmamem_alloc. Make the same choice for free/contigfree.
/*
 * Mirror of bus_dmamem_alloc(): the map's contigalloc flag selects
 * free() vs. kmem_free() to match the allocator originally used.
 */
508 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
511 if (!map->contigalloc)
512 free(vaddr, M_DEVBUF);
514 kmem_free(vaddr, dmat->maxsize);
515 bus_dmamap_destroy(dmat, map);
516 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
/*
 * Pre-scan a physical buffer and record in map->pagesneeded how many
 * bounce pages the load will require.  Only runs on a freshly-loaded
 * map (pagesneeded == 0).
 * NOTE(review): the curaddr initialization/advance lines are missing
 * from this sampled chunk.
 */
520 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
521 bus_size_t buflen, int flags)
526 if (map->pagesneeded == 0) {
527 CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
528 "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
529 dmat->boundary, dmat->alignment);
530 CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
532 * Count the number of bounce pages
533 * needed in order to complete this transfer
536 while (buflen != 0) {
/* One segment per iteration, at most maxsegsz, and at most to the
 * end of the current page when it must bounce. */
537 sgsize = MIN(buflen, dmat->maxsegsz);
538 if (run_filter(dmat, curaddr) != 0) {
540 PAGE_SIZE - (curaddr & PAGE_MASK));
546 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
/*
 * Virtual-address counterpart of _bus_dmamap_count_phys(): walk the
 * buffer page by page, translate each VA through 'pmap', and count the
 * pages that run_filter() says must bounce.
 */
551 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
552 void *buf, bus_size_t buflen, int flags)
555 vm_offset_t vendaddr;
558 if (map->pagesneeded == 0) {
559 CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
560 "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
561 dmat->boundary, dmat->alignment);
562 CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
564 * Count the number of bounce pages
565 * needed in order to complete this transfer
567 vaddr = (vm_offset_t)buf;
568 vendaddr = (vm_offset_t)buf + buflen;
570 while (vaddr < vendaddr) {
/* Work within the current page only. */
573 sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
/* Kernel VAs use the cheap kextract path; user VAs go via the pmap. */
574 if (pmap == kernel_pmap)
575 paddr = pmap_kextract(vaddr);
577 paddr = pmap_extract(pmap, vaddr);
578 if (run_filter(dmat, paddr) != 0) {
/* Bounced chunks are padded out to the tag's alignment. */
579 sg_len = roundup2(sg_len, dmat->alignment);
584 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
589  * Add a single contiguous physical range to the segment list.
/*
 * Appends [curaddr, curaddr+sgsize) to segs[], clipping at boundary
 * crossings and coalescing with the previous segment when contiguous.
 * NOTE(review): the seg-index setup and the return statement are
 * missing from this sampled chunk; presumably returns the size added.
 */
592 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
593 bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
598 * Make sure we don't cross any boundaries.
600 if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
601 sgsize = roundup2(curaddr, dmat->boundary) - curaddr;
604 * Insert chunk into a segment, coalescing with
605 * previous segment if possible.
610 segs[seg].ds_addr = curaddr;
611 segs[seg].ds_len = sgsize;
/* Coalesce only when contiguous, within maxsegsz, and boundary-safe. */
613 if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
614 (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
615 vm_addr_bound_ok(segs[seg].ds_addr,
616 segs[seg].ds_len + sgsize, dmat->boundary))
617 segs[seg].ds_len += sgsize;
/* Otherwise start a new segment, failing once nsegments is exhausted. */
619 if (++seg >= dmat->nsegments)
621 segs[seg].ds_addr = curaddr;
622 segs[seg].ds_len = sgsize;
630  * Utility function to load a physical buffer. segp contains
631  * the starting segment on entrance, and the ending segment on exit.
/*
 * Counts/reserves bounce pages if the tag may bounce, then chops the
 * buffer into segments, substituting bounce pages where run_filter()
 * demands.  Returns EFBIG when the segment list is exhausted.
 */
634 _bus_dmamap_load_phys(bus_dma_tag_t dmat,
636 vm_paddr_t buf, bus_size_t buflen,
638 bus_dma_segment_t *segs,
/* Default to the map's own segment array when caller passes none. */
646 segs = map->segments;
648 if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
649 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
650 if (map->pagesneeded != 0) {
651 error = _bus_dmamap_reserve_pages(dmat, map, flags);
659 sgsize = MIN(buflen, dmat->maxsegsz);
/* Redirect this chunk through a bounce page (no KVA: 0). */
660 if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
661 sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
662 curaddr = add_bounce_page(dmat, map, 0, curaddr,
665 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
676 return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
/*
 * Load an array of vm_page pointers by deferring to the generic
 * page-at-a-time helper; no machine-specific fast path here.
 */
680 _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
681 struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
682 bus_dma_segment_t *segs, int *segp)
685 return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
690  * Utility function to load a linear buffer. segp contains
691  * the starting segment on entrance, and the ending segment on exit.
/*
 * Virtual-address load path: translates each page of 'buf' through
 * 'pmap', bouncing chunks that run_filter() rejects.  Unlike the
 * physical path, the kernel VA is handed to add_bounce_page() so sync
 * can copy without a temporary mapping.
 */
694 _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
696 void *buf, bus_size_t buflen,
699 bus_dma_segment_t *segs,
704 vm_offset_t kvaddr, vaddr;
708 segs = map->segments;
710 if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
711 _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
712 if (map->pagesneeded != 0) {
713 error = _bus_dmamap_reserve_pages(dmat, map, flags);
719 vaddr = (vm_offset_t)buf;
722 bus_size_t max_sgsize;
725 * Get the physical address for this segment.
727 if (pmap == kernel_pmap) {
728 curaddr = pmap_kextract(vaddr);
731 curaddr = pmap_extract(pmap, vaddr);
736 * Compute the segment size, and adjust counts.
738 max_sgsize = MIN(buflen, dmat->maxsegsz);
739 sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
740 if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
/* Pad bounced chunks to the tag's alignment (mirrors the count
 * pass) before clamping to max_sgsize. */
741 sgsize = roundup2(sgsize, dmat->alignment);
742 sgsize = MIN(sgsize, max_sgsize);
743 curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
746 sgsize = MIN(sgsize, max_sgsize);
749 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
760 return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
/*
 * Stash the deferred-load callback on the map so the bounce code can
 * re-invoke the load when bounce pages become available (only relevant
 * for tags that may bounce).
 */
764 _bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
765 struct memdesc *mem, bus_dmamap_callback_t *callback,
769 if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
772 map->callback = callback;
773 map->callback_arg = callback_arg;
/*
 * Finalize a load: copy the built segment list into the map, let the
 * IOMMU (if any) rewrite it into bus space, then copy it back out to
 * the caller's array.  Returns the segment array the caller should use.
 * NOTE(review): control-flow lines around the copies are missing from
 * this sampled chunk.
 */
778 _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
779 bus_dma_segment_t *segs, int nsegs, int error)
784 memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
/* IOMMU_MAP may rewrite addresses and change map->nsegs. */
785 if (dmat->iommu != NULL)
786 IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
787 dmat->lowaddr, dmat->highaddr, dmat->alignment,
788 dmat->boundary, dmat->iommu_cookie);
791 memcpy(segs, map->segments, map->nsegs*sizeof(segs[0]));
793 segs = map->segments;
799  * Release the mapping held by map.
/*
 * Undo the IOMMU mapping (when present) and return any bounce pages
 * held by the map to the bounce zone.
 */
802 bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
805 IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie);
809 free_bounce_pages(dmat, map);
/*
 * Synchronize bounce buffers with the client's data.
 * PREWRITE copies client -> bounce page before the device reads;
 * POSTREAD copies bounce page -> client after the device writes.
 * Unmapped clients (datavaddr == 0) are reached through a transient
 * pmap_quick_enter_page() mapping.
 */
813 bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
815 struct bounce_page *bpage;
816 vm_offset_t datavaddr, tempvaddr;
/* Nothing to do unless this map actually holds bounce pages. */
818 if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
820 * Handle data bouncing. We might also
821 * want to add support for invalidating
822 * the caches on broken hardware
824 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
825 "performing bounce", __func__, dmat, dmat->flags, op);
827 if (op & BUS_DMASYNC_PREWRITE) {
828 while (bpage != NULL) {
830 datavaddr = bpage->datavaddr;
/* No client KVA: temporarily map the backing page. */
831 if (datavaddr == 0) {
832 tempvaddr = pmap_quick_enter_page(
834 datavaddr = tempvaddr |
838 bcopy((void *)datavaddr,
839 (void *)bpage->vaddr, bpage->datacount);
842 pmap_quick_remove_page(tempvaddr);
843 bpage = STAILQ_NEXT(bpage, links);
845 dmat->bounce_zone->total_bounced++;
848 if (op & BUS_DMASYNC_POSTREAD) {
849 while (bpage != NULL) {
851 datavaddr = bpage->datavaddr;
853 if (datavaddr == 0) {
853 tempvaddr = pmap_quick_enter_page(
855 datavaddr = tempvaddr |
/* Reverse direction of the PREWRITE copy. */
859 bcopy((void *)bpage->vaddr,
860 (void *)datavaddr, bpage->datacount);
863 pmap_quick_remove_page(tempvaddr);
864 bpage = STAILQ_NEXT(bpage, links);
866 dmat->bounce_zone->total_bounced++;
874 bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
877 tag->iommu_cookie = cookie;