 * Copyright (c) 2012-2014 Ian Lepore
 * Copyright (c) 2010 Mark Tinguely
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/counter.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_DMA_SEGMENTS	4096
#define BUS_DMA_EXCL_BOUNCE	BUS_DMA_BUS2
#define BUS_DMA_ALIGN_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_COULD_BOUNCE	(BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE)
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
	bus_dma_filter_t *filter;
	bus_dma_lock_t	*lockfunc;
	struct bounce_zone *bounce_zone;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range *ranges;
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;

	vm_offset_t	vaddr;		/* kva of client data */
	bus_addr_t	busaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */

int busdma_swi_pending;
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	bus_size_t	alignment;
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static uint32_t tags_total;
static uint32_t maps_total;
static uint32_t maps_dmamem;
static uint32_t maps_coherent;
static counter_u64_t maploads_total;
static counter_u64_t maploads_bounced;
static counter_u64_t maploads_coherent;
static counter_u64_t maploads_dmamem;
static counter_u64_t maploads_mbuf;
static counter_u64_t maploads_physmem;

static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0,
    "Number of active tags");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0,
    "Number of active maps");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0,
    "Number of active maps for bus_dmamem_alloc buffers");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0,
    "Number of active maps with BUS_DMA_COHERENT flag set");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD,
    &maploads_total, "Number of load operations performed");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD,
    &maploads_bounced, "Number of load operations that used bounce buffers");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD,
    &maploads_coherent, "Number of load operations on BUS_DMA_COHERENT memory");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD,
    &maploads_dmamem, "Number of load operations on bus_dmamem_alloc buffers");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD,
    &maploads_mbuf, "Number of load operations for mbufs");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD,
    &maploads_physmem, "Number of load operations on physical buffers");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");
	struct bp_list	bpages;
	bus_dmamap_callback_t *callback;
#define DMAMAP_COHERENT		(1 << 0)
#define DMAMAP_DMAMEM_ALLOC	(1 << 1)
#define DMAMAP_MBUF		(1 << 2)
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dma_segment_t *segments;
	struct sync_list slist[];

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_addr_t addr,
    bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags);
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int flags);

static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */
static void
busdma_init(void *dummy)
{
	int uma_flags;

	maploads_total = counter_u64_alloc(M_WAITOK);
	maploads_bounced = counter_u64_alloc(M_WAITOK);
	maploads_coherent = counter_u64_alloc(M_WAITOK);
	maploads_dmamem = counter_u64_alloc(M_WAITOK);
	maploads_mbuf = counter_u64_alloc(M_WAITOK);
	maploads_physmem = counter_u64_alloc(M_WAITOK);

	uma_flags = 0;

	/* Create a cache of buffers in standard (cacheable) memory. */
	standard_allocator = busdma_bufalloc_create("buffer",
	    arm_dcache_align,	/* minimum_alignment */
	    NULL,		/* uma_alloc func */
	    NULL,		/* uma_free func */
	    uma_flags);		/* uma_zcreate_flags */

	/*
	 * Force the UMA zone to allocate service structures like slabs
	 * using its own allocator.  The uma_debug code performs atomic ops
	 * on uma_slab_t fields, and the safety of those operations is not
	 * guaranteed for write-back caches.
	 */
	uma_flags = UMA_ZONE_OFFPAGE;

	/*
	 * Create a cache of buffers in uncacheable memory, to implement the
	 * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag.
	 */
	coherent_allocator = busdma_bufalloc_create("coherent",
	    arm_dcache_align,	/* minimum_alignment */
	    busdma_bufalloc_alloc_uncacheable,
	    busdma_bufalloc_free_uncacheable,
	    uma_flags);		/* uma_zcreate_flags */
}
/*
 * This init historically used SI_SUB_VM, but now the init code requires
 * malloc(9) using M_DEVBUF memory and the pcpu zones for counter(9), which get
 * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by
 * using SI_SUB_KMEM+1.
 */
SYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL);
/*
 * This routine checks the exclusion zone constraints from a tag against the
 * physical RAM available on the machine.  If a tag specifies an exclusion zone
 * but there's no RAM in that zone, then we avoid allocating resources to bounce
 * a request, and we can use any memory allocator (as opposed to needing
 * kmem_alloc_contig() just because it can allocate pages in an address range).
 *
 * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the
 * same value on 32-bit architectures) as their lowaddr constraint, and we can't
 * possibly have RAM at an address higher than the highest address we can
 * express, so we take a fast out.
 */
static int
exclusion_bounce_check(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	if (lowaddr >= BUS_SPACE_MAXADDR)
		return (0);

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1]) ||
		    (lowaddr < phys_avail[i] && highaddr >= phys_avail[i]))
			return (1);
	}
	return (0);
}
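/*
 * Worked example (illustrative, not from this file): on a board whose only
 * RAM segment is phys_avail[] = { 0x20000000, 0x40000000, 0, 0 }, a tag with
 * lowaddr = 0x1fffffff and highaddr = BUS_SPACE_MAXADDR matches the second
 * test above (lowaddr < 0x20000000 && highaddr >= 0x20000000), so some RAM
 * lies within the exclusion zone and bounce resources must be set aside.  A
 * tag with lowaddr = BUS_SPACE_MAXADDR takes the fast out and never needs
 * exclusion bouncing.
 */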
/*
 * Return true if the tag has an exclusion zone that could lead to bouncing.
 */
static __inline int
exclusion_bounce(bus_dma_tag_t dmat)
{

	return (dmat->flags & BUS_DMA_EXCL_BOUNCE);
}

/*
 * Return true if the given address does not fall on the alignment boundary.
 */
static __inline int
alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
{

	return (addr & (dmat->alignment - 1));
}

/*
 * Return true if the DMA should bounce because the start or end does not fall
 * on a cacheline boundary (which would require a partial cacheline flush).
 * COHERENT memory doesn't trigger cacheline flushes.  Memory allocated by
 * bus_dmamem_alloc() is always aligned to cacheline boundaries, and there's a
 * strict rule that such memory cannot be accessed by the CPU while DMA is in
 * progress (or by multiple DMA engines at once), so that it's always safe to do
 * full cacheline flushes even if that affects memory outside the range of a
 * given DMA operation that doesn't involve the full allocated buffer.  If we're
 * mapping an mbuf, that follows the same rules as a buffer we allocated.
 */
static __inline int
cacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size)
{

	if (map->flags & (DMAMAP_DMAMEM_ALLOC | DMAMAP_COHERENT | DMAMAP_MBUF))
		return (0);
	return ((addr | size) & arm_dcache_align_mask);
}
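/*
 * Worked example (illustrative): with 32-byte cachelines,
 * arm_dcache_align_mask is 0x1f.  A non-COHERENT transfer at addr 0x1000 with
 * size 0x40 yields (0x1000 | 0x40) & 0x1f == 0, so no bounce is needed; the
 * same transfer at addr 0x1004, or with size 0x33, leaves low-order bits set
 * and forces a bounce because a partial cacheline flush would otherwise be
 * required.
 */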
/*
 * Return true if we might need to bounce the DMA described by addr and size.
 *
 * This is used to quick-check whether we need to do the more expensive work of
 * checking the DMA page-by-page looking for alignment and exclusion bounces.
 *
 * Note that the addr argument might be either virtual or physical.  It doesn't
 * matter because we only look at the low-order bits, which are the same in both
 * address spaces.
 */
static __inline int
might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size)
{

	return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) ||
	    alignment_bounce(dmat, addr) ||
	    cacheline_bounce(map, addr, size));
}

/*
 * Return true if we must bounce the DMA described by paddr and size.
 *
 * Bouncing can be triggered by DMA that doesn't begin and end on cacheline
 * boundaries, or doesn't begin on an alignment boundary, or falls within the
 * exclusion zone of any tag in the ancestry chain.
 *
 * For exclusions, walk the chain of tags comparing paddr to the exclusion zone
 * within each tag.  If the tag has a filter function, use it to decide whether
 * the DMA needs to bounce, otherwise any DMA within the zone bounces.
 */
static int
must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
    bus_size_t size)
{

	if (cacheline_bounce(map, paddr, size))
		return (1);

	/*
	 * The tag already contains ancestors' alignment restrictions so this
	 * check doesn't need to be inside the loop.
	 */
	if (alignment_bounce(dmat, paddr))
		return (1);

	/*
	 * Even though each tag has an exclusion zone that is a superset of its
	 * own and all its ancestors' exclusions, the exclusion zone of each tag
	 * up the chain must be checked within the loop, because the busdma
	 * rules say the filter function is called only when the address lies
	 * within the low-highaddr range of the tag that filterfunc belongs to.
	 */
	while (dmat != NULL && exclusion_bounce(dmat)) {
		if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) &&
		    (dmat->filter == NULL ||
		    dmat->filter(dmat->filterarg, paddr) != 0))
			return (1);
		dmat = dmat->parent;
	}
	return (0);
}
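/*
 * Example filter (a hypothetical sketch, not part of this file): a driver
 * whose device cannot DMA to the 16MB region at 0x0f000000 could register a
 * filter via bus_dma_tag_create(); returning non-zero forces a bounce.
 *
 *	static int
 *	mydev_dma_filter(void *arg, bus_addr_t paddr)
 *	{
 *		return ((paddr & 0xff000000) == 0x0f000000);
 *	}
 *
 * Per the loop above, the filter is consulted only when paddr already lies
 * within the tag's lowaddr/highaddr exclusion range.
 */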
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}
	return (NULL);
}
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
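/*
 * Usage sketch (hypothetical driver code): a driver whose deferred load
 * callback may be invoked from busdma_swi() passes this function and its own
 * mutex when creating a tag:
 *
 *	mtx_init(&sc->mtx, "mydev", NULL, MTX_DEF);
 *	error = bus_dma_tag_create(..., busdma_lock_mutex, &sc->mtx, &sc->tag);
 *
 * Drivers with no locks of their own can pass &Giant as lockfuncarg, per the
 * comment above.
 */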
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 *
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	panic("driver error: busdma dflt_lock called");
}
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	if (parent == NULL)
		parent = arm_root_dma_tag;

	/* Basic sanity checking. */
	KASSERT(boundary == 0 || powerof2(boundary),
	    ("dma tag boundary %lu, must be a power of 2", boundary));
	KASSERT(boundary == 0 || boundary >= maxsegsz,
	    ("dma tag boundary %lu is < maxsegsz %lu\n", boundary, maxsegsz));
	KASSERT(alignment != 0 && powerof2(alignment),
	    ("dma tag alignment %lu, must be non-zero power of 2", alignment));
	KASSERT(maxsegsz != 0, ("dma tag maxsegsz must not be zero"));

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		newtag->alignment = MAX(parent->alignment, newtag->alignment);
		newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE;
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit to looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (exclusion_bounce_check(newtag->lowaddr, newtag->highaddr))
		newtag->flags |= BUS_DMA_EXCL_BOUNCE;
	if (alignment_bounce(newtag, 1))
		newtag->flags |= BUS_DMA_ALIGN_BOUNCE;

	/*
	 * Any request can auto-bounce due to cacheline alignment, in addition
	 * to any alignment or boundary specifications in the tag, so if the
	 * ALLOCNOW flag is set, there's always work to do.
	 */
	if ((flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;
		/*
		 * Round size up to a full page, and add one more page because
		 * there can always be one more boundary crossing than the
		 * number of pages in a transfer.
		 */
		maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE;

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		atomic_add_32(&tags_total, 1);
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
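/*
 * Usage sketch (hypothetical driver code, names are illustrative): a typical
 * tag for a device limited to 32-bit addresses, loading single buffers of up
 * to one mbuf cluster:
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(dev),		parent
 *	    1, 0,				alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,		lowaddr
 *	    BUS_SPACE_MAXADDR,			highaddr
 *	    NULL, NULL,				filter, filterarg
 *	    MCLBYTES, 1, MCLBYTES,		maxsize, nsegments, maxsegsz
 *	    0,					flags
 *	    busdma_lock_mutex, &sc->mtx,	lockfunc, lockfuncarg
 *	    &sc->buf_tag);
 *
 * Passing NULL for lockfunc installs dflt_lock(), which panics if a load is
 * ever deferred, so it is only safe with BUS_DMA_NOWAIT loads.
 */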
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {
		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}
		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				atomic_subtract_32(&tags_total, 1);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}
static int
allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp)
{
	struct bounce_zone *bz;
	int error, maxpages;

	if (dmat->bounce_zone == NULL)
		if ((error = alloc_bounce_zone(dmat)) != 0)
			return (error);
	bz = dmat->bounce_zone;
	/* Initialize the new map */
	STAILQ_INIT(&(mapp->bpages));

	/*
	 * Attempt to add pages to our pool on a per-instance basis up to a sane
	 * limit.  Even if the tag isn't flagged as COULD_BOUNCE due to
	 * alignment and boundary constraints, it could still auto-bounce due to
	 * cacheline alignment, which requires at most two bounce pages.  (See
	 * the worked numbers below.)
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE)
		maxpages = MAX_BPAGES;
	else
		maxpages = 2 * bz->map_count;
	if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
	    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
		int pages;

		pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1;
		pages = MIN(maxpages - bz->total_bpages, pages);
		pages = MAX(pages, 2);
		if (alloc_bounce_pages(dmat, pages) < pages)
			return (ENOMEM);

		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0)
			dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}
	bz->map_count++;
	return (0);
}
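/*
 * Worked numbers (illustrative): for a tag with maxsize = 64KB on 4KB pages,
 * pages = atop(roundup2(65536, 4096)) + 1 = 17.  The request is clipped to
 * maxpages - total_bpages and then raised to at least 2, because cacheline
 * bouncing can always require up to two pages: one partial cacheline at each
 * end of a transfer.
 */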
static bus_dmamap_t
allocate_map(bus_dma_tag_t dmat, int mflags)
{
	int mapsize, segsize;
	bus_dmamap_t map;

	/*
	 * Allocate the map.  The map structure ends with an embedded
	 * variable-sized array of sync_list structures.  Following that
	 * we allocate enough extra space to hold the array of bus_dma_segments.
	 */
	KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS,
	    ("cannot allocate %u dma segments (max is %u)",
	    dmat->nsegments, MAX_DMA_SEGMENTS));
	segsize = sizeof(struct bus_dma_segment) * dmat->nsegments;
	mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments;
	map = malloc(mapsize + segsize, M_DEVBUF, mflags | M_ZERO);
	if (map == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (NULL);
	}
	map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize);
	return (map);
}
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t map;
	int error = 0;

	*mapp = map = allocate_map(dmat, M_NOWAIT);
	if (map == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an exclusion
	 * region, a data alignment that is stricter than 1, or DMA that begins
	 * or ends with a partial cacheline.  Whether bouncing will actually
	 * happen can't be known until mapping time, but we need to pre-allocate
	 * resources now because we might not be allowed to at mapping time.
	 */
	error = allocate_bz_and_pages(dmat, map);
	if (error != 0) {
		free(map, M_DEVBUF);
		*mapp = NULL;
		return (error);
	}
	if (map->flags & DMAMAP_COHERENT)
		atomic_add_32(&maps_coherent, 1);
	atomic_add_32(&maps_total, 1);
	dmat->map_count++;
	return (0);
}
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	if (map->flags & DMAMAP_COHERENT)
		atomic_subtract_32(&maps_coherent, 1);
	atomic_subtract_32(&maps_total, 1);
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	busdma_bufalloc_t ba;
	struct busdma_bufzone *bufzone;
	bus_dmamap_t map;
	vm_memattr_t memattr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	*mapp = map = allocate_map(dmat, mflags);
	if (map == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	map->flags = DMAMAP_DMAMEM_ALLOC;

	/* Choose a busdma buffer allocator based on memory type flags. */
	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		map->flags |= DMAMAP_COHERENT;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
	}

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request.  If the buffer is too big to be
	 * held in the allocator cache, this returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 *  - It's small enough to be in the allocator (bufzone not NULL).
	 *  - The alignment constraint isn't larger than the allocation size
	 *    (the allocator aligns buffers to their size boundaries).
	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed nsegments.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !exclusion_bounce(dmat)) {
		*vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
		*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		*vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		free(map, M_DEVBUF);
		*mapp = NULL;
		return (ENOMEM);
	}
	if (map->flags & DMAMAP_COHERENT)
		atomic_add_32(&maps_coherent, 1);
	atomic_add_32(&maps_dmamem, 1);
	atomic_add_32(&maps_total, 1);
	dmat->map_count++;

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
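/*
 * Usage sketch (hypothetical driver code): a coherent, zeroed descriptor ring
 * allocated against a tag created earlier:
 *
 *	void *ring;
 *	bus_dmamap_t ringmap;
 *
 *	error = bus_dmamem_alloc(sc->ring_tag, &ring,
 *	    BUS_DMA_COHERENT | BUS_DMA_ZERO | BUS_DMA_NOWAIT, &ringmap);
 *
 * Because the memory comes from the coherent (uncacheable) allocator, later
 * bus_dmamap_sync() calls on ringmap reduce to a write-buffer drain on
 * PREWRITE instead of full cache maintenance.
 */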
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choices for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;

	if (map->flags & DMAMAP_COHERENT)
		ba = coherent_allocator;
	else
		ba = standard_allocator;

	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !exclusion_bounce(dmat))
		uma_zfree(bufzone->umazone, vaddr);
	else
		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);

	dmat->map_count--;
	if (map->flags & DMAMAP_COHERENT)
		atomic_subtract_32(&maps_coherent, 1);
	atomic_subtract_32(&maps_total, 1);
	atomic_subtract_32(&maps_dmamem, 1);
	free(map, M_DEVBUF);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
		    " map= %p, pagesneeded= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment,
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (must_bounce(dmat, map, curaddr, sgsize) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
		    " map= %p, pagesneeded= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment,
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (__predict_true(map->pmap == kernel_pmap))
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(map->pmap, vaddr);
			if (must_bounce(dmat, map, paddr,
			    min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr &
			    PAGE_MASK)))) != 0) {
				map->pagesneeded++;
			}
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
	}
}
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			map->pagesneeded = 0;
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}
/*
 * Add a single contiguous physical range to the segment list.
 */
static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	if (dmat->ranges) {
		struct arm32_dma_range *dr;

		dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
		    curaddr);
		if (dr == NULL) {
			_bus_dmamap_unload(dmat, map);
			return (0);
		}
		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}
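/*
 * Worked example of the boundary clipping above (illustrative): with
 * boundary = 0x10000, curaddr = 0x1fff0 and sgsize = 0x100, bmask is
 * 0xffff0000, baddr = (0x1fff0 + 0x10000) & bmask = 0x20000, and sgsize is
 * clipped to baddr - curaddr = 0x10, so the segment stops exactly at the
 * boundary; the remainder is added as a new segment on a later call.
 */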
/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = map->segments;

	counter_u64_add(maploads_total, 1);
	counter_u64_add(maploads_physmem, 1);

	if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			counter_u64_add(maploads_bounced, 1);
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
		    sgsize)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}
int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}
/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    void *buf, bus_size_t buflen,
    pmap_t pmap,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t vaddr;
	struct sync_list *sl;
	int error;

	counter_u64_add(maploads_total, 1);
	if (map->flags & DMAMAP_COHERENT)
		counter_u64_add(maploads_coherent, 1);
	if (map->flags & DMAMAP_DMAMEM_ALLOC)
		counter_u64_add(maploads_dmamem, 1);

	if (segs == NULL)
		segs = map->segments;

	if (flags & BUS_DMA_LOAD_MBUF) {
		counter_u64_add(maploads_mbuf, 1);
		map->flags |= DMAMAP_MBUF;
	}

	map->pmap = pmap;

	if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
		_bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			counter_u64_add(maploads_bounced, 1);
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (__predict_true(map->pmap == kernel_pmap))
			curaddr = pmap_kextract(vaddr);
		else
			curaddr = pmap_extract(map->pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
		    sgsize)) {
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);
		} else {
			sl = &map->slist[map->sync_count - 1];
			if (map->sync_count == 0 ||
#ifdef ARM_L2_PIPT
			    curaddr != sl->busaddr + sl->datacount ||
#endif
			    vaddr != sl->vaddr + sl->datacount) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = vaddr;
				sl->datacount = sgsize;
				sl->busaddr = curaddr;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);

cleanup:
	_bus_dmamap_unload(dmat, map);
	return (EINVAL);
}
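/*
 * Usage sketch (hypothetical driver code): loading a buffer through the
 * public API, which lands in the utility functions above.  The callback
 * receives the segment list; with BUS_DMA_NOWAIT the load is never deferred.
 *
 *	static void
 *	mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
 *	{
 *		if (err == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->buf_tag, sc->buf_map, sc->buf, sc->buflen,
 *	    mydev_load_cb, &sc->buf_busaddr, BUS_DMA_NOWAIT);
 */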
void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg)
{

	map->mem = *mem;
	map->dmat = dmat;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = map->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;
	struct bounce_zone *bz;

	if ((bz = dmat->bounce_zone) != NULL) {
		while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
			STAILQ_REMOVE_HEAD(&map->bpages, links);
			free_bounce_page(dmat, bpage);
		}

		bz = dmat->bounce_zone;
		bz->free_bpages += map->pagesreserved;
		bz->reserved_bpages -= map->pagesreserved;
		map->pagesreserved = 0;
		map->pagesneeded = 0;
	}
	map->sync_count = 0;
	map->flags &= ~DMAMAP_MBUF;
}
#ifdef notyetbounceuser
/*
 * If busdma uses user pages, then the interrupt handler could be using the
 * kernel vm mapping.  Neither bounce pages nor sync list entries cross page
 * boundaries.
 * Below is a rough sequence that a person would do to fix the
 * user page reference in the kernel vmspace.  This would be
 * done in the dma post routine.
 */
static int
_bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len,
    pmap_t pmap, int op)
{
	/*
	 * Each synclist entry is contained within a single page.
	 * This would be needed if BUS_DMASYNC_POSTxxxx was implemented.
	 */
	curaddr = pmap_extract(pmap, buf);
	va = pmap_dma_map(curaddr);
	switch (op) {
	case SYNC_USER_INV:
		cpu_dcache_wb_range(va, sgsize);
		break;

	case SYNC_USER_COPYTO:
		bcopy((void *)va, (void *)bounce, sgsize);
		break;

	case SYNC_USER_COPYFROM:
		bcopy((void *)bounce, (void *)va, sgsize);
		break;
	}
}
#endif /* notyetbounceuser */
#ifdef ARM_L2_PIPT
#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(pa, size)
#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(pa, size)
#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(pa, size)
#else
#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(va, size)
#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(va, size)
#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(va, size)
#endif
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	struct sync_list *sl, *end;

	/*
	 * If the buffer was from user space, it is possible that this is not
	 * the same vm map, especially on a POST operation.  It's not clear that
	 * dma on userland buffers can work at all right now.  To be safe, until
	 * we're able to test direct userland dma, panic on a map mismatch.
	 */
	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		if (!pmap_dmap_iscurrent(map->pmap))
			panic("_bus_dmamap_sync: wrong user map for bounce sync.");

		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		/*
		 * For PREWRITE do a writeback.  Clean the caches from the
		 * innermost to the outermost levels.
		 */
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->datavaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				else
					physcopyout(bpage->dataaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
				    bpage->datacount);
				l2cache_wb_range((vm_offset_t)bpage->vaddr,
				    (vm_offset_t)bpage->busaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		/*
		 * Do an invalidate for PREREAD unless a writeback was already
		 * done above due to PREWRITE also being set.  The reason for a
		 * PREREAD invalidate is to prevent dirty lines currently in the
		 * cache from being evicted during the DMA.  If a writeback was
		 * done due to PREWRITE also being set there will be no dirty
		 * lines and the POSTREAD invalidate handles the rest.  The
		 * invalidate is done from the innermost to outermost level.  If
		 * L2 were done first, a dirty cacheline could be automatically
		 * evicted from L1 before we invalidated it, re-dirtying the L2.
		 */
		if ((op & BUS_DMASYNC_PREREAD) && !(op & BUS_DMASYNC_PREWRITE)) {
			bpage = STAILQ_FIRST(&map->bpages);
			while (bpage != NULL) {
				cpu_dcache_inv_range((vm_offset_t)bpage->vaddr,
				    bpage->datacount);
				l2cache_inv_range((vm_offset_t)bpage->vaddr,
				    (vm_offset_t)bpage->busaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		/*
		 * Re-invalidate the caches on a POSTREAD, even though they were
		 * already invalidated at PREREAD time.  Aggressive prefetching
		 * due to accesses to other data near the dma buffer could have
		 * brought buffer data into the caches which is now stale.  The
		 * caches are invalidated from the outermost to innermost; the
		 * prefetches could be happening right now, and if L1 were
		 * invalidated first, stale L2 data could be prefetched into L1.
		 */
		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				l2cache_inv_range((vm_offset_t)bpage->vaddr,
				    (vm_offset_t)bpage->busaddr,
				    bpage->datacount);
				cpu_dcache_inv_range((vm_offset_t)bpage->vaddr,
				    bpage->datacount);
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->vaddr,
					    (void *)bpage->datavaddr,
					    bpage->datacount);
				else
					physcopyin((void *)bpage->vaddr,
					    bpage->dataaddr,
					    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}

	/*
	 * For COHERENT memory no cache maintenance is necessary, but ensure all
	 * writes have reached memory for the PREWRITE case.  No action is
	 * needed for a PREREAD without PREWRITE also set, because that would
	 * imply that the cpu had written to the COHERENT buffer and expected
	 * the dma device to see that change, and by definition a PREWRITE sync
	 * is required to make that happen.
	 */
	if (map->flags & DMAMAP_COHERENT) {
		if (op & BUS_DMASYNC_PREWRITE) {
			dsb();
			cpu_l2cache_drain_writebuf();
		}
	}

	/*
	 * Cache maintenance for normal (non-COHERENT non-bounce) buffers.  All
	 * the comments about the sequences for flushing cache levels in the
	 * bounce buffer code above apply here as well.  In particular, the fact
	 * that the sequence is inner-to-outer for PREREAD invalidation and
	 * outer-to-inner for POSTREAD invalidation is not a mistake.
	 */
	if (map->sync_count != 0) {
		if (!pmap_dmap_iscurrent(map->pmap))
			panic("_bus_dmamap_sync: wrong user map for sync.");

		sl = &map->slist[0];
		end = &map->slist[map->sync_count];
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing sync", __func__, dmat, dmat->flags, op);

		switch (op) {
		case BUS_DMASYNC_PREWRITE:
		case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
			while (sl != end) {
				cpu_dcache_wb_range(sl->vaddr, sl->datacount);
				l2cache_wb_range(sl->vaddr, sl->busaddr,
				    sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_PREREAD:
			/*
			 * An mbuf may start in the middle of a cacheline.  There
			 * will be no cpu writes to the beginning of that line
			 * (which contains the mbuf header) while dma is in
			 * progress.  Handle that case by doing a writeback of
			 * just the first cacheline before invalidating the
			 * overall buffer.  Any mbuf in a chain may have this
			 * misalignment.  Buffers which are not mbufs bounce if
			 * they are not aligned to a cacheline.
			 */
			while (sl != end) {
				if (sl->vaddr & arm_dcache_align_mask) {
					KASSERT(map->flags & DMAMAP_MBUF,
					    ("unaligned buffer is not an mbuf"));
					cpu_dcache_wb_range(sl->vaddr, 1);
					l2cache_wb_range(sl->vaddr,
					    sl->busaddr, 1);
				}
				cpu_dcache_inv_range(sl->vaddr, sl->datacount);
				l2cache_inv_range(sl->vaddr, sl->busaddr,
				    sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_POSTWRITE:
			break;

		case BUS_DMASYNC_POSTREAD:
		case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
			while (sl != end) {
				l2cache_inv_range(sl->vaddr, sl->busaddr,
				    sl->datacount);
				cpu_dcache_inv_range(sl->vaddr, sl->datacount);
				sl++;
			}
			break;

		default:
			panic("unsupported combination of sync operations: 0x%08x\n", op);
			break;
		}
	}
}
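/*
 * Usage sketch (hypothetical driver code): the sync calls that bracket a DMA
 * operation.  For a device read of memory (transmit), sync with PREWRITE
 * before starting the hardware and POSTWRITE after it finishes; for a device
 * write (receive), PREREAD before and POSTREAD after, and only then may the
 * CPU look at the data:
 *
 *	bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_PREREAD);
 *	(start the device writing into the buffer, wait for completion)
 *	bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
 */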
static void
init_bounce_pages(void *dummy __unused)
{

	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

	return (bz->sysctl_tree_top);
}
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment) &&
		    (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests (pages bounced)");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_ULONG(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
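/*
 * Worked numbers (illustrative): with free_bpages = 4, pagesneeded = 8 and
 * pagesreserved = 2, pages = MIN(4, 6) = 4.  A non-commit call returns the
 * deficit 8 - (2 + 4) = 2 without reserving anything; a commit call takes
 * all 4 pages (pagesreserved becomes 6) and returns 2, telling the caller
 * the map must keep waiting for the remaining pages.
 */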
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any reserved pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}
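/*
 * Note on BUS_DMA_KEEP_PG_OFFSET (illustrative): bounce pages start page
 * aligned, so for a client buffer at addr 0x12345678 the bounced busaddr
 * would normally begin at offset 0.  With the flag set, the low PAGE_MASK
 * bits (0x678 with 4KB pages) are ORed into vaddr and busaddr so the bounce
 * copy keeps the same offset-within-page as the original buffer, which some
 * page-offset-sensitive hardware requires.  free_bounce_page() masks the
 * offset back off before the page is reused.
 */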
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}