2 * Copyright (c) 2006 Fill this file and put your name here
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
32 * All rights reserved.
34 * This code is derived from software contributed to The NetBSD Foundation
35 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
36 * NASA Ames Research Center.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the NetBSD
49 * Foundation, Inc. and its contributors.
50 * 4. Neither the name of The NetBSD Foundation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
55 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64 * POSSIBILITY OF SUCH DAMAGE.
67 /* $NetBSD: bus_dma.c,v 1.17 2006/03/01 12:38:11 yamt Exp $ */
69 #include <sys/cdefs.h>
70 __FBSDID("$FreeBSD$");
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/malloc.h>
76 #include <sys/interrupt.h>
79 #include <sys/mutex.h>
83 #include <sys/kernel.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_map.h>
89 #include <machine/atomic.h>
90 #include <machine/bus.h>
91 #include <machine/cache.h>
92 #include <machine/cpufunc.h>
/*
 * Fragments of the bus_dma_tag / bus_dmamap structures and the static
 * dmamap pool.  NOTE(review): this listing is line-sampled -- the
 * enclosing struct declarations and several members are elided, so the
 * fields below are not a complete definition.
 */
100 bus_dma_filter_t *filter;	/* tag's DMA address filter callback */
108 bus_dma_lock_t *lockfunc;	/* driver lock callback (see busdma_lock_mutex) */
110 /* XXX: machine-dependent fields */
111 vm_offset_t _physbase;	/* physical base of the tag's DMA window */
/* bus_dmamap flag bits: which kind of buffer the map currently holds. */
116 #define DMAMAP_LINEAR 0x1	/* loaded from a plain linear buffer */
117 #define DMAMAP_MBUF 0x2	/* loaded from an mbuf chain */
118 #define DMAMAP_UIO 0x4	/* loaded from a struct uio */
119 #define DMAMAP_ALLOCATED 0x10	/* map was malloc'ed, not taken from map_pool */
120 #define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
121 #define DMAMAP_COHERENT 0x8	/* buffer accessed through an uncached mapping */
128 TAILQ_ENTRY(bus_dmamap) freelist;	/* linkage on dmamap_freelist */
/*
 * Free list of statically pre-allocated maps; filled from map_pool at
 * boot by mips_dmamap_freelist_init() and protected by busdma_mtx.
 */
132 static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
133 TAILQ_HEAD_INITIALIZER(dmamap_freelist);
135 #define BUSDMA_STATIC_MAPS 500	/* number of maps in the static pool */
136 static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];
138 static struct mtx busdma_mtx;
140 MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);
/*
 * Boot-time SYSINIT hook: push every entry of the static map_pool onto
 * the dmamap free list.  NOTE(review): the function's return type,
 * braces, and the declaration of 'i' are elided from this listing.
 */
143 mips_dmamap_freelist_init(void *dummy)
147 for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
148 TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
151 SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);
154 * Check to see if the specified page is in an allowed DMA range.
/*
 * Forward declaration of the segment-building workhorse shared by the
 * bus_dmamap_load*() variants defined later in this file.
 */
158 bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
159 bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
160 int flags, vm_offset_t *lastaddrp, int *segp);
163 * Convenience function for manipulating driver locks from busdma (during
164 * busdma_swi, for example). Drivers that don't provide their own locks
165 * should specify &Giant to dmat->lockfuncarg. Drivers that use their own
166 * non-mutex locking scheme don't have to use this at all.
169 busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
/* arg is the driver-supplied mutex; lock or unlock it according to op. */
173 dmtx = (struct mtx *)arg;
/* NOTE(review): the switch on op (lock/unlock cases) is elided here. */
182 panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
187 * dflt_lock should never get called. It gets put into the dma tag when
188 * lockfunc == NULL, which is only valid if the maps that are associated
189 * with the tag are meant to never be deferred.
190 * XXX Should have a way to identify which driver is responsible here.
194 dflt_lock(void *arg, bus_dma_lock_op_t op)
/*
 * Reaching this function is a driver bug (a deferred load with no
 * lockfunc); panic, or at least log the error.  NOTE(review): the
 * #ifdef selecting panic vs. printf is elided from this listing.
 */
197 panic("driver error: busdma dflt_lock called");
199 printf("DRIVER_ERROR: busdma dflt_lock called\n");
/*
 * Grab a dmamap, preferring the static free list; fall back to a
 * malloc'ed (M_NOWAIT | M_ZERO) map when the pool is exhausted.
 * Malloc'ed maps are tagged DMAMAP_ALLOCATED so _busdma_free_dmamap()
 * knows to free() them instead of returning them to the pool.
 * NOTE(review): braces and the NULL-check structure are elided here.
 */
204 static __inline bus_dmamap_t
205 _busdma_alloc_dmamap(void)
209 mtx_lock(&busdma_mtx);
210 map = TAILQ_FIRST(&dmamap_freelist);
212 TAILQ_REMOVE(&dmamap_freelist, map, freelist);
213 mtx_unlock(&busdma_mtx);
215 map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
217 map->flags = DMAMAP_ALLOCATED;
224 _busdma_free_dmamap(bus_dmamap_t map)
/*
 * Counterpart to _busdma_alloc_dmamap(): malloc'ed maps
 * (DMAMAP_ALLOCATED) are freed, pool maps go back on the free list.
 * NOTE(review): the free() branch is elided from this listing.
 */
226 if (map->flags & DMAMAP_ALLOCATED)
229 mtx_lock(&busdma_mtx);
230 TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
231 mtx_unlock(&busdma_mtx);
/*
 * Allocate and initialize a DMA tag describing the constraints
 * (alignment, boundary, address range, segment count/size) for maps
 * created from it.  On success *dmat receives the new tag; restrictions
 * from 'parent', if any, are folded in.  NOTE(review): this listing is
 * line-sampled -- returns, braces and some expressions are elided.
 */
236 bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
237 bus_size_t boundary, bus_addr_t lowaddr,
238 bus_addr_t highaddr, bus_dma_filter_t *filter,
239 void *filterarg, bus_size_t maxsize, int nsegments,
240 bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
241 void *lockfuncarg, bus_dma_tag_t *dmat)
244 bus_dma_tag_t newtag;
247 /* Basic sanity checking */
248 if (boundary != 0 && boundary < maxsegsz)
251 /* Return a NULL tag on failure */
254 newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
256 if (newtag == NULL) {
257 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
258 __func__, newtag, 0, error);
262 newtag->parent = parent;
263 newtag->alignment = alignment;
264 newtag->boundary = boundary;
/* Round the exclusion addresses up to the last byte of their page. */
265 newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
266 newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
268 newtag->filter = filter;
269 newtag->filterarg = filterarg;
270 newtag->maxsize = maxsize;
271 newtag->nsegments = nsegments;
272 newtag->maxsegsz = maxsegsz;
273 newtag->flags = flags;
274 newtag->ref_count = 1; /* Count ourself */
275 newtag->map_count = 0;
/* DMA window covers KSEG0-addressable physical memory from 0. */
277 newtag->_physbase = 0;
278 /* XXXMIPS: Should we limit window size to amount of physical memory */
279 newtag->_wsize = MIPS_KSEG1_START - MIPS_KSEG0_START;
/* Use the caller's lock callback, or the "must never defer" default. */
280 if (lockfunc != NULL) {
281 newtag->lockfunc = lockfunc;
282 newtag->lockfuncarg = lockfuncarg;
284 newtag->lockfunc = dflt_lock;
285 newtag->lockfuncarg = NULL;
288 /* Take into account any restrictions imposed by our parent tag */
289 if (parent != NULL) {
290 newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
291 newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
292 if (newtag->boundary == 0)
293 newtag->boundary = parent->boundary;
294 else if (parent->boundary != 0)
295 newtag->boundary = MIN(parent->boundary,
297 if (newtag->filter == NULL) {
299 * Short circuit looking at our parent directly
300 * since we have encapsulated all of its information
302 newtag->filter = parent->filter;
303 newtag->filterarg = parent->filterarg;
304 newtag->parent = parent->parent;
/* Keep the parent alive while we reference it. */
306 if (newtag->parent != NULL)
307 atomic_add_int(&parent->ref_count, 1);
311 free(newtag, M_DEVBUF);
315 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
316 __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
325 bus_dma_tag_destroy(bus_dma_tag_t dmat)
/*
 * Drop a reference on the tag; when the count reaches zero, free it and
 * walk up the parent chain releasing the reference taken at creation.
 * Destroying a tag that still has maps is an error.
 * NOTE(review): return statements and braces are elided in this listing.
 */
328 bus_dma_tag_t dmat_copy = dmat;
333 if (dmat->map_count != 0)
336 while (dmat != NULL) {
337 bus_dma_tag_t parent;
339 parent = dmat->parent;
340 atomic_subtract_int(&dmat->ref_count, 1);
341 if (dmat->ref_count == 0) {
342 free(dmat, M_DEVBUF);
344 * Last reference count, so
345 * release our reference
346 * count on our parent.
353 CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);
359 * Allocate a handle for mapping from kva/uva/physical
360 * address space into bus device space.
363 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
/*
 * Allocate a map from the pool/heap; ENOMEM if none available.
 * NOTE(review): the assignment to *mapp and the returns are elided
 * from this listing.
 */
370 newmap = _busdma_alloc_dmamap();
371 if (newmap == NULL) {
372 CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
379 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
380 __func__, dmat, dmat->flags, error);
387 * Destroy a handle for mapping from kva/uva/physical
388 * address space into bus device space.
391 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
/* Simply hand the map back to the allocator; nothing else to tear down. */
393 _busdma_free_dmamap(map);
395 CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
400 * Allocate a piece of memory that can be efficiently mapped into
401 * bus device space based on the constraints listed in the dma tag.
402 * A dmamap for use with dmamap_load is also allocated.
405 bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
/*
 * Allocate DMA-able memory per the tag's constraints and a map for it.
 * Small (<= PAGE_SIZE) requests use malloc; larger ones use
 * contigmalloc.  BUS_DMA_COHERENT remaps the buffer through KSEG1
 * (uncached) after writing back and invalidating the D-cache.
 * NOTE(review): this listing is line-sampled -- mflags setup, braces
 * and returns are elided.
 */
408 bus_dmamap_t newmap = NULL;
412 if (flags & BUS_DMA_NOWAIT)
416 if (flags & BUS_DMA_ZERO)
419 newmap = _busdma_alloc_dmamap();
420 if (newmap == NULL) {
421 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
422 __func__, dmat, dmat->flags, ENOMEM);
429 if (dmat->maxsize <= PAGE_SIZE) {
430 *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
433 * XXX Use Contigmalloc until it is merged into this facility
434 * and handles multi-seg allocations. Nobody is doing
435 * multi-seg allocations yet though.
/* Clamp the upper physical bound to what KSEG0 can address. */
438 if((uint32_t)dmat->lowaddr >= MIPS_KSEG0_LARGEST_PHYS) {
439 /* Note in the else case I just put in what was already
440 * being passed in dmat->lowaddr. I am not sure
441 * how this would have worked. Since lowaddr is in the
442 * max address position. I would have thought that the
443 * caller would have wanted dmat->highaddr. That is
444 * presuming they are asking for physical addresses
445 * which is what contigmalloc takes. - RRS
447 maxphys = MIPS_KSEG0_LARGEST_PHYS - 1;
449 maxphys = dmat->lowaddr;
451 *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
452 0ul, maxphys, dmat->alignment? dmat->alignment : 1ul,
/* Allocation failed: release the map we took and bail out. */
455 if (*vaddr == NULL) {
456 if (newmap != NULL) {
457 _busdma_free_dmamap(newmap);
463 if (flags & BUS_DMA_COHERENT) {
464 void *tmpaddr = (void *)*vaddr;
/* Hand the caller an uncached KSEG1 alias of the buffer. */
467 tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr));
468 newmap->origbuffer = *vaddr;
469 newmap->allocbuffer = tmpaddr;
470 mips_dcache_wbinv_range((vm_offset_t)*vaddr,
474 newmap->origbuffer = newmap->allocbuffer = NULL;
476 newmap->origbuffer = newmap->allocbuffer = NULL;
482 * Free a piece of memory and its allocated dmamap, that was allocated
483 * via bus_dmamem_alloc. Make the same choice for free/contigfree.
486 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
/*
 * If the map holds a KSEG1 (coherent) alias, translate back to the
 * original cached address before freeing; then mirror the malloc /
 * contigmalloc choice made in bus_dmamem_alloc().
 */
488 if (map->allocbuffer) {
489 KASSERT(map->allocbuffer == vaddr,
490 ("Trying to freeing the wrong DMA buffer"));
491 vaddr = map->origbuffer;
493 if (dmat->maxsize <= PAGE_SIZE)
494 free(vaddr, M_DEVBUF);
496 contigfree(vaddr, dmat->maxsize, M_DEVBUF);
499 _busdma_free_dmamap(map);
500 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
505 * Utility function to load a linear buffer. lastaddrp holds state
506 * between invocations (for multiple-buffer loads). segp contains
507 * the starting segment on entrance, and the ending segment on exit.
508 * first indicates if this is the first invocation of this function.
/*
 * Walk the buffer page by page, translating each physical address into
 * the tag's DMA window and packing the results into segs[], coalescing
 * adjacent chunks that stay within maxsegsz and the boundary mask.
 * *lastaddrp / *segp carry state across calls for multi-buffer loads.
 * NOTE(review): this listing is line-sampled -- braces, the loop
 * advance, and the error returns are elided.
 */
511 bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
512 bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
513 int flags, vm_offset_t *lastaddrp, int *segp)
517 vm_offset_t curaddr, lastaddr;
518 vm_offset_t vaddr = (vm_offset_t)buf;
522 lastaddr = *lastaddrp;
523 bmask = ~(dmat->boundary - 1);
525 for (seg = *segp; buflen > 0 ; ) {
527 * Get the physical address for this segment.
/* Only kernel-virtual buffers are supported on this platform. */
529 KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
530 curaddr = pmap_kextract(vaddr);
533 * If we're beyond the current DMA window, indicate
534 * that and try to fall back onto something else.
536 if (curaddr < dmat->_physbase ||
537 curaddr >= (dmat->_physbase + dmat->_wsize))
541 * In a valid DMA range. Translate the physical
542 * memory address to an address in the DMA window.
544 curaddr = (curaddr - dmat->_physbase) + dmat->_wbase;
548 * Compute the segment size, and adjust counts.
/* Never cross a page boundary within one chunk. */
550 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
555 * Insert chunk into a segment, coalescing with
556 * the previous segment if possible.
558 if (seg >= 0 && curaddr == lastaddr &&
559 (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
560 (dmat->boundary == 0 ||
561 (segs[seg].ds_addr & bmask) ==
562 (curaddr & bmask))) {
563 segs[seg].ds_len += sgsize;
/* Start a new segment; fail if the tag's segment limit is hit. */
566 if (++seg >= dmat->nsegments)
568 segs[seg].ds_addr = curaddr;
569 segs[seg].ds_len = sgsize;
574 lastaddr = curaddr + sgsize;
580 *lastaddrp = lastaddr;
592 * Map the buffer buf into bus space using the dmamap map.
595 bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
596 bus_size_t buflen, bus_dmamap_callback_t *callback,
597 void *callback_arg, int flags)
/*
 * Load a linear kernel buffer into the map and invoke the callback
 * with the resulting segment list (or a zero-segment error report).
 * NOTE(review): braces, the return, and the error test around the
 * callback invocations are elided from this listing.
 */
599 vm_offset_t lastaddr = 0;
600 int error, nsegs = -1;
/* Size the on-stack segment array per-tag if the compiler allows VLAs. */
601 #ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
602 bus_dma_segment_t dm_segments[dmat->nsegments];
604 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
607 KASSERT(dmat != NULL, ("dmatag is NULL"));
608 KASSERT(map != NULL, ("dmamap is NULL"));
/* Reset the map's buffer-type bits and record a linear, coherent load. */
609 map->flags &= ~DMAMAP_TYPE_MASK;
610 map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
613 error = bus_dmamap_load_buffer(dmat,
614 dm_segments, map, buf, buflen, kernel_pmap,
615 flags, &lastaddr, &nsegs);
618 (*callback)(callback_arg, NULL, 0, error);
620 (*callback)(callback_arg, dm_segments, nsegs + 1, error);
622 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
623 __func__, dmat, dmat->flags, nsegs + 1, error);
630 * Like bus_dmamap_load(), but for mbufs.
633 bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
634 bus_dmamap_callback2_t *callback, void *callback_arg,
/*
 * mbuf-chain variant of bus_dmamap_load(): load each mbuf's data in
 * turn (skipping the chain entirely if the packet exceeds maxsize),
 * then report via the two-argument callback.  NOTE(review): braces,
 * the EINVAL path, and the m_len==0 skip are elided from this listing.
 */
637 #ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
638 bus_dma_segment_t dm_segments[dmat->nsegments];
640 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
642 int nsegs = -1, error = 0;
646 map->flags &= ~DMAMAP_TYPE_MASK;
647 map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
651 if (m0->m_pkthdr.len <= dmat->maxsize) {
652 vm_offset_t lastaddr = 0;
655 for (m = m0; m != NULL && error == 0; m = m->m_next) {
657 error = bus_dmamap_load_buffer(dmat,
658 dm_segments, map, m->m_data, m->m_len,
659 pmap_kernel(), flags, &lastaddr, &nsegs);
660 map->len += m->m_len;
669 * force "no valid mappings" on error in callback.
671 (*callback)(callback_arg, dm_segments, 0, 0, error);
673 (*callback)(callback_arg, dm_segments, nsegs + 1,
674 m0->m_pkthdr.len, error);
676 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
677 __func__, dmat, dmat->flags, error, nsegs + 1);
683 bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
684 struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
/*
 * Callback-free mbuf load: fill the caller-provided segs[]/*nsegs
 * directly instead of invoking a callback.  Always non-blocking.
 * NOTE(review): braces, the EINVAL path, and the final *nsegs
 * adjustment are elided from this listing.
 */
691 flags |= BUS_DMA_NOWAIT;
693 map->flags &= ~DMAMAP_TYPE_MASK;
694 map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
698 if (m0->m_pkthdr.len <= dmat->maxsize) {
699 vm_offset_t lastaddr = 0;
702 for (m = m0; m != NULL && error == 0; m = m->m_next) {
704 error = bus_dmamap_load_buffer(dmat, segs, map,
706 pmap_kernel(), flags, &lastaddr, nsegs);
707 map->len += m->m_len;
715 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
716 __func__, dmat, dmat->flags, error, *nsegs);
723 * Like bus_dmamap_load(), but for uios.
726 bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
727 bus_dmamap_callback2_t *callback, void *callback_arg,
/* uio loads are not implemented on this platform: unconditional panic. */
731 panic("Unimplemented %s at %s:%d\n", __func__, __FILE__, __LINE__);
736 * Release the mapping held by map.
/*
 * Release the mapping held by map.  NOTE(review): the body is entirely
 * elided from this listing; only the signature line is visible.
 */
739 _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
746 bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
/*
 * Perform the D-cache maintenance required for one buffer:
 * PREREAD|PREWRITE -> write-back + invalidate; PREREAD -> invalidate
 * (write-back+invalidate when not cache-line aligned); PREWRITE ->
 * write-back only.  NOTE(review): the switch statement, alignment
 * test, and break statements are elided from this listing.
 */
750 case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
751 mips_dcache_wbinv_range((vm_offset_t)buf, len);
754 case BUS_DMASYNC_PREREAD:
756 mips_dcache_wbinv_range((vm_offset_t)buf, len);
758 mips_dcache_inv_range((vm_offset_t)buf, len);
762 case BUS_DMASYNC_PREWRITE:
763 mips_dcache_wb_range((vm_offset_t)buf, len);
769 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
778 * Mixing PRE and POST operations is not allowed.
780 if ((op & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
781 (op & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
782 panic("_bus_dmamap_sync: mix PRE and POST");
785 * Since we're dealing with a virtually-indexed, write-back
786 * cache, we need to do the following things:
788 * PREREAD -- Invalidate D-cache. Note we might have
789 * to also write-back here if we have to use an Index
790 * op, or if the buffer start/end is not cache-line aligned.
792 * PREWRITE -- Write-back the D-cache. If we have to use
793 * an Index op, we also have to invalidate. Note that if
794 * we are doing PREREAD|PREWRITE, we can collapse everything
797 * POSTREAD -- Nothing.
799 * POSTWRITE -- Nothing.
803 * Flush the write buffer.
804 * XXX Is this always necessary?
808 op &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
812 CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
813 switch(map->flags & DMAMAP_TYPE_MASK) {
815 bus_dmamap_sync_buf(map->buffer, map->len, op);
821 bus_dmamap_sync_buf(m->m_data, m->m_len, op);
828 resid = uio->uio_resid;
829 for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
830 bus_size_t minlen = resid < iov[i].iov_len ? resid :
833 bus_dmamap_sync_buf(iov[i].iov_base, minlen, op);