/*-
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Bus dma support routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
	bus_dma_filter_t *filter;
	bus_dma_lock_t *lockfunc;
	void *buf;		/* unmapped buffer pointer */
	bus_size_t buflen;	/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
	dmtx = (struct mtx *)arg;
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
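
/*
 * Illustrative sketch (not part of this file's logic): on platforms that
 * defer loads, busdma brackets the eventual callback with the tag's
 * lockfunc, conceptually as below, so a driver that passes
 * busdma_lock_mutex gets its mutex held across the callback.  The names
 * here are only for illustration.
 */
#if 0
	(*dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
	(*callback)(callback_arg, segs, nsegs, error);
	(*dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
#endif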
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
dflt_lock(void *arg, bus_dma_lock_op_t op)
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
/*
 * Allocate a device specific dma_tag.
 */
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
	bus_dma_tag_t newtag;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourselves */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
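
/*
 * Example (hypothetical driver code, shown only to illustrate the
 * parameters above; "tag" and "error" are made-up locals): a tag limited
 * to 32-bit addresses, a single segment of at most 64KB, no filter, and
 * Giant used by busdma_lock_mutex for deferred-callback locking.
 */
#if 0
	bus_dma_tag_t tag;
	int error;

	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    65536, 1, 65536,			/* maxsize, nsegments, maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex, &Giant,		/* lockfunc, lockfuncarg */
	    &tag);
#endif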
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat->map_count != 0)
		return (EBUSY);
	while (dmat != NULL) {
		bus_dma_tag_t parent;

		parent = dmat->parent;
		atomic_subtract_int(&dmat->ref_count, 1);
		if (dmat->ref_count == 0) {
			free(dmat, M_DEVBUF);
			/*
			 * Last reference count, so
			 * release our reference
			 * count on our parent.
			 */
			dmat = parent;
		} else
			dmat = NULL;
	}
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL)
		panic("dmamap_destroy: NULL?\n");
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	/*
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if (dmat->maxsize <= PAGE_SIZE &&
	    dmat->alignment < dmat->maxsize) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if ((uintptr_t)*vaddr % dmat->alignment)
		printf("XXX: %s: alignment not respected!\n", __func__);
/*
 * Free a piece of memory and its allocated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if (dmat->maxsize <= PAGE_SIZE &&
	    dmat->alignment < dmat->maxsize)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}
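
/*
 * Example (hypothetical, for illustration only): allocating and releasing
 * a zeroed DMA-able buffer against a tag created as above.  "tag", "ring"
 * and "ringmap" are made-up names.
 */
#if 0
	void *ring;
	bus_dmamap_t ringmap;

	if (bus_dmamem_alloc(tag, &ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &ringmap) != 0)
		return (ENOMEM);
	/* use the memory, e.g. as a descriptor ring, then release it */
	bus_dmamem_free(tag, ring, ringmap);
#endif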
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td, int flags,
    vm_offset_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;
	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);
		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;
		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	vm_offset_t lastaddr;
	int error, nsegs;

	if (map != NULL)
		panic("bus_dmamap_load: Invalid map\n");
	lastaddr = (vm_offset_t)0;
	nsegs = 0;
	error = bus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
	    NULL, flags, &lastaddr, &nsegs, 1);

	if (error == 0)
		(*callback)(callback_arg, dm_segments, nsegs + 1, 0);
	else
		(*callback)(callback_arg, NULL, 0, error);

	return (0);
}
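
/*
 * Example (hypothetical): a callback that records the single segment
 * produced by bus_dmamap_load().  Because this implementation never
 * bounces, the callback runs synchronously, before bus_dmamap_load()
 * returns.  "foo_dma_cb", "tag", "map", "buf", "buflen" and "busaddr"
 * are made-up names.
 */
#if 0
	static void
	foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	{
		bus_addr_t *busaddrp = arg;

		if (error == 0 && nseg == 1)
			*busaddrp = segs[0].ds_addr;
	}

	/* and in the driver: */
	error = bus_dmamap_load(tag, map, buf, buflen, foo_dma_cb,
	    &busaddr, BUS_DMA_NOWAIT);
#endif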
/*
 * Like bus_dmamap_load(), but for mbufs.
 */
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = 0, error = 0;

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			error = bus_dmamap_load_buffer(dmat,
			    dm_segments, m->m_data, m->m_len, NULL,
			    flags, &lastaddr, &nsegs, first);
			first = 0;
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" on error in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    m0->m_pkthdr.len, error);
	}
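
/*
 * Example (hypothetical): bus_dmamap_load_mbuf() uses bus_dmamap_callback2_t,
 * which also receives the total mapped length (here the packet length).  A
 * network driver's transmit path might copy each segment into a hardware
 * descriptor; "foo_txdesc" and "foo_encode_seg" are made-up names.
 */
#if 0
	static void
	foo_txmap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
	    bus_size_t mapsize, int error)
	{
		struct foo_txdesc *txd = arg;
		int i;

		if (error != 0)
			return;
		for (i = 0; i < nseg; i++)
			foo_encode_seg(txd, i, segs[i].ds_addr, segs[i].ds_len);
	}
#endif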
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error = 0;
	*nsegs = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			error = bus_dmamap_load_buffer(dmat,
			    segs, m->m_data, m->m_len, NULL,
			    flags, &lastaddr, nsegs, first);
			first = 0;
		}
	}
/*
 * Like bus_dmamap_load(), but for uios.
 */
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	vm_offset_t lastaddr;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error, first;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
	}
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = bus_dmamap_load_buffer(dmat, dm_segments, addr,
		    minlen, td, flags, &lastaddr, &nsegs, first);
		first = 0;
		resid -= minlen;
	}
	if (error) {
		/* force "no valid mappings" on error in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}
/*
 * Release the mapping held by map. A no-op on PowerPC.
 */
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)

_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)