/*-
 * Copyright (c) 2015, 2019 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <machine/bus.h>
32 #include <machine/bus_dma.h>
33 #include <machine/resource.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
40 #include <sys/queue.h>
47 #include <vm/vm_map.h>
49 #include <dev/proto/proto.h>
50 #include <dev/proto/proto_dev.h>
51 #include <dev/proto/proto_busdma.h>
53 MALLOC_DEFINE(M_PROTO_BUSDMA, "proto_busdma", "DMA management data");
/*
 * Combine two boundary constraints: 0 means "no boundary", so a zero on
 * either side yields the other value; otherwise take the stricter (smaller)
 * of the two.  Note: arguments are evaluated more than once.
 */
#define BNDRY_MIN(a, b)		\
	(((a) == 0) ? (b) : (((b) == 0) ? (a) : MIN((a), (b))))
/*
 * Context handed to the bus_dmamap_load*() callbacks so they can write the
 * resulting bus segment information back into the user's ioctl request.
 */
struct proto_callback_bundle {
	struct proto_busdma *busdma;
	struct proto_md *md;		/* memory descriptor being loaded */
	struct proto_ioc_busdma *ioc;	/* ioctl request to fill in */
};
65 proto_busdma_tag_create(struct proto_busdma *busdma, struct proto_tag *parent,
66 struct proto_ioc_busdma *ioc)
68 struct proto_tag *tag;
70 /* Make sure that when a boundary is specified, it's a power of 2 */
71 if (ioc->u.tag.bndry != 0 &&
72 (ioc->u.tag.bndry & (ioc->u.tag.bndry - 1)) != 0)
76 * If nsegs is 1, ignore maxsegsz. What this means is that if we have
77 * just 1 segment, then maxsz should be equal to maxsegsz. To keep it
78 * simple for us, limit maxsegsz to maxsz in any case.
80 if (ioc->u.tag.maxsegsz > ioc->u.tag.maxsz || ioc->u.tag.nsegs == 1)
81 ioc->u.tag.maxsegsz = ioc->u.tag.maxsz;
83 tag = malloc(sizeof(*tag), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
86 LIST_INSERT_HEAD(&parent->children, tag, peers);
87 tag->align = MAX(ioc->u.tag.align, parent->align);
88 tag->bndry = BNDRY_MIN(ioc->u.tag.bndry, parent->bndry);
89 tag->maxaddr = MIN(ioc->u.tag.maxaddr, parent->maxaddr);
90 tag->maxsz = MIN(ioc->u.tag.maxsz, parent->maxsz);
91 tag->maxsegsz = MIN(ioc->u.tag.maxsegsz, parent->maxsegsz);
92 tag->nsegs = MIN(ioc->u.tag.nsegs, parent->nsegs);
93 tag->datarate = MIN(ioc->u.tag.datarate, parent->datarate);
94 /* Write constraints back */
95 ioc->u.tag.align = tag->align;
96 ioc->u.tag.bndry = tag->bndry;
97 ioc->u.tag.maxaddr = tag->maxaddr;
98 ioc->u.tag.maxsz = tag->maxsz;
99 ioc->u.tag.maxsegsz = tag->maxsegsz;
100 ioc->u.tag.nsegs = tag->nsegs;
101 ioc->u.tag.datarate = tag->datarate;
103 tag->align = ioc->u.tag.align;
104 tag->bndry = ioc->u.tag.bndry;
105 tag->maxaddr = ioc->u.tag.maxaddr;
106 tag->maxsz = ioc->u.tag.maxsz;
107 tag->maxsegsz = ioc->u.tag.maxsegsz;
108 tag->nsegs = ioc->u.tag.nsegs;
109 tag->datarate = ioc->u.tag.datarate;
111 LIST_INSERT_HEAD(&busdma->tags, tag, tags);
112 ioc->result = (uintptr_t)(void *)tag;
117 proto_busdma_tag_destroy(struct proto_busdma *busdma, struct proto_tag *tag)
120 if (!LIST_EMPTY(&tag->mds))
122 if (!LIST_EMPTY(&tag->children))
125 if (tag->parent != NULL) {
126 LIST_REMOVE(tag, peers);
129 LIST_REMOVE(tag, tags);
130 free(tag, M_PROTO_BUSDMA);
134 static struct proto_tag *
135 proto_busdma_tag_lookup(struct proto_busdma *busdma, u_long key)
137 struct proto_tag *tag;
139 LIST_FOREACH(tag, &busdma->tags, tags) {
140 if ((void *)tag == (void *)key)
147 proto_busdma_md_destroy_internal(struct proto_busdma *busdma,
151 LIST_REMOVE(md, mds);
152 LIST_REMOVE(md, peers);
154 bus_dmamap_unload(md->bd_tag, md->bd_map);
155 if (md->virtaddr != NULL)
156 bus_dmamem_free(md->bd_tag, md->virtaddr, md->bd_map);
158 bus_dmamap_destroy(md->bd_tag, md->bd_map);
159 bus_dma_tag_destroy(md->bd_tag);
160 free(md, M_PROTO_BUSDMA);
165 proto_busdma_mem_alloc_callback(void *arg, bus_dma_segment_t *segs, int nseg,
168 struct proto_callback_bundle *pcb = arg;
170 pcb->ioc->u.md.bus_nsegs = nseg;
171 pcb->ioc->u.md.bus_addr = segs[0].ds_addr;
175 proto_busdma_mem_alloc(struct proto_busdma *busdma, struct proto_tag *tag,
176 struct proto_ioc_busdma *ioc)
178 struct proto_callback_bundle pcb;
182 md = malloc(sizeof(*md), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
185 error = bus_dma_tag_create(busdma->bd_roottag, tag->align, tag->bndry,
186 tag->maxaddr, BUS_SPACE_MAXADDR, NULL, NULL, tag->maxsz,
187 tag->nsegs, tag->maxsegsz, 0, NULL, NULL, &md->bd_tag);
189 free(md, M_PROTO_BUSDMA);
192 error = bus_dmamem_alloc(md->bd_tag, &md->virtaddr, 0, &md->bd_map);
194 bus_dma_tag_destroy(md->bd_tag);
195 free(md, M_PROTO_BUSDMA);
198 md->physaddr = pmap_kextract((uintptr_t)(md->virtaddr));
202 error = bus_dmamap_load(md->bd_tag, md->bd_map, md->virtaddr,
203 tag->maxsz, proto_busdma_mem_alloc_callback, &pcb, BUS_DMA_NOWAIT);
205 bus_dmamem_free(md->bd_tag, md->virtaddr, md->bd_map);
206 bus_dma_tag_destroy(md->bd_tag);
207 free(md, M_PROTO_BUSDMA);
210 LIST_INSERT_HEAD(&tag->mds, md, peers);
211 LIST_INSERT_HEAD(&busdma->mds, md, mds);
212 ioc->u.md.virt_addr = (uintptr_t)md->virtaddr;
213 ioc->u.md.virt_size = tag->maxsz;
214 ioc->u.md.phys_nsegs = 1;
215 ioc->u.md.phys_addr = md->physaddr;
216 ioc->result = (uintptr_t)(void *)md;
221 proto_busdma_mem_free(struct proto_busdma *busdma, struct proto_md *md)
224 if (md->virtaddr == NULL)
226 return (proto_busdma_md_destroy_internal(busdma, md));
230 proto_busdma_md_create(struct proto_busdma *busdma, struct proto_tag *tag,
231 struct proto_ioc_busdma *ioc)
236 md = malloc(sizeof(*md), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
239 error = bus_dma_tag_create(busdma->bd_roottag, tag->align, tag->bndry,
240 tag->maxaddr, BUS_SPACE_MAXADDR, NULL, NULL, tag->maxsz,
241 tag->nsegs, tag->maxsegsz, 0, NULL, NULL, &md->bd_tag);
243 free(md, M_PROTO_BUSDMA);
246 error = bus_dmamap_create(md->bd_tag, 0, &md->bd_map);
248 bus_dma_tag_destroy(md->bd_tag);
249 free(md, M_PROTO_BUSDMA);
253 LIST_INSERT_HEAD(&tag->mds, md, peers);
254 LIST_INSERT_HEAD(&busdma->mds, md, mds);
255 ioc->result = (uintptr_t)(void *)md;
260 proto_busdma_md_destroy(struct proto_busdma *busdma, struct proto_md *md)
263 if (md->virtaddr != NULL)
265 return (proto_busdma_md_destroy_internal(busdma, md));
269 proto_busdma_md_load_callback(void *arg, bus_dma_segment_t *segs, int nseg,
270 bus_size_t sz, int error)
272 struct proto_callback_bundle *pcb = arg;
274 pcb->ioc->u.md.bus_nsegs = nseg;
275 pcb->ioc->u.md.bus_addr = segs[0].ds_addr;
279 proto_busdma_md_load(struct proto_busdma *busdma, struct proto_md *md,
280 struct proto_ioc_busdma *ioc, struct thread *td)
282 struct proto_callback_bundle pcb;
288 iov.iov_base = (void *)(uintptr_t)ioc->u.md.virt_addr;
289 iov.iov_len = ioc->u.md.virt_size;
293 uio.uio_resid = iov.iov_len;
294 uio.uio_segflg = UIO_USERSPACE;
295 uio.uio_rw = UIO_READ;
301 error = bus_dmamap_load_uio(md->bd_tag, md->bd_map, &uio,
302 proto_busdma_md_load_callback, &pcb, BUS_DMA_NOWAIT);
306 /* XXX determine *all* physical memory segments */
307 pmap = vmspace_pmap(td->td_proc->p_vmspace);
308 md->physaddr = pmap_extract(pmap, ioc->u.md.virt_addr);
309 ioc->u.md.phys_nsegs = 1; /* XXX */
310 ioc->u.md.phys_addr = md->physaddr;
315 proto_busdma_md_unload(struct proto_busdma *busdma, struct proto_md *md)
320 bus_dmamap_unload(md->bd_tag, md->bd_map);
326 proto_busdma_sync(struct proto_busdma *busdma, struct proto_md *md,
327 struct proto_ioc_busdma *ioc)
331 ops = BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
332 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE;
333 if (ioc->u.sync.op & ~ops)
337 bus_dmamap_sync(md->bd_tag, md->bd_map, ioc->u.sync.op);
341 static struct proto_md *
342 proto_busdma_md_lookup(struct proto_busdma *busdma, u_long key)
346 LIST_FOREACH(md, &busdma->mds, mds) {
347 if ((void *)md == (void *)key)
353 struct proto_busdma *
354 proto_busdma_attach(struct proto_softc *sc)
356 struct proto_busdma *busdma;
358 busdma = malloc(sizeof(*busdma), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
359 sx_init(&busdma->sxlck, "proto-busdma");
364 proto_busdma_detach(struct proto_softc *sc, struct proto_busdma *busdma)
367 proto_busdma_cleanup(sc, busdma);
368 sx_destroy(&busdma->sxlck);
369 free(busdma, M_PROTO_BUSDMA);
374 proto_busdma_cleanup(struct proto_softc *sc, struct proto_busdma *busdma)
376 struct proto_md *md, *md1;
377 struct proto_tag *tag, *tag1;
379 sx_xlock(&busdma->sxlck);
380 LIST_FOREACH_SAFE(md, &busdma->mds, mds, md1)
381 proto_busdma_md_destroy_internal(busdma, md);
382 LIST_FOREACH_SAFE(tag, &busdma->tags, tags, tag1)
383 proto_busdma_tag_destroy(busdma, tag);
384 sx_xunlock(&busdma->sxlck);
389 proto_busdma_ioctl(struct proto_softc *sc, struct proto_busdma *busdma,
390 struct proto_ioc_busdma *ioc, struct thread *td)
392 struct proto_tag *tag;
396 sx_xlock(&busdma->sxlck);
399 switch (ioc->request) {
400 case PROTO_IOC_BUSDMA_TAG_CREATE:
401 busdma->bd_roottag = bus_get_dma_tag(sc->sc_dev);
402 error = proto_busdma_tag_create(busdma, NULL, ioc);
404 case PROTO_IOC_BUSDMA_TAG_DERIVE:
405 tag = proto_busdma_tag_lookup(busdma, ioc->key);
410 error = proto_busdma_tag_create(busdma, tag, ioc);
412 case PROTO_IOC_BUSDMA_TAG_DESTROY:
413 tag = proto_busdma_tag_lookup(busdma, ioc->key);
418 error = proto_busdma_tag_destroy(busdma, tag);
420 case PROTO_IOC_BUSDMA_MEM_ALLOC:
421 tag = proto_busdma_tag_lookup(busdma, ioc->u.md.tag);
426 error = proto_busdma_mem_alloc(busdma, tag, ioc);
428 case PROTO_IOC_BUSDMA_MEM_FREE:
429 md = proto_busdma_md_lookup(busdma, ioc->key);
434 error = proto_busdma_mem_free(busdma, md);
436 case PROTO_IOC_BUSDMA_MD_CREATE:
437 tag = proto_busdma_tag_lookup(busdma, ioc->u.md.tag);
442 error = proto_busdma_md_create(busdma, tag, ioc);
444 case PROTO_IOC_BUSDMA_MD_DESTROY:
445 md = proto_busdma_md_lookup(busdma, ioc->key);
450 error = proto_busdma_md_destroy(busdma, md);
452 case PROTO_IOC_BUSDMA_MD_LOAD:
453 md = proto_busdma_md_lookup(busdma, ioc->key);
458 error = proto_busdma_md_load(busdma, md, ioc, td);
460 case PROTO_IOC_BUSDMA_MD_UNLOAD:
461 md = proto_busdma_md_lookup(busdma, ioc->key);
466 error = proto_busdma_md_unload(busdma, md);
468 case PROTO_IOC_BUSDMA_SYNC:
469 md = proto_busdma_md_lookup(busdma, ioc->key);
474 error = proto_busdma_sync(busdma, md, ioc);
481 sx_xunlock(&busdma->sxlck);
487 proto_busdma_mmap_allowed(struct proto_busdma *busdma, vm_paddr_t physaddr)
492 sx_xlock(&busdma->sxlck);
495 LIST_FOREACH(md, &busdma->mds, mds) {
496 if (physaddr >= trunc_page(md->physaddr) &&
497 physaddr <= trunc_page(md->physaddr + md->tag->maxsz)) {
503 sx_xunlock(&busdma->sxlck);