2 * Copyright (c) 2018 Ruslan Bukin <br@bsdpad.com>
5 * This software was developed by SRI International and the University of
6 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
7 * ("CTSRD"), as part of the DARPA CRASH research programme.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include "opt_platform.h"
35 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
43 #include <machine/bus.h>
46 #include <dev/fdt/fdt_common.h>
47 #include <dev/ofw/ofw_bus.h>
48 #include <dev/ofw/ofw_bus_subr.h>
51 #include <dev/xdma/xdma.h>
/*
 * Context handed to the busdma load callback (xdma_dmamap_cb): the
 * callback copies the loaded bus_dma segments into 'seg'.
 * NOTE(review): further members (nsegs/error — 'slr.error' is read in
 * _xdma_load_data_busdma) are elided from this chunk.
 */
55 struct seg_load_request {
56 struct bus_dma_segment *seg; /* Caller-supplied segment array to fill. */
/*
 * Allocate one contiguous, page-aligned kernel buffer of
 * xchan->maxsegsize bytes per request slot (xr_num of them).
 * This is the non-busdma fallback path; compare
 * _xchan_bufs_alloc_busdma for the XCHAN_CAP_BUSDMA case.
 * NOTE(review): declaration of 'i', the error-path return value and the
 * success return are elided from this view.
 */
62 _xchan_bufs_alloc(xdma_channel_t *xchan)
64 xdma_controller_t *xdma;
65 struct xdma_request *xr;
70 for (i = 0; i < xchan->xr_num; i++) {
71 xr = &xchan->xr_mem[i];
72 xr->buf.cbuf = contigmalloc(xchan->maxsegsize,
73 M_XDMA, 0, 0, ~0, PAGE_SIZE, 0); /* low 0, high ~0: any physaddr; PAGE_SIZE alignment, no boundary. */
74 if (xr->buf.cbuf == NULL) {
75 device_printf(xdma->dev,
76 "%s: Can't allocate contiguous kernel"
77 " physical memory\n", __func__);
/*
 * busdma buffer setup for a channel: create one DMA tag sized for the
 * channel's worst case (maxnsegs segments of maxsegsize bytes) using the
 * alignment/boundary/lowaddr/highaddr constraints recorded by
 * xdma_prep_sg, then create one DMA map per request slot.
 * On map-creation failure the tag is destroyed before bailing out.
 * NOTE(review): declarations of 'i'/'err', some error returns and the
 * success return are elided from this view.
 */
86 _xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
88 xdma_controller_t *xdma;
89 struct xdma_request *xr;
95 /* Create bus_dma tag */
96 err = bus_dma_tag_create(
97 bus_get_dma_tag(xdma->dev), /* Parent tag. */
98 xchan->alignment, /* alignment */
99 xchan->boundary, /* boundary */
100 xchan->lowaddr, /* lowaddr */
101 xchan->highaddr, /* highaddr */
102 NULL, NULL, /* filter, filterarg */
103 xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
104 xchan->maxnsegs, /* nsegments */
105 xchan->maxsegsize, /* maxsegsize */
107 NULL, NULL, /* lockfunc, lockarg */
108 &xchan->dma_tag_bufs);
110 device_printf(xdma->dev,
111 "%s: Can't create bus_dma tag.\n", __func__);
115 for (i = 0; i < xchan->xr_num; i++) {
116 xr = &xchan->xr_mem[i];
117 err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
120 device_printf(xdma->dev,
121 "%s: Can't create buf DMA map.\n", __func__);
/* Unwind: drop the tag so a later retry starts clean. */
124 bus_dma_tag_destroy(xchan->dma_tag_bufs);
/*
 * Allocate per-request buffers for the channel, dispatching to the
 * busdma path (XCHAN_CAP_BUSDMA) or the contigmalloc fallback.
 * Marks the channel XCHAN_BUFS_ALLOCATED on success.
 * NOTE(review): the 'xdma' assignment, the guard condition before the
 * "not allocated properly" message, and the return statements are
 * elided from this view.
 */
134 xchan_bufs_alloc(xdma_channel_t *xchan)
136 xdma_controller_t *xdma;
142 device_printf(xdma->dev,
143 "%s: Channel was not allocated properly.\n", __func__);
147 if (xchan->caps & XCHAN_CAP_BUSDMA)
148 ret = _xchan_bufs_alloc_busdma(xchan);
150 ret = _xchan_bufs_alloc(xchan);
152 device_printf(xdma->dev,
153 "%s: Can't allocate bufs.\n", __func__);
157 xchan->flags |= XCHAN_BUFS_ALLOCATED;
/*
 * Release the per-request buffers allocated by xchan_bufs_alloc.
 * No-op when XCHAN_BUFS_ALLOCATED is not set.  busdma path: destroy
 * each request's DMA map, then the shared tag; fallback path: free each
 * contiguous buffer.  Clears XCHAN_BUFS_ALLOCATED when done.
 * NOTE(review): the 'b = &xr->buf' style assignment behind 'b' and the
 * return are elided from this view.
 */
163 xchan_bufs_free(xdma_channel_t *xchan)
165 struct xdma_request *xr;
169 if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
172 if (xchan->caps & XCHAN_CAP_BUSDMA) {
173 for (i = 0; i < xchan->xr_num; i++) {
174 xr = &xchan->xr_mem[i];
176 bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
178 bus_dma_tag_destroy(xchan->dma_tag_bufs);
180 for (i = 0; i < xchan->xr_num; i++) {
181 xr = &xchan->xr_mem[i];
182 contigfree(xr->buf.cbuf, xchan->maxsegsize, M_XDMA);
186 xchan->flags &= ~XCHAN_BUFS_ALLOCATED;
/*
 * Tear down the scatter-gather state of a channel: per-request buffers,
 * the sglist, and the request bank — the reverse of the setup done in
 * xdma_prep_sg.
 */
192 xdma_channel_free_sg(xdma_channel_t *xchan)
195 xchan_bufs_free(xchan);
196 xchan_sglist_free(xchan);
197 xchan_bank_free(xchan);
201 * Prepare xchan for a scatter-gather transfer.
202 * xr_num - xdma requests queue size,
203 * maxsegsize - maximum allowed scatter-gather list element size in bytes,
 * maxnsegs - maximum number of segments per request (capped at XDMA_MAX_SEG),
 * alignment/boundary/lowaddr/highaddr - busdma constraints recorded for
 * the tag created later in _xchan_bufs_alloc_busdma.
 * Fails if the channel is already configured.  On buffer-allocation
 * failure the sglist and bank are unwound.  Finally hands the channel to
 * the controller driver via XDMA_CHANNEL_PREP_SG.
 * NOTE(review): return statements and some error-check conditions are
 * elided from this view.
206 xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
207 bus_size_t maxsegsize, bus_size_t maxnsegs,
208 bus_size_t alignment, bus_addr_t boundary,
209 bus_addr_t lowaddr, bus_addr_t highaddr)
211 xdma_controller_t *xdma;
216 KASSERT(xdma != NULL, ("xdma is NULL"));
218 if (xchan->flags & XCHAN_CONFIGURED) {
219 device_printf(xdma->dev,
220 "%s: Channel is already configured.\n", __func__);
/* Record the caller's geometry; consumed by the buffer allocators. */
224 xchan->xr_num = xr_num;
225 xchan->maxsegsize = maxsegsize;
226 xchan->maxnsegs = maxnsegs;
227 xchan->alignment = alignment;
228 xchan->boundary = boundary;
229 xchan->lowaddr = lowaddr;
230 xchan->highaddr = highaddr;
232 if (xchan->maxnsegs > XDMA_MAX_SEG) {
233 device_printf(xdma->dev, "%s: maxnsegs is too big\n",
238 xchan_bank_init(xchan);
240 /* Allocate sglist. */
241 ret = xchan_sglist_alloc(xchan);
243 device_printf(xdma->dev,
244 "%s: Can't allocate sglist.\n", __func__);
249 ret = xchan_bufs_alloc(xchan);
251 device_printf(xdma->dev,
252 "%s: Can't allocate bufs.\n", __func__);
/* Unwind the sglist and bank allocated above. */
255 xchan_sglist_free(xchan);
256 xchan_bank_free(xchan);
261 xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);
264 ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
266 device_printf(xdma->dev,
267 "%s: Can't prepare SG transfer.\n", __func__);
/*
 * Completion handler for one scatter-gather segment.  Decrements the
 * outstanding-segment count of the request at the head of the
 * processing queue; when the last segment finishes, post-sync and
 * unload the DMA map (busdma channels), record the transfer status from
 * 'st', and move the request from 'processing' to 'queue_out'.
 * NOTE(review): the assignment behind 'b' (presumably &xr->buf — TODO
 * confirm), the NULL-check guarding the panic, and the 'else' pairing
 * the two dmamap_sync calls are elided from this view.
 */
278 xchan_seg_done(xdma_channel_t *xchan,
279 struct xdma_transfer_status *st)
281 struct xdma_request *xr;
282 xdma_controller_t *xdma;
287 xr = TAILQ_FIRST(&xchan->processing);
289 panic("request not found\n");
293 atomic_subtract_int(&b->nsegs_left, 1);
295 if (b->nsegs_left == 0) {
296 if (xchan->caps & XCHAN_CAP_BUSDMA) {
297 if (xr->direction == XDMA_MEM_TO_DEV)
298 bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
299 BUS_DMASYNC_POSTWRITE);
301 bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
302 BUS_DMASYNC_POSTREAD);
303 bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
/* Publish the driver-reported outcome on the request itself. */
305 xr->status.error = st->error;
306 xr->status.transferred = st->transferred;
308 QUEUE_PROC_LOCK(xchan);
309 TAILQ_REMOVE(&xchan->processing, xr, xr_next);
310 QUEUE_PROC_UNLOCK(xchan);
312 QUEUE_OUT_LOCK(xchan);
313 TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
314 QUEUE_OUT_UNLOCK(xchan);
/*
 * bus_dmamap_load callback: copy the segments produced by busdma into
 * the caller's array carried in the seg_load_request context ('arg').
 * NOTE(review): the lines casting 'arg' to 'slr', assigning 'seg', and
 * recording nsegs/error on early exit are elided from this view.
 */
319 xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
321 struct seg_load_request *slr;
322 struct bus_dma_segment *seg;
335 for (i = 0; i < nsegs; i++) {
336 seg[i].ds_addr = segs[i].ds_addr;
337 seg[i].ds_len = segs[i].ds_len;
/*
 * Map one request's data for DMA via busdma, filling 'seg' with the
 * resulting segments.  Handles three request types: mbuf
 * (bus_dmamap_load_mbuf_sg), bio (bus_dmamap_load_bio through
 * xdma_dmamap_cb), and a plain address range (bus_dmamap_load of
 * src_addr or dst_addr depending on direction).  Finishes with the
 * appropriate PRE sync for the transfer direction.
 * NOTE(review): case labels, the slr setup, the returns (including the
 * nsegs result) and the 'else' pairing the final sync calls are elided
 * from this view.
 */
342 _xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
343 struct bus_dma_segment *seg)
345 xdma_controller_t *xdma;
346 struct seg_load_request slr;
356 switch (xr->req_type) {
358 error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
359 xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
365 error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
366 xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
367 if (slr.error != 0) {
368 device_printf(xdma->dma_dev,
369 "%s: bus_dmamap_load failed, err %d\n",
370 __func__, slr.error);
/* Address-range request: pick the CPU-side address by direction. */
376 switch (xr->direction) {
377 case XDMA_MEM_TO_DEV:
378 addr = (void *)xr->src_addr;
380 case XDMA_DEV_TO_MEM:
381 addr = (void *)xr->dst_addr;
384 device_printf(xdma->dma_dev,
385 "%s: Direction is not supported\n", __func__);
391 error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
392 addr, (xr->block_len * xr->block_num),
393 xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
394 if (slr.error != 0) {
395 device_printf(xdma->dma_dev,
396 "%s: bus_dmamap_load failed, err %d\n",
397 __func__, slr.error);
407 if (error == ENOMEM) {
409 * Out of memory. Try again later.
410 * TODO: count errors.
413 device_printf(xdma->dma_dev,
414 "%s: bus_dmamap_load failed with err %d\n",
/* Pre-transfer sync: PREWRITE for MEM_TO_DEV, PREREAD otherwise. */
419 if (xr->direction == XDMA_MEM_TO_DEV)
420 bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
421 BUS_DMASYNC_PREWRITE);
423 bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
424 BUS_DMASYNC_PREREAD);
/*
 * Non-busdma variant of request loading: produce a single segment per
 * request.  For MEM_TO_DEV mbuf requests the packet is copied into the
 * channel's contiguous bounce buffer (cbuf, allocated in
 * _xchan_bufs_alloc); for the other direction the mbuf data pointer is
 * used in place.  The bio path is unimplemented and panics.
 * NOTE(review): case labels, the 'm' assignment, and the nsegs return
 * are elided from this view.  seg[0].ds_addr here is a kernel virtual
 * address cast to bus_addr_t — presumably the consumer translates it;
 * verify against the controller driver.
 */
430 _xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
431 struct bus_dma_segment *seg)
433 xdma_controller_t *xdma;
443 switch (xr->req_type) {
445 if (xr->direction == XDMA_MEM_TO_DEV) {
446 m_copydata(m, 0, m->m_pkthdr.len, xr->buf.cbuf);
447 seg[0].ds_addr = (bus_addr_t)xr->buf.cbuf;
448 seg[0].ds_len = m->m_pkthdr.len;
450 seg[0].ds_addr = mtod(m, bus_addr_t);
451 seg[0].ds_len = m->m_pkthdr.len;
457 panic("implement me\n");
/*
 * Load a request's data into 'seg', dispatching to the busdma or
 * fallback loader by channel capability.  A zero segment count means
 * "try again later" (e.g. transient ENOMEM in the busdma path).
 * Records the segment count on the request's buf for completion
 * accounting in xchan_seg_done.
 * NOTE(review): the nsegs declaration, the zero-check guarding the
 * early return, and the final return of nsegs are elided from this
 * view.
 */
464 xdma_load_data(xdma_channel_t *xchan,
465 struct xdma_request *xr, struct bus_dma_segment *seg)
467 xdma_controller_t *xdma;
476 if (xchan->caps & XCHAN_CAP_BUSDMA)
477 nsegs = _xdma_load_data_busdma(xchan, xr, seg);
479 nsegs = _xdma_load_data(xchan, xr, seg);
481 return (0); /* Try again later. */
483 xr->buf.nsegs = nsegs;
484 xr->buf.nsegs_left = nsegs;
/*
 * Drain the channel's input queue into the caller's sglist 'sg'.
 * For each queued request: defragment mbufs as needed, stop early when
 * the DMA engine lacks capacity for the whole request or the sglist is
 * nearly full, load the request's segments, append them to 'sg', and
 * move the request from 'queue_in' to 'processing'.
 * NOTE(review): declarations of c/n/capacity/nsegs, case labels, break
 * statements, the 'n += nsegs' style accumulation, and the return of
 * the sglist length are elided from this view.
 */
490 xdma_process(xdma_channel_t *xchan,
491 struct xdma_sglist *sg)
493 struct bus_dma_segment seg[XDMA_MAX_SEG];
494 struct xdma_request *xr;
495 struct xdma_request *xr_tmp;
496 xdma_controller_t *xdma;
503 XCHAN_ASSERT_LOCKED(xchan);
509 ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
511 device_printf(xdma->dev,
512 "%s: Can't get DMA controller capacity.\n", __func__);
516 TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
517 switch (xr->req_type) {
519 c = xdma_mbuf_defrag(xchan, xr);
527 if (capacity <= (c + n)) {
529 * No space yet available for the entire
530 * request in the DMA engine.
535 if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
536 /* Sglist is full. */
540 nsegs = xdma_load_data(xchan, xr, seg);
544 xdma_sglist_add(&sg[n], seg, nsegs, xr);
/* Hand the request over: input queue -> processing queue. */
547 QUEUE_IN_LOCK(xchan);
548 TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
549 QUEUE_IN_UNLOCK(xchan);
551 QUEUE_PROC_LOCK(xchan);
552 TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
553 QUEUE_PROC_UNLOCK(xchan);
/*
 * Submit all queued requests to the DMA engine: refuse if the channel's
 * buffers are not allocated, build an sglist via xdma_process, and hand
 * the result to the controller driver with XDMA_CHANNEL_SUBMIT_SG.
 * Returns early with 0 when there is nothing to submit.
 * NOTE(review): the 'sg' assignment (presumably xchan->sg — TODO
 * confirm), the sg_n zero-check, and the return statements are elided
 * from this view.  Caller must hold the channel lock
 * (XCHAN_ASSERT_LOCKED).
 */
560 xdma_queue_submit_sg(xdma_channel_t *xchan)
562 struct xdma_sglist *sg;
563 xdma_controller_t *xdma;
568 KASSERT(xdma != NULL, ("xdma is NULL"));
570 XCHAN_ASSERT_LOCKED(xchan);
574 if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
575 device_printf(xdma->dev,
576 "%s: Can't submit a transfer: no bufs\n",
581 sg_n = xdma_process(xchan, sg);
583 return (0); /* Nothing to submit */
585 /* Now submit sglist to DMA engine driver. */
586 ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
588 device_printf(xdma->dev,
589 "%s: Can't submit an sglist.\n", __func__);