2 * Copyright (c) 2018 Ruslan Bukin <br@bsdpad.com>
5 * This software was developed by SRI International and the University of
6 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
7 * ("CTSRD"), as part of the DARPA CRASH research programme.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include "opt_platform.h"
35 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
43 #include <machine/bus.h>
46 #include <dev/fdt/fdt_common.h>
47 #include <dev/ofw/ofw_bus.h>
48 #include <dev/ofw/ofw_bus_subr.h>
51 #include <dev/xdma/xdma.h>
/*
 * State handed to the bus_dmamap_load() callback (xdma_dmamap_cb):
 * carries the caller's segment array to be filled in with the
 * resolved DMA segments.
 * NOTE(review): this chunk is elided — the struct has more fields
 * (callers below read slr.error); confirm against the full file.
 */
55 struct seg_load_request {
56 struct bus_dma_segment *seg;
/*
 * Allocate per-request buffers for a channel on the non-busdma path
 * (used when XCHAN_CAP_BUSDMA is not set; see xchan_bufs_alloc()).
 * Walks the channel's request ring xr_mem[0..xr_num).
 * NOTE(review): the allocation body is elided in this chunk;
 * bounce-buffer support is still a TODO per the original comment.
 */
62 _xchan_bufs_alloc(xdma_channel_t *xchan)
64 xdma_controller_t *xdma;
65 struct xdma_request *xr;
70 for (i = 0; i < xchan->xr_num; i++) {
71 xr = &xchan->xr_mem[i];
72 /* TODO: bounce buffer */
79 _xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
81 xdma_controller_t *xdma;
82 struct xdma_request *xr;
88 /* Create bus_dma tag */
89 err = bus_dma_tag_create(
90 bus_get_dma_tag(xdma->dev), /* Parent tag. */
91 xchan->alignment, /* alignment */
92 xchan->boundary, /* boundary */
93 xchan->lowaddr, /* lowaddr */
94 xchan->highaddr, /* highaddr */
95 NULL, NULL, /* filter, filterarg */
96 xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
97 xchan->maxnsegs, /* nsegments */
98 xchan->maxsegsize, /* maxsegsize */
100 NULL, NULL, /* lockfunc, lockarg */
101 &xchan->dma_tag_bufs);
103 device_printf(xdma->dev,
104 "%s: Can't create bus_dma tag.\n", __func__);
108 for (i = 0; i < xchan->xr_num; i++) {
109 xr = &xchan->xr_mem[i];
110 err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
113 device_printf(xdma->dev,
114 "%s: Can't create buf DMA map.\n", __func__);
117 bus_dma_tag_destroy(xchan->dma_tag_bufs);
/*
 * Front-end buffer allocator: dispatch to the busdma or the plain
 * implementation based on XCHAN_CAP_BUSDMA, and mark the channel
 * with XCHAN_BUFS_ALLOCATED on success.
 * NOTE(review): the early validity check and return paths are
 * partially elided in this chunk.
 */
127 xchan_bufs_alloc(xdma_channel_t *xchan)
129 xdma_controller_t *xdma;
135 device_printf(xdma->dev,
136 "%s: Channel was not allocated properly.\n", __func__);
140 if (xchan->caps & XCHAN_CAP_BUSDMA)
141 ret = _xchan_bufs_alloc_busdma(xchan);
143 ret = _xchan_bufs_alloc(xchan);
145 device_printf(xdma->dev,
146 "%s: Can't allocate bufs.\n", __func__);
150 xchan->flags |= XCHAN_BUFS_ALLOCATED;
/*
 * Release the buffers allocated by xchan_bufs_alloc().  No-op when
 * XCHAN_BUFS_ALLOCATED is not set.  On the busdma path, destroy each
 * request's DMA map and then the shared tag; the non-busdma path is
 * elided here (bounce buffers still TODO).  Clears the
 * XCHAN_BUFS_ALLOCATED flag when done.
 * NOTE(review): 'b' is presumably &xr->buf — its assignment is
 * elided from this chunk; confirm in the full file.
 */
156 xchan_bufs_free(xdma_channel_t *xchan)
158 struct xdma_request *xr;
162 if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
165 if (xchan->caps & XCHAN_CAP_BUSDMA) {
166 for (i = 0; i < xchan->xr_num; i++) {
167 xr = &xchan->xr_mem[i];
169 bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
171 bus_dma_tag_destroy(xchan->dma_tag_bufs);
173 for (i = 0; i < xchan->xr_num; i++) {
174 xr = &xchan->xr_mem[i];
175 /* TODO: bounce buffer */
179 xchan->flags &= ~XCHAN_BUFS_ALLOCATED;
/*
 * Tear down all scatter-gather state for a channel: the data
 * buffers, the sglist storage, and the request bank.
 */
185 xdma_channel_free_sg(xdma_channel_t *xchan)
188 xchan_bufs_free(xchan);
189 xchan_sglist_free(xchan);
190 xchan_bank_free(xchan);
/*
 * Configure a channel for scatter-gather operation.
 *
 * Records the caller's DMA constraints on the channel, allocates the
 * request bank and sglist, allocates data buffers unless the channel
 * has XCHAN_CAP_NOBUFS, sets XCHAN_CONFIGURED | XCHAN_TYPE_SG, and
 * finally asks the controller driver to prepare the transfer via
 * XDMA_CHANNEL_PREP_SG().  Fails if the channel is already
 * configured or maxnsegs exceeds XDMA_MAX_SEG.  On buffer-allocation
 * failure the sglist and bank are released again.
 */
194 * Prepare xchan for a scatter-gather transfer.
195 * xr_num - xdma requests queue size,
196 * maxsegsize - maximum allowed scatter-gather list element size in bytes
199 xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
200 bus_size_t maxsegsize, bus_size_t maxnsegs,
201 bus_size_t alignment, bus_addr_t boundary,
202 bus_addr_t lowaddr, bus_addr_t highaddr)
204 xdma_controller_t *xdma;
209 KASSERT(xdma != NULL, ("xdma is NULL"));
211 if (xchan->flags & XCHAN_CONFIGURED) {
212 device_printf(xdma->dev,
213 "%s: Channel is already configured.\n", __func__);
/* Cache the caller's DMA constraints for later tag creation. */
217 xchan->xr_num = xr_num;
218 xchan->maxsegsize = maxsegsize;
219 xchan->maxnsegs = maxnsegs;
220 xchan->alignment = alignment;
221 xchan->boundary = boundary;
222 xchan->lowaddr = lowaddr;
223 xchan->highaddr = highaddr;
225 if (xchan->maxnsegs > XDMA_MAX_SEG) {
226 device_printf(xdma->dev, "%s: maxnsegs is too big\n",
231 xchan_bank_init(xchan);
233 /* Allocate sglist. */
234 ret = xchan_sglist_alloc(xchan);
236 device_printf(xdma->dev,
237 "%s: Can't allocate sglist.\n", __func__);
241 /* Allocate buffers if required. */
242 if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
243 ret = xchan_bufs_alloc(xchan);
245 device_printf(xdma->dev,
246 "%s: Can't allocate bufs.\n", __func__);
/* Undo the earlier allocations before failing. */
249 xchan_sglist_free(xchan);
250 xchan_bank_free(xchan);
256 xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);
259 ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
261 device_printf(xdma->dev,
262 "%s: Can't prepare SG transfer.\n", __func__);
/*
 * Per-segment completion handler.
 *
 * Atomically decrements the owning request's outstanding-segment
 * count (b->nsegs_left).  When the last segment finishes: on the
 * busdma path, sync the DMA map (POSTWRITE for MEM_TO_DEV,
 * POSTREAD otherwise) and unload it; record the transfer status;
 * then move the request from the 'processing' queue to 'queue_out'
 * under the respective queue locks.
 * NOTE(review): 'b' is presumably &xr->buf — its assignment is
 * elided from this chunk; confirm in the full file.
 */
273 xchan_seg_done(xdma_channel_t *xchan,
274 struct xdma_transfer_status *st)
276 struct xdma_request *xr;
277 xdma_controller_t *xdma;
282 xr = TAILQ_FIRST(&xchan->processing);
284 panic("request not found\n");
288 atomic_subtract_int(&b->nsegs_left, 1);
290 if (b->nsegs_left == 0) {
291 if (xchan->caps & XCHAN_CAP_BUSDMA) {
292 if (xr->direction == XDMA_MEM_TO_DEV)
293 bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
294 BUS_DMASYNC_POSTWRITE);
296 bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
297 BUS_DMASYNC_POSTREAD);
298 bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
300 xr->status.error = st->error;
301 xr->status.transferred = st->transferred;
303 QUEUE_PROC_LOCK(xchan);
304 TAILQ_REMOVE(&xchan->processing, xr, xr_next);
305 QUEUE_PROC_UNLOCK(xchan);
307 QUEUE_OUT_LOCK(xchan);
308 TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
309 QUEUE_OUT_UNLOCK(xchan);
/*
 * bus_dmamap_load() callback: copy the resolved DMA segments into
 * the seg_load_request's segment array.
 * NOTE(review): error/nsegs bookkeeping on the slr (read by callers
 * as slr.error) is elided from this chunk.
 */
314 xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
316 struct seg_load_request *slr;
317 struct bus_dma_segment *seg;
330 for (i = 0; i < nsegs; i++) {
331 seg[i].ds_addr = segs[i].ds_addr;
332 seg[i].ds_len = segs[i].ds_len;
/*
 * Map a request's data for DMA via bus_dma(9), filling 'seg'.
 *
 * Dispatches on the request type: mbuf requests use
 * bus_dmamap_load_mbuf_sg(), bio requests use bus_dmamap_load_bio(),
 * and plain address requests use bus_dmamap_load() on src_addr or
 * dst_addr chosen by transfer direction, with length
 * block_len * block_num.  The bio and address paths deliver their
 * segments through xdma_dmamap_cb() via a seg_load_request.
 * After a successful load, the map is PREWRITE-synced for
 * MEM_TO_DEV and PREREAD-synced otherwise.  ENOMEM from the load is
 * treated as transient (retry later).
 * NOTE(review): the nsegs return plumbing and several error-return
 * paths are elided from this chunk.
 */
337 _xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
338 struct bus_dma_segment *seg)
340 xdma_controller_t *xdma;
341 struct seg_load_request slr;
351 switch (xr->req_type) {
353 error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
354 xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
360 error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
361 xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
362 if (slr.error != 0) {
363 device_printf(xdma->dma_dev,
364 "%s: bus_dmamap_load failed, err %d\n",
365 __func__, slr.error);
/* Pick the CPU-side address for a plain (non-mbuf, non-bio) load. */
371 switch (xr->direction) {
372 case XDMA_MEM_TO_DEV:
373 addr = (void *)xr->src_addr;
375 case XDMA_DEV_TO_MEM:
376 addr = (void *)xr->dst_addr;
379 device_printf(xdma->dma_dev,
380 "%s: Direction is not supported\n", __func__);
386 error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
387 addr, (xr->block_len * xr->block_num),
388 xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
389 if (slr.error != 0) {
390 device_printf(xdma->dma_dev,
391 "%s: bus_dmamap_load failed, err %d\n",
392 __func__, slr.error);
402 if (error == ENOMEM) {
404 * Out of memory. Try again later.
405 * TODO: count errors.
408 device_printf(xdma->dma_dev,
409 "%s: bus_dmamap_load failed with err %d\n",
/* Loaded successfully: pre-sync the map for the transfer direction. */
414 if (xr->direction == XDMA_MEM_TO_DEV)
415 bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
416 BUS_DMASYNC_PREWRITE);
418 bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
419 BUS_DMASYNC_PREREAD);
/*
 * Non-busdma data load: for an mbuf request, describe the packet as
 * a single segment (address of the mbuf data, packet-header length).
 * Other request types are unimplemented and panic.
 * NOTE(review): the assignment of 'm' and any address translation
 * are elided from this chunk; confirm in the full file.
 */
425 _xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
426 struct bus_dma_segment *seg)
428 xdma_controller_t *xdma;
438 switch (xr->req_type) {
440 seg[0].ds_addr = mtod(m, bus_addr_t);
441 seg[0].ds_len = m->m_pkthdr.len;
446 panic("implement me\n");
/*
 * Build the DMA segment array for a request: via busdma when the
 * channel has XCHAN_CAP_BUSDMA, otherwise the plain path.  A zero
 * segment count means "try again later".  On success, records the
 * segment count in the request's buffer descriptor (nsegs and the
 * nsegs_left countdown consumed by xchan_seg_done()).
 */
453 xdma_load_data(xdma_channel_t *xchan,
454 struct xdma_request *xr, struct bus_dma_segment *seg)
456 xdma_controller_t *xdma;
465 if (xchan->caps & XCHAN_CAP_BUSDMA)
466 nsegs = _xdma_load_data_busdma(xchan, xr, seg);
468 nsegs = _xdma_load_data(xchan, xr, seg);
470 return (0); /* Try again later. */
472 xr->buf.nsegs = nsegs;
473 xr->buf.nsegs_left = nsegs;
/*
 * Drain the channel's input queue into the caller's sglist.
 *
 * Queries the controller's remaining capacity first, then walks
 * 'queue_in' safely: mbuf requests may be defragmented when the
 * channel cannot do scatter-gather (XCHAN_CAP_NOSEG) or their
 * segment count exceeds maxnsegs; requests that would overflow the
 * engine capacity or XDMA_SGLIST_MAXLEN stop the scan.  Each
 * accepted request has its segments loaded (xdma_load_data), added
 * to the sglist (xdma_sglist_add), and is moved from 'queue_in' to
 * 'processing'.  Caller must hold the channel lock.
 * NOTE(review): the meaning/maintenance of the running counters
 * 'c' and 'n' and the return value are partially elided here —
 * presumably per-request and total segment counts; confirm.
 */
479 xdma_process(xdma_channel_t *xchan,
480 struct xdma_sglist *sg)
482 struct bus_dma_segment seg[XDMA_MAX_SEG];
483 struct xdma_request *xr;
484 struct xdma_request *xr_tmp;
485 xdma_controller_t *xdma;
492 XCHAN_ASSERT_LOCKED(xchan);
498 ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
500 device_printf(xdma->dev,
501 "%s: Can't get DMA controller capacity.\n", __func__);
505 TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
506 switch (xr->req_type) {
508 if ((xchan->caps & XCHAN_CAP_NOSEG) ||
509 (c > xchan->maxnsegs))
510 c = xdma_mbuf_defrag(xchan, xr);
518 if (capacity <= (c + n)) {
520 * No space yet available for the entire
521 * request in the DMA engine.
526 if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
527 /* Sglist is full. */
531 nsegs = xdma_load_data(xchan, xr, seg);
535 xdma_sglist_add(&sg[n], seg, nsegs, xr);
538 QUEUE_IN_LOCK(xchan);
539 TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
540 QUEUE_IN_UNLOCK(xchan);
542 QUEUE_PROC_LOCK(xchan);
543 TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
544 QUEUE_PROC_UNLOCK(xchan);
/*
 * Submit all queued requests on a scatter-gather channel.
 *
 * Refuses to run when buffers are required (no XCHAN_CAP_NOBUFS)
 * but not yet allocated; collects segments from the input queue via
 * xdma_process(); and, if anything was collected, hands the sglist
 * to the controller driver via XDMA_CHANNEL_SUBMIT_SG().
 * Caller must hold the channel lock.
 * NOTE(review): this function continues past the end of the visible
 * chunk; the 'sg' buffer source and final return are not shown.
 */
551 xdma_queue_submit_sg(xdma_channel_t *xchan)
553 struct xdma_sglist *sg;
554 xdma_controller_t *xdma;
559 KASSERT(xdma != NULL, ("xdma is NULL"));
561 XCHAN_ASSERT_LOCKED(xchan);
565 if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0 &&
566 (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
567 device_printf(xdma->dev,
568 "%s: Can't submit a transfer: no bufs\n",
573 sg_n = xdma_process(xchan, sg);
575 return (0); /* Nothing to submit */
577 /* Now submit sglist to DMA engine driver. */
578 ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
580 device_printf(xdma->dev,
581 "%s: Can't submit an sglist.\n", __func__);