2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2018-2019 Ruslan Bukin <br@bsdpad.com>
6 * This software was developed by SRI International and the University of
7 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
8 * ("CTSRD"), as part of the DARPA CRASH research programme.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include "opt_platform.h"
36 #include <sys/param.h>
39 #include <sys/kernel.h>
41 #include <sys/malloc.h>
43 #include <sys/mutex.h>
44 #include <sys/rwlock.h>
46 #include <machine/bus.h>
50 #include <vm/vm_extern.h>
51 #include <vm/vm_page.h>
54 #include <dev/fdt/fdt_common.h>
55 #include <dev/ofw/ofw_bus.h>
56 #include <dev/ofw/ofw_bus_subr.h>
59 #include <dev/xdma/xdma.h>
/*
 * Context handed to the busdma load callback (xdma_dmamap_cb) so it can
 * copy the resulting bus_dma segments out to the caller's array.
 * NOTE(review): further members are elided in this extract — the callback
 * and _xdma_load_data_busdma() below also use slr.error (and apparently an
 * nsegs count); confirm against the full struct definition.
 */
63 struct seg_load_request {
	/* Destination array the callback copies segments into. */
64 struct bus_dma_segment *seg;
/*
 * Tear down the per-request bounce buffers built by
 * xchan_bufs_alloc_reserved(): remove the device mapping, release the
 * KVA window, and return the physical range to the channel's vmem arena.
 */
70 xchan_bufs_free_reserved(xdma_channel_t *xchan)
72 struct xdma_request *xr;
/* Walk every request slot in the channel's request array. */
76 for (i = 0; i < xchan->xr_num; i++) {
77 xr = &xchan->xr_mem[i];
/*
 * NOTE(review): `size` is assigned on a line elided from this extract;
 * presumably round_page(xchan->maxsegsize) to mirror the alloc path —
 * confirm against the full source.
 */
80 pmap_kremove_device(xr->buf.vaddr, size);
81 kva_free(xr->buf.vaddr, size);
85 vmem_free(xchan->vmem, xr->buf.paddr, size);
/*
 * Allocate one bounce buffer per request slot out of the channel's
 * reserved vmem arena: physical space via vmem_alloc(), a KVA window via
 * kva_alloc(), joined with pmap_kenter_device().  On any failure the
 * partially-built set is released with xchan_bufs_free_reserved().
 * NOTE(review): `xdma` is used for device_printf below but its assignment
 * (presumably xchan->xdma) is on an elided line — confirm.
 */
93 xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
95 xdma_controller_t *xdma;
96 struct xdma_request *xr;
/* Reserved-memory mode needs a vmem arena to carve buffers from. */
103 if (xchan->vmem == NULL)
106 for (i = 0; i < xchan->xr_num; i++) {
107 xr = &xchan->xr_mem[i];
/* Each buffer holds one maximum-size segment, rounded to page size. */
108 size = round_page(xchan->maxsegsize);
109 if (vmem_alloc(xchan->vmem, size,
110 M_BESTFIT | M_NOWAIT, &addr)) {
111 device_printf(xdma->dev,
112 "%s: Can't allocate memory\n", __func__);
113 xchan_bufs_free_reserved(xchan);
118 xr->buf.paddr = addr;
119 xr->buf.vaddr = kva_alloc(size);
/* kva_alloc() returns 0 on failure. */
120 if (xr->buf.vaddr == 0) {
121 device_printf(xdma->dev,
122 "%s: Can't allocate KVA\n", __func__);
123 xchan_bufs_free_reserved(xchan);
/* Map the physical range into the freshly allocated KVA window. */
126 pmap_kenter_device(xr->buf.vaddr, size, addr);
/*
 * busdma-mode buffer setup: create one bus_dma tag sized for
 * xchan->maxnsegs segments of xchan->maxsegsize bytes each, then one DMA
 * map per request slot.  On map-creation failure the tag is destroyed
 * (destruction of maps created in earlier iterations is on lines elided
 * from this extract — verify against the full source).
 */
133 xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
135 xdma_controller_t *xdma;
136 struct xdma_request *xr;
142 /* Create bus_dma tag */
143 err = bus_dma_tag_create(
144 bus_get_dma_tag(xdma->dev), /* Parent tag. */
145 xchan->alignment, /* alignment */
146 xchan->boundary, /* boundary */
147 xchan->lowaddr, /* lowaddr */
148 xchan->highaddr, /* highaddr */
149 NULL, NULL, /* filter, filterarg */
150 xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
151 xchan->maxnsegs, /* nsegments */
152 xchan->maxsegsize, /* maxsegsize */
154 NULL, NULL, /* lockfunc, lockarg */
155 &xchan->dma_tag_bufs);
157 device_printf(xdma->dev,
158 "%s: Can't create bus_dma tag.\n", __func__);
/* One reusable DMA map per pre-allocated request slot. */
162 for (i = 0; i < xchan->xr_num; i++) {
163 xr = &xchan->xr_mem[i];
164 err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
167 device_printf(xdma->dev,
168 "%s: Can't create buf DMA map.\n", __func__);
171 bus_dma_tag_destroy(xchan->dma_tag_bufs);
/*
 * Dispatch buffer allocation by channel capability: busdma-capable
 * channels get bus_dma tags/maps, everything else gets reserved-memory
 * bounce buffers.  On success the channel is flagged XCHAN_BUFS_ALLOCATED
 * so xchan_bufs_free() knows there is something to release.
 */
181 xchan_bufs_alloc(xdma_channel_t *xchan)
183 xdma_controller_t *xdma;
/* Guard elided here: presumably fires when xdma/xchan state is missing. */
189 device_printf(xdma->dev,
190 "%s: Channel was not allocated properly.\n", __func__);
194 if (xchan->caps & XCHAN_CAP_BUSDMA)
195 ret = xchan_bufs_alloc_busdma(xchan);
197 ret = xchan_bufs_alloc_reserved(xchan);
200 device_printf(xdma->dev,
201 "%s: Can't allocate bufs.\n", __func__);
205 xchan->flags |= XCHAN_BUFS_ALLOCATED;
/*
 * Inverse of xchan_bufs_alloc(): no-op unless XCHAN_BUFS_ALLOCATED is
 * set; busdma channels destroy per-request maps and then the shared tag,
 * others release their reserved bounce buffers.  Clears the flag when done.
 */
211 xchan_bufs_free(xdma_channel_t *xchan)
213 struct xdma_request *xr;
/* Nothing to do if buffers were never allocated. */
217 if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
220 if (xchan->caps & XCHAN_CAP_BUSDMA) {
221 for (i = 0; i < xchan->xr_num; i++) {
222 xr = &xchan->xr_mem[i];
/* NOTE(review): `b` is set on an elided line; presumably &xr->buf — confirm. */
224 bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
226 bus_dma_tag_destroy(xchan->dma_tag_bufs);
228 xchan_bufs_free_reserved(xchan);
230 xchan->flags &= ~XCHAN_BUFS_ALLOCATED;
/*
 * Release all scatter-gather resources of a channel: buffers, the
 * sglist, and the request bank, in that order.
 */
236 xdma_channel_free_sg(xdma_channel_t *xchan)
239 xchan_bufs_free(xchan);
240 xchan_sglist_free(xchan);
241 xchan_bank_free(xchan);
245 * Prepare xchan for a scatter-gather transfer.
246 * xr_num - xdma requests queue size,
247 * maxsegsize - maximum allowed scatter-gather list element size in bytes
250 xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
251 bus_size_t maxsegsize, bus_size_t maxnsegs,
252 bus_size_t alignment, bus_addr_t boundary,
253 bus_addr_t lowaddr, bus_addr_t highaddr)
255 xdma_controller_t *xdma;
260 KASSERT(xdma != NULL, ("xdma is NULL"));
/* A channel may only be configured once. */
262 if (xchan->flags & XCHAN_CONFIGURED) {
263 device_printf(xdma->dev,
264 "%s: Channel is already configured.\n", __func__);
/* Record the caller's DMA constraints on the channel. */
268 xchan->xr_num = xr_num;
269 xchan->maxsegsize = maxsegsize;
270 xchan->maxnsegs = maxnsegs;
271 xchan->alignment = alignment;
272 xchan->boundary = boundary;
273 xchan->lowaddr = lowaddr;
274 xchan->highaddr = highaddr;
/* seg[] in xdma_process() is statically sized to XDMA_MAX_SEG. */
276 if (xchan->maxnsegs > XDMA_MAX_SEG) {
277 device_printf(xdma->dev, "%s: maxnsegs is too big\n",
282 xchan_bank_init(xchan);
284 /* Allocate sglist. */
285 ret = xchan_sglist_alloc(xchan);
287 device_printf(xdma->dev,
288 "%s: Can't allocate sglist.\n", __func__);
292 /* Allocate buffers if required. */
293 if (xchan->caps & (XCHAN_CAP_BUSDMA | XCHAN_CAP_BOUNCE)) {
294 ret = xchan_bufs_alloc(xchan);
296 device_printf(xdma->dev,
297 "%s: Can't allocate bufs.\n", __func__);
/* Unwind the earlier sglist/bank allocations on failure. */
300 xchan_sglist_free(xchan);
301 xchan_bank_free(xchan);
307 xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);
/* Let the DMA engine driver do its own per-channel SG setup. */
310 ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
312 device_printf(xdma->dev,
313 "%s: Can't prepare SG transfer.\n", __func__);
/*
 * Per-segment completion handler.  Decrements the outstanding-segment
 * count of the request at the head of the processing queue; when the last
 * segment finishes, performs capability-specific post-transfer work
 * (busdma POST sync + unload, bounce copy-back for DEV_TO_MEM mbufs, or
 * IOMMU entry removal) and moves the request to queue_out with its final
 * status.
 */
324 xchan_seg_done(xdma_channel_t *xchan,
325 struct xdma_transfer_status *st)
327 struct xdma_request *xr;
328 xdma_controller_t *xdma;
/* Segments complete in order; the head request owns this segment. */
334 xr = TAILQ_FIRST(&xchan->processing);
336 panic("request not found\n");
/* NOTE(review): `b` is set on an elided line; presumably &xr->buf — confirm. */
340 atomic_subtract_int(&b->nsegs_left, 1);
342 if (b->nsegs_left == 0) {
343 if (xchan->caps & XCHAN_CAP_BUSDMA) {
344 if (xr->direction == XDMA_MEM_TO_DEV)
345 bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
346 BUS_DMASYNC_POSTWRITE);
/* (else branch elided in this extract) */
348 bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
349 BUS_DMASYNC_POSTREAD);
350 bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
351 } else if (xchan->caps & XCHAN_CAP_BOUNCE) {
/* Device wrote into the bounce buffer: copy back into the mbuf. */
352 if (xr->req_type == XR_TYPE_MBUF &&
353 xr->direction == XDMA_DEV_TO_MEM)
354 m_copyback(xr->m, 0, st->transferred,
355 (void *)xr->buf.vaddr);
356 } else if (xchan->caps & XCHAN_CAP_IOMMU) {
357 if (xr->direction == XDMA_MEM_TO_DEV)
/* NOTE(review): `addr` source lines elided; presumably the VA saved at load time. */
361 xdma_iommu_remove_entry(xchan, addr);
/* Publish final status for the consumer of queue_out. */
363 xr->status.error = st->error;
364 xr->status.transferred = st->transferred;
366 QUEUE_PROC_LOCK(xchan);
367 TAILQ_REMOVE(&xchan->processing, xr, xr_next);
368 QUEUE_PROC_UNLOCK(xchan);
370 QUEUE_OUT_LOCK(xchan);
371 TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
372 QUEUE_OUT_UNLOCK(xchan);
/*
 * bus_dmamap_load() callback: copies the driver-computed segment list
 * into the caller's array via the seg_load_request context.
 * NOTE(review): lines assigning slr/seg from `arg` (and recording
 * error/nsegs into slr) are elided from this extract — confirm.
 */
377 xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
379 struct seg_load_request *slr;
380 struct bus_dma_segment *seg;
/* Copy each computed segment out to the caller-visible array. */
393 for (i = 0; i < nsegs; i++) {
394 seg[i].ds_addr = segs[i].ds_addr;
395 seg[i].ds_len = segs[i].ds_len;
/*
 * Load a request's data through busdma, dispatching on request type:
 * mbuf chains via bus_dmamap_load_mbuf_sg(), bios via bus_dmamap_load_bio()
 * (callback fills seg[]), and plain address ranges via bus_dmamap_load()
 * using src_addr or dst_addr depending on direction.  Ends with the
 * appropriate PRE sync so the device sees coherent data.
 */
400 _xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
401 struct bus_dma_segment *seg)
403 xdma_controller_t *xdma;
404 struct seg_load_request slr;
414 switch (xr->req_type) {
/* Mbuf chain: busdma writes the segments straight into seg[]. */
416 error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
417 xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
/* Bio: segments arrive via xdma_dmamap_cb() through slr. */
423 error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
424 xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
425 if (slr.error != 0) {
426 device_printf(xdma->dma_dev,
427 "%s: bus_dmamap_load failed, err %d\n",
428 __func__, slr.error);
/* Plain address range: pick the CPU-side address by direction. */
434 switch (xr->direction) {
435 case XDMA_MEM_TO_DEV:
436 addr = (void *)xr->src_addr;
438 case XDMA_DEV_TO_MEM:
439 addr = (void *)xr->dst_addr;
442 device_printf(xdma->dma_dev,
443 "%s: Direction is not supported\n", __func__);
449 error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
450 addr, (xr->block_len * xr->block_num),
451 xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
452 if (slr.error != 0) {
453 device_printf(xdma->dma_dev,
454 "%s: bus_dmamap_load failed, err %d\n",
455 __func__, slr.error);
/* ENOMEM is transient: caller is expected to retry later. */
465 if (error == ENOMEM) {
467 * Out of memory. Try again later.
468 * TODO: count errors.
471 device_printf(xdma->dma_dev,
472 "%s: bus_dmamap_load failed with err %d\n",
/* Pre-transfer sync: PREWRITE toward the device, PREREAD from it. */
477 if (xr->direction == XDMA_MEM_TO_DEV)
478 bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
479 BUS_DMASYNC_PREWRITE);
481 bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
482 BUS_DMASYNC_PREREAD);
/*
 * Non-busdma data load: fills seg[0] directly (hence the XCHAN_CAP_NOSEG
 * assertion — only unsegmented channels may come through here).
 * Bounce-capable channels copy mbuf data into the pre-allocated bounce
 * buffer; IOMMU-capable channels install a device-visible mapping; plain
 * channels use the mbuf's physical address as-is.
 */
488 _xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
489 struct bus_dma_segment *seg)
491 xdma_controller_t *xdma;
494 vm_offset_t va, addr;
502 KASSERT(xchan->caps & XCHAN_CAP_NOSEG,
503 ("Handling segmented data is not implemented here."));
507 switch (xr->req_type) {
509 if (xchan->caps & XCHAN_CAP_BOUNCE) {
/* Outbound: stage the packet in the bounce buffer before DMA. */
510 if (xr->direction == XDMA_MEM_TO_DEV)
511 m_copydata(m, 0, m->m_pkthdr.len,
512 (void *)xr->buf.vaddr);
513 seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
514 } else if (xchan->caps & XCHAN_CAP_IOMMU) {
515 addr = mtod(m, bus_addr_t);
/* Device reads from memory, so it needs write... NOTE(review): only
 * the MEM_TO_DEV branch is visible; the protection logic around it is
 * elided — confirm which direction maps to which VM_PROT_* bits. */
518 if (xr->direction == XDMA_MEM_TO_DEV)
521 prot = VM_PROT_WRITE;
523 xdma_iommu_add_entry(xchan, &va,
524 pa, m->m_pkthdr.len, prot);
527 * Save VA so we can unload data later
528 * after completion of this transfer.
530 if (xr->direction == XDMA_MEM_TO_DEV)
/* No bounce, no IOMMU: DMA directly from the mbuf's own storage. */
536 seg[0].ds_addr = mtod(m, bus_addr_t);
537 seg[0].ds_len = m->m_pkthdr.len;
/* Non-mbuf request types are not supported on this path. */
542 panic("implement me\n");
/*
 * Front-end for data loading: pick the busdma or direct path by channel
 * capability, record the resulting segment count on the request (both the
 * total and the left-to-complete counter used by xchan_seg_done()).
 * A zero return tells the caller to retry later.
 */
549 xdma_load_data(xdma_channel_t *xchan,
550 struct xdma_request *xr, struct bus_dma_segment *seg)
552 xdma_controller_t *xdma;
561 if (xchan->caps & XCHAN_CAP_BUSDMA)
562 nsegs = _xdma_load_data_busdma(xchan, xr, seg);
564 nsegs = _xdma_load_data(xchan, xr, seg);
566 return (0); /* Try again later. */
568 xr->buf.nsegs = nsegs;
569 xr->buf.nsegs_left = nsegs;
/*
 * Drain the channel's input queue into the caller's sglist.  For each
 * queued request: defragment mbufs when needed, stop when the DMA
 * engine's remaining capacity or the sglist would be exceeded, load the
 * request's data into seg[], append it to sg[], and move the request from
 * queue_in to the processing queue.  Caller must hold the channel lock.
 * NOTE(review): `c` (per-request segment count) and `n` (sglist fill
 * level) are maintained on lines elided from this extract.
 */
575 xdma_process(xdma_channel_t *xchan,
576 struct xdma_sglist *sg)
578 struct bus_dma_segment seg[XDMA_MAX_SEG];
579 struct xdma_request *xr;
580 struct xdma_request *xr_tmp;
581 xdma_controller_t *xdma;
588 XCHAN_ASSERT_LOCKED(xchan);
/* Ask the engine driver how many more segments it can accept now. */
595 ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
597 device_printf(xdma->dev,
598 "%s: Can't get DMA controller capacity.\n", __func__);
/* _SAFE: entries are unlinked from queue_in while iterating. */
602 TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
603 switch (xr->req_type) {
/* Defrag when the channel can't segment or the chain is too long. */
605 if ((xchan->caps & XCHAN_CAP_NOSEG) ||
606 (c > xchan->maxnsegs))
607 c = xdma_mbuf_defrag(xchan, xr);
615 if (capacity <= (c + n)) {
617 * No space yet available for the entire
618 * request in the DMA engine.
/* Worst-case check: the next request could add maxnsegs entries. */
623 if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
624 /* Sglist is full. */
628 nsegs = xdma_load_data(xchan, xr, seg);
632 xdma_sglist_add(&sg[n], seg, nsegs, xr);
/* Hand the request from the input queue to the processing queue. */
635 QUEUE_IN_LOCK(xchan);
636 TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
637 QUEUE_IN_UNLOCK(xchan);
639 QUEUE_PROC_LOCK(xchan);
640 TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
641 QUEUE_PROC_UNLOCK(xchan);
648 xdma_queue_submit_sg(xdma_channel_t *xchan)
650 struct xdma_sglist *sg;
651 xdma_controller_t *xdma;
656 KASSERT(xdma != NULL, ("xdma is NULL"));
658 XCHAN_ASSERT_LOCKED(xchan);
662 if ((xchan->caps & (XCHAN_CAP_BOUNCE | XCHAN_CAP_BUSDMA)) &&
663 (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
664 device_printf(xdma->dev,
665 "%s: Can't submit a transfer: no bufs\n",
670 sg_n = xdma_process(xchan, sg);
672 return (0); /* Nothing to submit */
674 /* Now submit sglist to DMA engine driver. */
675 ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
677 device_printf(xdma->dev,
678 "%s: Can't submit an sglist.\n", __func__);