/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/vmem.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include "xdma_if.h"

struct seg_load_request {
	struct bus_dma_segment *seg;
	uint32_t nsegs;
	uint32_t error;
};
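
/*
 * Free the per-request reserved buffers: unmap each buffer from the
 * kernel address space and return its physical range to the channel
 * vmem arena.
 */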
static void
xchan_bufs_free_reserved(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	vm_size_t size;
	int i;

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = xr->buf.size;
		if (xr->buf.vaddr) {
			pmap_kremove_device(xr->buf.vaddr, size);
			kva_free(xr->buf.vaddr, size);
			xr->buf.vaddr = 0;
		}
		if (xr->buf.paddr) {
			vmem_free(xchan->vmem, xr->buf.paddr, size);
			xr->buf.paddr = 0;
		}
		xr->buf.size = 0;
	}
}
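
/*
 * Reserve a bounce buffer for every request slot from the channel vmem
 * arena and map it into kernel virtual address space as device memory.
 */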
static int
xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	vmem_addr_t addr;
	vm_size_t size;
	int i;

	xdma = xchan->xdma;

	if (xchan->vmem == NULL)
		return (ENOBUFS);

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = round_page(xchan->maxsegsize);
		if (vmem_alloc(xchan->vmem, size,
		    M_BESTFIT | M_NOWAIT, &addr)) {
			device_printf(xdma->dev,
			    "%s: Can't allocate memory\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}

		xr->buf.size = size;
		xr->buf.paddr = addr;
		xr->buf.vaddr = kva_alloc(size);
		if (xr->buf.vaddr == 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate KVA\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}

		pmap_kenter_device(xr->buf.vaddr, size, addr);
	}

	return (0);
}
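
/*
 * Create a bus_dma tag that encodes the channel restrictions and a DMA
 * map for every request slot, for use with the busdma(9) load path.
 */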
static int
xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	int err;
	int i;

	xdma = xchan->xdma;

	/* Create bus_dma tag */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(xdma->dev),	/* Parent tag. */
	    xchan->alignment,		/* alignment */
	    xchan->boundary,		/* boundary */
	    xchan->lowaddr,		/* lowaddr */
	    xchan->highaddr,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
	    xchan->maxnsegs,		/* nsegments */
	    xchan->maxsegsize,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &xchan->dma_tag_bufs);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
		    &xr->buf.map);
		if (err != 0) {
			device_printf(xdma->dev,
			    "%s: Can't create buf DMA map.\n", __func__);

			/* Cleanup. */
			bus_dma_tag_destroy(xchan->dma_tag_bufs);

			return (-1);
		}
	}

	return (0);
}
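
/*
 * Allocate per-request buffers using whichever backend the channel
 * capabilities select: busdma(9) maps or reserved bounce memory.
 */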
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	if (xdma == NULL) {
		printf("%s: Channel was not allocated properly.\n", __func__);
		return (-1);
	}

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		ret = xchan_bufs_alloc_busdma(xchan);
	else
		ret = xchan_bufs_alloc_reserved(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate bufs.\n", __func__);
		return (-1);
	}

	xchan->flags |= XCHAN_BUFS_ALLOCATED;

	return (0);
}
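
/*
 * Tear down whatever xchan_bufs_alloc() set up: destroy the busdma
 * maps and tag, or free the reserved bounce buffers.
 */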
int
xchan_bufs_free(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	int i;

	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
		return (-1);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		for (i = 0; i < xchan->xr_num; i++) {
			xr = &xchan->xr_mem[i];
			b = &xr->buf;
			bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
		}
		bus_dma_tag_destroy(xchan->dma_tag_bufs);
	} else
		xchan_bufs_free_reserved(xchan);

	xchan->flags &= ~XCHAN_BUFS_ALLOCATED;

	return (0);
}
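
/*
 * Release all scatter-gather resources held by the channel: buffers,
 * the sglist and the request bank.
 */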
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{

	xchan_bufs_free(xchan);
	xchan_sglist_free(xchan);
	xchan_bank_free(xchan);
}

/*
 * Prepare xchan for a scatter-gather transfer.
 * xr_num - xdma requests queue size,
 * maxsegsize - maximum allowed scatter-gather list element size in bytes.
 */
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
    bus_size_t maxsegsize, bus_size_t maxnsegs,
    bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	KASSERT(xdma != NULL, ("xdma is NULL"));

	if (xchan->flags & XCHAN_CONFIGURED) {
		device_printf(xdma->dev,
		    "%s: Channel is already configured.\n", __func__);
		return (-1);
	}

	xchan->xr_num = xr_num;
	xchan->maxsegsize = maxsegsize;
	xchan->maxnsegs = maxnsegs;
	xchan->alignment = alignment;
	xchan->boundary = boundary;
	xchan->lowaddr = lowaddr;
	xchan->highaddr = highaddr;

	if (xchan->maxnsegs > XDMA_MAX_SEG) {
		device_printf(xdma->dev, "%s: maxnsegs is too big\n",
		    __func__);
		return (-1);
	}

	xchan_bank_init(xchan);

	/* Allocate sglist. */
	ret = xchan_sglist_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate sglist.\n", __func__);
		return (-1);
	}

	/* Allocate buffers if required. */
	if (xchan->caps & (XCHAN_CAP_BUSDMA | XCHAN_CAP_BOUNCE)) {
		ret = xchan_bufs_alloc(xchan);
		if (ret != 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate bufs.\n", __func__);

			/* Cleanup. */
			xchan_sglist_free(xchan);
			xchan_bank_free(xchan);

			return (-1);
		}
	}

	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);

	XCHAN_LOCK(xchan);
	ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't prepare SG transfer.\n", __func__);
		XCHAN_UNLOCK(xchan);

		return (-1);
	}
	XCHAN_UNLOCK(xchan);

	return (0);
}
void
xchan_seg_done(xdma_channel_t *xchan,
    struct xdma_transfer_status *st)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	vm_offset_t addr;

	xr = TAILQ_FIRST(&xchan->processing);
	if (xr == NULL)
		panic("request not found\n");

	b = &xr->buf;

	atomic_subtract_int(&b->nsegs_left, 1);

	if (b->nsegs_left == 0) {
		if (xchan->caps & XCHAN_CAP_BUSDMA) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTWRITE);
			else
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
		} else if (xchan->caps & XCHAN_CAP_BOUNCE) {
			if (xr->req_type == XR_TYPE_MBUF &&
			    xr->direction == XDMA_DEV_TO_MEM)
				m_copyback(xr->m, 0, st->transferred,
				    (void *)xr->buf.vaddr);
		} else if (xchan->caps & XCHAN_CAP_IOMMU) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				addr = xr->src_addr;
			else
				addr = xr->dst_addr;
			xdma_iommu_remove_entry(xchan, addr);
		}

		xr->status.error = st->error;
		xr->status.transferred = st->transferred;

		QUEUE_PROC_LOCK(xchan);
		TAILQ_REMOVE(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);

		QUEUE_OUT_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
		QUEUE_OUT_UNLOCK(xchan);
	}
}
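
/*
 * busdma(9) load callback: copy the resolved physical segments into the
 * caller-provided array, or record the load error.
 */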
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct seg_load_request *slr;
	struct bus_dma_segment *seg;
	int i;

	slr = arg;
	seg = slr->seg;

	if (error != 0) {
		slr->error = error;
		return;
	}

	slr->nsegs = nsegs;

	for (i = 0; i < nsegs; i++) {
		seg[i].ds_addr = segs[i].ds_addr;
		seg[i].ds_len = segs[i].ds_len;
	}
}
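
/*
 * Load a request through the busdma(9) machinery: pick the loader that
 * matches the request type, then pre-sync the map for the transfer
 * direction. Returns the number of physical segments, or 0 on failure.
 */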
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct seg_load_request slr;
	void *addr;
	int nsegs;
	int error;

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
		    xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
		break;
	case XR_TYPE_BIO:
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
		    xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	case XR_TYPE_VIRT:
		switch (xr->direction) {
		case XDMA_MEM_TO_DEV:
			addr = (void *)xr->src_addr;
			break;
		case XDMA_DEV_TO_MEM:
			addr = (void *)xr->dst_addr;
			break;
		default:
			device_printf(xdma->dma_dev,
			    "%s: Direction is not supported\n", __func__);
			return (0);
		}
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
		    addr, (xr->block_len * xr->block_num),
		    xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	default:
		break;
	}

	if (error != 0) {
		if (error == ENOMEM) {
			/*
			 * Out of memory. Try again later.
			 * TODO: count errors.
			 */
		} else
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed with err %d\n",
			    __func__, error);
		return (0);
	}

	if (xr->direction == XDMA_MEM_TO_DEV)
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREREAD);

	return (nsegs);
}
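
/*
 * Load a request without busdma(9): either bounce it through the
 * reserved buffer or map it into the device IOMMU, producing a single
 * segment.
 */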
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	struct mbuf *m;
	vm_offset_t va, addr;
	bus_addr_t pa;
	vm_prot_t prot;
	uint32_t nsegs;

	m = xr->m;

	KASSERT(xchan->caps & (XCHAN_CAP_NOSEG | XCHAN_CAP_BOUNCE),
	    ("Handling segmented data is not implemented here."));

	nsegs = 1;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		if (xchan->caps & XCHAN_CAP_BOUNCE) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				m_copydata(m, 0, m->m_pkthdr.len,
				    (void *)xr->buf.vaddr);
			seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
		} else if (xchan->caps & XCHAN_CAP_IOMMU) {
			addr = mtod(m, bus_addr_t);
			pa = vtophys(addr);

			if (xr->direction == XDMA_MEM_TO_DEV)
				prot = VM_PROT_READ;
			else
				prot = VM_PROT_WRITE;

			xdma_iommu_add_entry(xchan, &va,
			    pa, m->m_pkthdr.len, prot);

			/*
			 * Save VA so we can unload data later
			 * after completion of this transfer.
			 */
			if (xr->direction == XDMA_MEM_TO_DEV)
				xr->src_addr = va;
			else
				xr->dst_addr = va;
			seg[0].ds_addr = va;
		} else
			seg[0].ds_addr = mtod(m, bus_addr_t);
		seg[0].ds_len = m->m_pkthdr.len;
		break;
	case XR_TYPE_BIO:
	case XR_TYPE_VIRT:
	default:
		panic("implement me\n");
	}

	return (nsegs);
}
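
/*
 * Dispatch a request to the busdma(9) or the direct load path and
 * record the resulting segment count in the request buffer.
 */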
static int
xdma_load_data(xdma_channel_t *xchan,
    struct xdma_request *xr, struct bus_dma_segment *seg)
{
	int nsegs;

	nsegs = 0;

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		nsegs = _xdma_load_data_busdma(xchan, xr, seg);
	else
		nsegs = _xdma_load_data(xchan, xr, seg);
	if (nsegs == 0)
		return (0); /* Try again later. */

	xr->buf.nsegs = nsegs;
	xr->buf.nsegs_left = nsegs;

	return (nsegs);
}
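
/*
 * Walk the input queue and build the scatter-gather list, stopping when
 * the engine capacity or the sglist would overflow. Loaded requests
 * move to the processing queue. Returns the number of sglist entries.
 */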
static int
xdma_process(xdma_channel_t *xchan,
    struct xdma_sglist *sg)
{
	struct bus_dma_segment seg[XDMA_MAX_SEG];
	struct xdma_request *xr;
	struct xdma_request *xr_tmp;
	xdma_controller_t *xdma;
	uint32_t capacity;
	uint32_t n;
	uint32_t c;
	int nsegs;
	int ret;

	XCHAN_ASSERT_LOCKED(xchan);

	xdma = xchan->xdma;

	n = 0;
	c = 0;

	ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't get DMA controller capacity.\n", __func__);
		return (-1);
	}

	TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
		switch (xr->req_type) {
		case XR_TYPE_MBUF:
			if ((xchan->caps & XCHAN_CAP_NOSEG) ||
			    (c > xchan->maxnsegs))
				c = xdma_mbuf_defrag(xchan, xr);
			else
				c = xdma_mbuf_chain_count(xr->m);
			break;
		case XR_TYPE_BIO:
		case XR_TYPE_VIRT:
		default:
			c = 1;
		}

		if (capacity <= (c + n)) {
			/*
			 * No space yet available for the entire
			 * request in the DMA engine.
			 */
			break;
		}

		if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
			/* Sglist is full. */
			break;
		}

		nsegs = xdma_load_data(xchan, xr, seg);
		if (nsegs == 0)
			break;

		xdma_sglist_add(&sg[n], seg, nsegs, xr);
		n += nsegs;

		QUEUE_IN_LOCK(xchan);
		TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
		QUEUE_IN_UNLOCK(xchan);

		QUEUE_PROC_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);
	}

	return (n);
}
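
/*
 * Build an sglist from the input queue and hand it to the DMA engine
 * driver. Called with the channel lock held.
 */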
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
	struct xdma_sglist *sg;
	xdma_controller_t *xdma;
	int sg_n;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XCHAN_ASSERT_LOCKED(xchan);

	sg = xchan->sg;

	if ((xchan->caps & (XCHAN_CAP_BOUNCE | XCHAN_CAP_BUSDMA)) &&
	    (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit a transfer: no bufs\n",
		    __func__);
		return (-1);
	}

	sg_n = xdma_process(xchan, sg);
	if (sg_n == 0)
		return (0); /* Nothing to submit */

	/* Now submit sglist to DMA engine driver. */
	ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit an sglist.\n", __func__);
		return (-1);
	}

	return (0);
}