/*-
 * Copyright (c) 2016 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/limits.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/bus_dma.h>

#include <machine/bus.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/xdma/xdma.h>
62 MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
65 * Multiple xDMA controllers may work with single DMA device,
66 * so we have global lock for physical channel management.
68 static struct mtx xdma_mtx;
69 #define XDMA_LOCK() mtx_lock(&xdma_mtx)
70 #define XDMA_UNLOCK() mtx_unlock(&xdma_mtx)
71 #define XDMA_ASSERT_LOCKED() mtx_assert(&xdma_mtx, MA_OWNED)
76 #define XCHAN_LOCK(xchan) mtx_lock(&(xchan)->mtx_lock)
77 #define XCHAN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_lock)
78 #define XCHAN_ASSERT_LOCKED(xchan) mtx_assert(&(xchan)->mtx_lock, MA_OWNED)
81 * Allocate virtual xDMA channel.
84 xdma_channel_alloc(xdma_controller_t *xdma)
86 xdma_channel_t *xchan;
89 xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
91 device_printf(xdma->dev,
92 "%s: Can't allocate memory for channel.\n", __func__);
99 /* Request a real channel from hardware driver. */
100 ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
102 device_printf(xdma->dev,
103 "%s: Can't request hardware channel.\n", __func__);
110 TAILQ_INIT(&xchan->ie_handlers);
111 mtx_init(&xchan->mtx_lock, "xDMA", NULL, MTX_DEF);
113 TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);
121 xdma_channel_free(xdma_channel_t *xchan)
123 xdma_controller_t *xdma;
130 /* Free the real DMA channel. */
131 err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
133 device_printf(xdma->dev,
134 "%s: Can't free real hw channel.\n", __func__);
139 xdma_teardown_all_intr(xchan);
141 /* Deallocate descriptors, if any. */
142 xdma_desc_free(xchan);
144 mtx_destroy(&xchan->mtx_lock);
146 TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);
156 xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *), void *arg,
159 struct xdma_intr_handler *ih;
160 xdma_controller_t *xdma;
163 KASSERT(xdma != NULL, ("xdma is NULL"));
167 device_printf(xdma->dev,
168 "%s: Can't setup interrupt handler.\n",
174 ih = malloc(sizeof(struct xdma_intr_handler),
175 M_XDMA, M_WAITOK | M_ZERO);
177 device_printf(xdma->dev,
178 "%s: Can't allocate memory for interrupt handler.\n",
187 TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
189 if (ihandler != NULL) {
197 xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
199 xdma_controller_t *xdma;
202 KASSERT(xdma != NULL, ("xdma is NULL"));
206 device_printf(xdma->dev,
207 "%s: Can't teardown interrupt.\n", __func__);
211 TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
218 xdma_teardown_all_intr(xdma_channel_t *xchan)
220 struct xdma_intr_handler *ih_tmp;
221 struct xdma_intr_handler *ih;
222 xdma_controller_t *xdma;
225 KASSERT(xdma != NULL, ("xdma is NULL"));
227 TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
228 TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
236 xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
238 xdma_channel_t *xchan;
241 xchan = (xdma_channel_t *)arg;
242 KASSERT(xchan != NULL, ("xchan is NULL"));
249 for (i = 0; i < nseg; i++) {
250 xchan->descs_phys[i].ds_addr = segs[i].ds_addr;
251 xchan->descs_phys[i].ds_len = segs[i].ds_len;
256 xdma_desc_alloc_bus_dma(xdma_channel_t *xchan, uint32_t desc_size,
259 xdma_controller_t *xdma;
260 bus_size_t all_desc_sz;
268 nsegments = conf->block_num;
269 all_desc_sz = (nsegments * desc_size);
271 err = bus_dma_tag_create(
272 bus_get_dma_tag(xdma->dev),
273 align, desc_size, /* alignment, boundary */
274 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
275 BUS_SPACE_MAXADDR, /* highaddr */
276 NULL, NULL, /* filter, filterarg */
277 all_desc_sz, nsegments, /* maxsize, nsegments*/
278 desc_size, 0, /* maxsegsize, flags */
279 NULL, NULL, /* lockfunc, lockarg */
282 device_printf(xdma->dev,
283 "%s: Can't create bus_dma tag.\n", __func__);
287 err = bus_dmamem_alloc(xchan->dma_tag, (void **)&xchan->descs,
288 BUS_DMA_WAITOK | BUS_DMA_COHERENT, &xchan->dma_map);
290 device_printf(xdma->dev,
291 "%s: Can't allocate memory for descriptors.\n", __func__);
295 xchan->descs_phys = malloc(nsegments * sizeof(xdma_descriptor_t), M_XDMA,
296 (M_WAITOK | M_ZERO));
299 err = bus_dmamap_load(xchan->dma_tag, xchan->dma_map, xchan->descs,
300 all_desc_sz, xdma_dmamap_cb, xchan, BUS_DMA_WAITOK);
302 device_printf(xdma->dev,
303 "%s: Can't load DMA map.\n", __func__);
307 if (xchan->map_err != 0) {
308 device_printf(xdma->dev,
309 "%s: Can't load DMA map.\n", __func__);
317 * This function called by DMA controller driver.
320 xdma_desc_alloc(xdma_channel_t *xchan, uint32_t desc_size, uint32_t align)
322 xdma_controller_t *xdma;
326 XCHAN_ASSERT_LOCKED(xchan);
330 device_printf(xdma->dev,
331 "%s: Channel was not allocated properly.\n", __func__);
335 if (xchan->flags & XCHAN_DESC_ALLOCATED) {
336 device_printf(xdma->dev,
337 "%s: Descriptors already allocated.\n", __func__);
341 if ((xchan->flags & XCHAN_CONFIGURED) == 0) {
342 device_printf(xdma->dev,
343 "%s: Channel has no configuration.\n", __func__);
350 ret = xdma_desc_alloc_bus_dma(xchan, desc_size, align);
353 device_printf(xdma->dev,
354 "%s: Can't allocate memory for descriptors.\n",
359 xchan->flags |= XCHAN_DESC_ALLOCATED;
361 /* We are going to write to descriptors. */
362 bus_dmamap_sync(xchan->dma_tag, xchan->dma_map, BUS_DMASYNC_PREWRITE);
368 xdma_desc_free(xdma_channel_t *xchan)
371 if ((xchan->flags & XCHAN_DESC_ALLOCATED) == 0) {
372 /* No descriptors allocated. */
376 bus_dmamap_unload(xchan->dma_tag, xchan->dma_map);
377 bus_dmamem_free(xchan->dma_tag, xchan->descs, xchan->dma_map);
378 bus_dma_tag_destroy(xchan->dma_tag);
379 free(xchan->descs_phys, M_XDMA);
381 xchan->flags &= ~(XCHAN_DESC_ALLOCATED);
387 xdma_prep_memcpy(xdma_channel_t *xchan, uintptr_t src_addr,
388 uintptr_t dst_addr, size_t len)
390 xdma_controller_t *xdma;
395 KASSERT(xdma != NULL, ("xdma is NULL"));
398 conf->direction = XDMA_MEM_TO_MEM;
399 conf->src_addr = src_addr;
400 conf->dst_addr = dst_addr;
401 conf->block_len = len;
404 xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_MEMCPY);
408 /* Deallocate old descriptors, if any. */
409 xdma_desc_free(xchan);
411 ret = XDMA_CHANNEL_PREP_MEMCPY(xdma->dma_dev, xchan);
413 device_printf(xdma->dev,
414 "%s: Can't prepare memcpy transfer.\n", __func__);
420 if (xchan->flags & XCHAN_DESC_ALLOCATED) {
421 /* Driver created xDMA descriptors. */
422 bus_dmamap_sync(xchan->dma_tag, xchan->dma_map,
423 BUS_DMASYNC_POSTWRITE);
432 xdma_prep_cyclic(xdma_channel_t *xchan, enum xdma_direction dir,
433 uintptr_t src_addr, uintptr_t dst_addr, int block_len,
434 int block_num, int src_width, int dst_width)
436 xdma_controller_t *xdma;
441 KASSERT(xdma != NULL, ("xdma is NULL"));
444 conf->direction = dir;
445 conf->src_addr = src_addr;
446 conf->dst_addr = dst_addr;
447 conf->block_len = block_len;
448 conf->block_num = block_num;
449 conf->src_width = src_width;
450 conf->dst_width = dst_width;
452 xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_CYCLIC);
456 /* Deallocate old descriptors, if any. */
457 xdma_desc_free(xchan);
459 ret = XDMA_CHANNEL_PREP_CYCLIC(xdma->dma_dev, xchan);
461 device_printf(xdma->dev,
462 "%s: Can't prepare cyclic transfer.\n", __func__);
468 if (xchan->flags & XCHAN_DESC_ALLOCATED) {
469 /* Driver has created xDMA descriptors. */
470 bus_dmamap_sync(xchan->dma_tag, xchan->dma_map,
471 BUS_DMASYNC_POSTWRITE);
480 xdma_begin(xdma_channel_t *xchan)
482 xdma_controller_t *xdma;
487 ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_BEGIN);
489 device_printf(xdma->dev,
490 "%s: Can't begin the channel operation.\n", __func__);
498 xdma_terminate(xdma_channel_t *xchan)
500 xdma_controller_t *xdma;
505 ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_TERMINATE);
507 device_printf(xdma->dev,
508 "%s: Can't terminate the channel operation.\n", __func__);
516 xdma_pause(xdma_channel_t *xchan)
518 xdma_controller_t *xdma;
523 ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_PAUSE);
525 device_printf(xdma->dev,
526 "%s: Can't pause the channel operation.\n", __func__);
534 xdma_callback(xdma_channel_t *xchan)
536 struct xdma_intr_handler *ih_tmp;
537 struct xdma_intr_handler *ih;
539 TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
540 if (ih->cb != NULL) {
/*
 * Assert that the global xDMA lock is held (debugging aid for
 * hardware drivers that must run under XDMA_LOCK()).
 */
void
xdma_assert_locked(void)
{

	XDMA_ASSERT_LOCKED();
}
557 * Notify the DMA driver we have machine-dependent data in FDT.
560 xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
564 ret = XDMA_OFW_MD_DATA(xdma->dma_dev, cells, ncells, (void **)&xdma->data);
570 * Allocate xdma controller.
573 xdma_ofw_get(device_t dev, const char *prop)
575 phandle_t node, parent;
576 xdma_controller_t *xdma;
584 node = ofw_bus_get_node(dev);
587 "%s called on not ofw based device.\n", __func__);
590 error = ofw_bus_parse_xref_list_get_length(node,
591 "dmas", "#dma-cells", &ndmas);
594 "%s can't get dmas list.\n", __func__);
600 "%s dmas list is empty.\n", __func__);
604 error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
607 "%s can't find string index.\n", __func__);
611 error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
612 idx, &parent, &ncells, &cells);
615 "%s can't get dma device xref.\n", __func__);
619 dma_dev = OF_device_from_xref(parent);
620 if (dma_dev == NULL) {
622 "%s can't get dma device.\n", __func__);
626 xdma = malloc(sizeof(struct xdma_controller), M_XDMA, M_WAITOK | M_ZERO);
629 "%s can't allocate memory for xdma.\n", __func__);
633 xdma->dma_dev = dma_dev;
635 TAILQ_INIT(&xdma->channels);
637 xdma_ofw_md_data(xdma, cells, ncells);
638 free(cells, M_OFWPROP);
645 * Free xDMA controller object.
648 xdma_put(xdma_controller_t *xdma)
653 /* Ensure no channels allocated. */
654 if (!TAILQ_EMPTY(&xdma->channels)) {
655 device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
659 free(xdma->data, M_DEVBUF);
671 mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
674 SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);