/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include "opt_platform.h"
36 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/queue.h>
42 #include <sys/malloc.h>
43 #include <sys/limits.h>
45 #include <sys/mutex.h>
46 #include <sys/sysctl.h>
47 #include <sys/systm.h>
49 #include <machine/bus.h>
52 #include <dev/fdt/fdt_common.h>
53 #include <dev/ofw/ofw_bus.h>
54 #include <dev/ofw/ofw_bus_subr.h>
57 #include <dev/xdma/xdma.h>
62 * Multiple xDMA controllers may work with single DMA device,
63 * so we have global lock for physical channel management.
65 static struct mtx xdma_mtx;
67 #define XDMA_LOCK() mtx_lock(&xdma_mtx)
68 #define XDMA_UNLOCK() mtx_unlock(&xdma_mtx)
69 #define XDMA_ASSERT_LOCKED() mtx_assert(&xdma_mtx, MA_OWNED)
71 #define FDT_REG_CELLS 4
75 xdma_get_iommu_fdt(xdma_controller_t *xdma, xdma_channel_t *xchan)
77 struct xdma_iommu *xio;
82 node = ofw_bus_get_node(xdma->dma_dev);
83 if (OF_getproplen(node, "xdma,iommu") <= 0)
86 len = OF_getencprop(node, "xdma,iommu", &prop, sizeof(prop));
87 if (len != sizeof(prop)) {
88 device_printf(xdma->dev,
89 "%s: Can't get iommu device node\n", __func__);
94 xio->dev = OF_device_from_xref(prop);
95 if (xio->dev == NULL) {
96 device_printf(xdma->dev,
97 "%s: Can't get iommu device\n", __func__);
107 * Allocate virtual xDMA channel.
110 xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
112 xdma_channel_t *xchan;
115 xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
119 /* Check if this DMA controller supports IOMMU. */
120 if (xdma_get_iommu_fdt(xdma, xchan))
121 caps |= XCHAN_CAP_IOMMU | XCHAN_CAP_NOSEG;
128 /* Request a real channel from hardware driver. */
129 ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
131 device_printf(xdma->dev,
132 "%s: Can't request hardware channel.\n", __func__);
139 TAILQ_INIT(&xchan->ie_handlers);
141 mtx_init(&xchan->mtx_lock, "xDMA chan", NULL, MTX_DEF);
142 mtx_init(&xchan->mtx_qin_lock, "xDMA qin", NULL, MTX_DEF);
143 mtx_init(&xchan->mtx_qout_lock, "xDMA qout", NULL, MTX_DEF);
144 mtx_init(&xchan->mtx_bank_lock, "xDMA bank", NULL, MTX_DEF);
145 mtx_init(&xchan->mtx_proc_lock, "xDMA proc", NULL, MTX_DEF);
147 TAILQ_INIT(&xchan->bank);
148 TAILQ_INIT(&xchan->queue_in);
149 TAILQ_INIT(&xchan->queue_out);
150 TAILQ_INIT(&xchan->processing);
152 if (xchan->caps & XCHAN_CAP_IOMMU)
153 xdma_iommu_init(&xchan->xio);
155 TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);
163 xdma_channel_free(xdma_channel_t *xchan)
165 xdma_controller_t *xdma;
169 KASSERT(xdma != NULL, ("xdma is NULL"));
173 /* Free the real DMA channel. */
174 err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
176 device_printf(xdma->dev,
177 "%s: Can't free real hw channel.\n", __func__);
182 if (xchan->flags & XCHAN_TYPE_SG)
183 xdma_channel_free_sg(xchan);
185 if (xchan->caps & XCHAN_CAP_IOMMU)
186 xdma_iommu_release(&xchan->xio);
188 xdma_teardown_all_intr(xchan);
190 mtx_destroy(&xchan->mtx_lock);
191 mtx_destroy(&xchan->mtx_qin_lock);
192 mtx_destroy(&xchan->mtx_qout_lock);
193 mtx_destroy(&xchan->mtx_bank_lock);
194 mtx_destroy(&xchan->mtx_proc_lock);
196 TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);
206 xdma_setup_intr(xdma_channel_t *xchan,
207 int (*cb)(void *, xdma_transfer_status_t *),
208 void *arg, void **ihandler)
210 struct xdma_intr_handler *ih;
211 xdma_controller_t *xdma;
214 KASSERT(xdma != NULL, ("xdma is NULL"));
218 device_printf(xdma->dev,
219 "%s: Can't setup interrupt handler.\n",
225 ih = malloc(sizeof(struct xdma_intr_handler),
226 M_XDMA, M_WAITOK | M_ZERO);
231 TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
234 if (ihandler != NULL)
241 xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
243 xdma_controller_t *xdma;
246 KASSERT(xdma != NULL, ("xdma is NULL"));
250 device_printf(xdma->dev,
251 "%s: Can't teardown interrupt.\n", __func__);
255 TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
262 xdma_teardown_all_intr(xdma_channel_t *xchan)
264 struct xdma_intr_handler *ih_tmp;
265 struct xdma_intr_handler *ih;
266 xdma_controller_t *xdma;
269 KASSERT(xdma != NULL, ("xdma is NULL"));
271 TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
272 TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
280 xdma_request(xdma_channel_t *xchan, struct xdma_request *req)
282 xdma_controller_t *xdma;
287 KASSERT(xdma != NULL, ("xdma is NULL"));
290 ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
292 device_printf(xdma->dev,
293 "%s: Can't request a transfer.\n", __func__);
304 xdma_control(xdma_channel_t *xchan, enum xdma_command cmd)
306 xdma_controller_t *xdma;
310 KASSERT(xdma != NULL, ("xdma is NULL"));
312 ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, cmd);
314 device_printf(xdma->dev,
315 "%s: Can't process command.\n", __func__);
323 xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
325 struct xdma_intr_handler *ih_tmp;
326 struct xdma_intr_handler *ih;
327 xdma_controller_t *xdma;
330 KASSERT(xdma != NULL, ("xdma is NULL"));
332 TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp)
334 ih->cb(ih->cb_user, status);
336 if (xchan->flags & XCHAN_TYPE_SG)
337 xdma_queue_submit(xchan);
342 * Notify the DMA driver we have machine-dependent data in FDT.
345 xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
349 ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
350 cells, ncells, (void **)&xdma->data);
356 xdma_handle_mem_node(vmem_t *vmem, phandle_t memory)
358 pcell_t reg[FDT_REG_CELLS * FDT_MEM_REGIONS];
360 int addr_cells, size_cells;
361 int i, reg_len, ret, tuple_size, tuples;
362 u_long mem_start, mem_size;
364 if ((ret = fdt_addrsize_cells(OF_parent(memory), &addr_cells,
371 tuple_size = sizeof(pcell_t) * (addr_cells + size_cells);
372 reg_len = OF_getproplen(memory, "reg");
373 if (reg_len <= 0 || reg_len > sizeof(reg))
376 if (OF_getprop(memory, "reg", reg, reg_len) <= 0)
379 tuples = reg_len / tuple_size;
380 regp = (pcell_t *)®
381 for (i = 0; i < tuples; i++) {
382 ret = fdt_data_to_res(regp, addr_cells, size_cells,
383 &mem_start, &mem_size);
387 vmem_add(vmem, mem_start, mem_size, 0);
388 regp += addr_cells + size_cells;
395 xdma_get_memory(device_t dev)
397 phandle_t mem_node, node;
401 node = ofw_bus_get_node(dev);
404 "%s called on not ofw based device.\n", __func__);
408 if (!OF_hasprop(node, "memory-region"))
411 if (OF_getencprop(node, "memory-region", (void *)&mem_handle,
412 sizeof(mem_handle)) <= 0)
415 vmem = vmem_create("xDMA vmem", 0, 0, PAGE_SIZE,
416 PAGE_SIZE, M_BESTFIT | M_WAITOK);
420 mem_node = OF_node_from_xref(mem_handle);
421 if (xdma_handle_mem_node(vmem, mem_node) != 0) {
430 xdma_put_memory(vmem_t *vmem)
437 xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem)
444 * Allocate xdma controller.
447 xdma_ofw_get(device_t dev, const char *prop)
449 phandle_t node, parent;
450 xdma_controller_t *xdma;
458 node = ofw_bus_get_node(dev);
461 "%s called on not ofw based device.\n", __func__);
463 error = ofw_bus_parse_xref_list_get_length(node,
464 "dmas", "#dma-cells", &ndmas);
467 "%s can't get dmas list.\n", __func__);
473 "%s dmas list is empty.\n", __func__);
477 error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
480 "%s can't find string index.\n", __func__);
484 error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
485 idx, &parent, &ncells, &cells);
488 "%s can't get dma device xref.\n", __func__);
492 dma_dev = OF_device_from_xref(parent);
493 if (dma_dev == NULL) {
495 "%s can't get dma device.\n", __func__);
499 xdma = malloc(sizeof(struct xdma_controller),
500 M_XDMA, M_WAITOK | M_ZERO);
502 xdma->dma_dev = dma_dev;
504 TAILQ_INIT(&xdma->channels);
506 xdma_ofw_md_data(xdma, cells, ncells);
507 free(cells, M_OFWPROP);
514 * Free xDMA controller object.
517 xdma_put(xdma_controller_t *xdma)
522 /* Ensure no channels allocated. */
523 if (!TAILQ_EMPTY(&xdma->channels)) {
524 device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
528 free(xdma->data, M_DEVBUF);
540 mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
543 SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);