2 * Copyright (c) 2016 Ruslan Bukin <br@bsdpad.com>
5 * This software was developed by SRI International and the University of
6 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
7 * ("CTSRD"), as part of the DARPA CRASH research programme.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 /* Ingenic JZ4780 PDMA Controller. */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include "opt_platform.h"
37 #include <sys/param.h>
38 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/module.h>
44 #include <sys/mutex.h>
45 #include <sys/resource.h>
48 #include <machine/bus.h>
49 #include <machine/cache.h>
52 #include <dev/fdt/fdt_common.h>
53 #include <dev/ofw/ofw_bus.h>
54 #include <dev/ofw/ofw_bus_subr.h>
57 #include <dev/xdma/xdma.h>
59 #include <mips/ingenic/jz4780_common.h>
60 #include <mips/ingenic/jz4780_pdma.h>
/*
 * NOTE(review): this listing is incomplete (interior lines elided); only
 * the visible declarations are annotated below.
 */
/* Device resources: res[0] = memory window, res[1] = IRQ (see pdma_spec). */
66 struct resource *res[2];
/* Bus-space handle for register access (tag stored alongside, elided here). */
68 bus_space_handle_t bsh;
/* Per-consumer data decoded from the FDT "dmas" specifier cells. */
72 struct pdma_fdt_data {
/* Per-channel software state: owning xdma channel plus its FDT data. */
79 xdma_channel_t *xchan;
80 struct pdma_fdt_data data;
/* Channel flag: re-link and restart the descriptor chain on each interrupt. */
85 #define CHAN_DESCR_RELINK (1 << 0)
/* The PDMA core exposes 32 DMA channels. */
88 #define PDMA_NCHANNELS 32
89 struct pdma_channel pdma_channels[PDMA_NCHANNELS];
/* Resource layout: one memory window and one interrupt, both activated
 * at allocation time (array terminator elided in this listing). */
91 static struct resource_spec pdma_spec[] = {
92 { SYS_RES_MEMORY, 0, RF_ACTIVE },
93 { SYS_RES_IRQ, 0, RF_ACTIVE },
/* Forward declarations for the newbus entry points and the channel kicker. */
97 static int pdma_probe(device_t dev);
98 static int pdma_attach(device_t dev);
99 static int pdma_detach(device_t dev);
100 static int chan_start(struct pdma_softc *sc, struct pdma_channel *chan);
/*
 * pdma_intr() fragment (function header and several lines elided in this
 * listing). Snapshots the pending-interrupt register, acks all channels
 * in one write, then services each channel whose bit was set.
 */
105 struct pdma_channel *chan;
106 struct pdma_softc *sc;
107 xdma_channel_t *xchan;
/* Per-channel pending-interrupt bits. */
114 pending = READ4(sc, PDMA_DIRQP);
116 /* Ack all the channels. */
117 WRITE4(sc, PDMA_DIRQP, 0);
119 for (i = 0; i < PDMA_NCHANNELS; i++) {
120 if (pending & (1 << i)) {
121 chan = &pdma_channels[i];
125 /* TODO: check for AR, HLT error bits here. */
127 /* Disable channel */
128 WRITE4(sc, PDMA_DCS(chan->index), 0);
/*
 * Cyclic mode: advance cur_desc to the next descriptor (modulus
 * operand on the continuation line is elided here) and restart the
 * channel by hand — prep_cyclic deliberately left the chain
 * unlinked, so hardware stops after every descriptor.
 */
130 if (chan->flags & CHAN_DESCR_RELINK) {
132 chan->cur_desc = (chan->cur_desc + 1) % \
134 chan_start(sc, chan);
/* Notify the xdma framework that this channel made progress. */
137 xdma_callback(chan->xchan);
/*
 * pdma_probe() fragment: match the FDT node by "status" and compatible
 * string, then set the device description (return lines after the two
 * guards are elided in this listing).
 */
143 pdma_probe(device_t dev)
146 if (!ofw_bus_status_okay(dev))
149 if (!ofw_bus_is_compatible(dev, "ingenic,jz4780-dma"))
152 device_set_desc(dev, "Ingenic JZ4780 PDMA Controller");
154 return (BUS_PROBE_DEFAULT);
/*
 * pdma_attach() fragment: allocate bus resources, cache the bus-space
 * tag/handle, install the interrupt handler, publish this node's xref,
 * and clear error state in the DMAC control register (several error
 * returns elided in this listing).
 */
158 pdma_attach(device_t dev)
160 struct pdma_softc *sc;
161 phandle_t xref, node;
165 sc = device_get_softc(dev);
168 if (bus_alloc_resources(dev, pdma_spec, sc->res)) {
169 device_printf(dev, "could not allocate resources for device\n");
173 /* Memory interface */
174 sc->bst = rman_get_bustag(sc->res[0]);
175 sc->bsh = rman_get_bushandle(sc->res[0]);
177 /* Setup interrupt handler */
178 err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
179 NULL, pdma_intr, sc, &sc->ih);
181 device_printf(dev, "Unable to alloc interrupt resource.\n");
/* Register our xref so FDT consumers can resolve "dmas" to this device. */
185 node = ofw_bus_get_node(dev);
186 xref = OF_xref_from_node(node);
187 OF_device_register_xref(xref, dev);
/* Clear the halt and address-error status bits in the DMAC register. */
189 reg = READ4(sc, PDMA_DMAC);
190 reg &= ~(DMAC_HLT | DMAC_AR);
192 WRITE4(sc, PDMA_DMAC, reg);
/* NOTE(review): writing 0 to DMACP presumably clears per-channel
 * programmable/priority bits — confirm against the JZ4780 manual. */
194 WRITE4(sc, PDMA_DMACP, 0);
/*
 * pdma_detach() fragment: release the bus resources allocated at attach
 * (interrupt teardown and return are elided in this listing).
 */
200 pdma_detach(device_t dev)
202 struct pdma_softc *sc;
204 sc = device_get_softc(dev);
206 bus_release_resources(dev, pdma_spec, sc->res);
/*
 * chan_start() fragment: point the hardware at the channel's current
 * descriptor and enable the transfer (xchan assignment elided).
 */
212 chan_start(struct pdma_softc *sc, struct pdma_channel *chan)
214 struct xdma_channel *xchan;
218 /* 8 byte descriptor. */
219 WRITE4(sc, PDMA_DCS(chan->index), DCS_DES8);
/* Physical address of the descriptor to fetch. */
220 WRITE4(sc, PDMA_DDA(chan->index), xchan->descs_phys[chan->cur_desc].ds_addr);
/* NOTE(review): DDS write looks like a per-channel descriptor doorbell
 * (bit = channel index) — confirm against the JZ4780 manual. */
221 WRITE4(sc, PDMA_DDS, (1 << chan->index));
223 /* Channel transfer enable. */
224 WRITE4(sc, PDMA_DCS(chan->index), (DCS_DES8 | DCS_CTE));
/*
 * chan_stop() fragment: clear the channel control/status register, then
 * verify the transfer-enable bit actually dropped; complain if the
 * channel refuses to stop (retry/timeout logic elided in this listing).
 */
230 chan_stop(struct pdma_softc *sc, struct pdma_channel *chan)
234 WRITE4(sc, PDMA_DCS(chan->index), 0);
239 if ((READ4(sc, PDMA_DCS(chan->index)) & DCS_CTE) == 0) {
245 device_printf(sc->dev, "%s: Can't stop channel %d\n",
246 __func__, chan->index);
/*
 * pdma_channel_alloc() fragment: find the first unused software channel
 * and bind it to the given xdma channel (marking it used and the return
 * paths are elided in this listing). Caller must hold the xdma lock.
 */
253 pdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
255 struct pdma_channel *chan;
256 struct pdma_softc *sc;
259 sc = device_get_softc(dev);
261 xdma_assert_locked();
263 for (i = 0; i < PDMA_NCHANNELS; i++) {
264 chan = &pdma_channels[i];
265 if (chan->used == 0) {
267 xchan->chan = (void *)chan;
/*
 * pdma_channel_free() fragment: look up the backing software channel;
 * the actual release (clearing chan->used) is elided in this listing.
 * Caller must hold the xdma lock.
 */
279 pdma_channel_free(device_t dev, struct xdma_channel *xchan)
281 struct pdma_channel *chan;
282 struct pdma_softc *sc;
284 sc = device_get_softc(dev);
286 xdma_assert_locked();
288 chan = (struct pdma_channel *)xchan->chan;
/*
 * pdma_channel_prep_memcpy() fragment: build a single AUTO-request
 * hardware descriptor that copies conf->block_len bytes from src_addr
 * to dst_addr with both addresses auto-incrementing (conf lookup and
 * returns elided in this listing).
 */
295 pdma_channel_prep_memcpy(device_t dev, struct xdma_channel *xchan)
297 struct pdma_channel *chan;
298 struct pdma_hwdesc *desc;
299 struct pdma_softc *sc;
303 sc = device_get_softc(dev);
305 chan = (struct pdma_channel *)xchan->chan;
306 /* Ensure we are not in operation */
/* One 8-byte-aligned hardware descriptor. */
309 ret = xdma_desc_alloc(xchan, sizeof(struct pdma_hwdesc), 8);
311 device_printf(sc->dev,
312 "%s: Can't allocate descriptors.\n", __func__);
317 desc = (struct pdma_hwdesc *)xchan->descs;
/* Source/destination addresses; DRT_AUTO = software-triggered request. */
318 desc[0].dsa = conf->src_addr;
319 desc[0].dta = conf->dst_addr;
320 desc[0].drt = DRT_AUTO;
/* Increment both source and destination addresses as the copy proceeds. */
321 desc[0].dcm = DCM_SAI | DCM_DAI;
323 /* 4 byte copy for now. */
/* Transfer count in 4-byte units; NOTE(review): block_len not a multiple
 * of 4 would be truncated here — confirm callers guarantee alignment. */
324 desc[0].dtc = (conf->block_len / 4);
325 desc[0].dcm |= DCM_SP_4 | DCM_DP_4 | DCM_TSZ_4;
/* Transfer-interrupt enable so pdma_intr fires on completion. */
326 desc[0].dcm |= DCM_TIE;
/*
 * access_width() fragment: derive the DCM source/destination port-width
 * and transfer-size bits from the configured src/dst widths; *max_width
 * is the larger of the two and is used by callers to scale the transfer
 * count (all switch bodies are elided in this listing). Callers treat a
 * non-zero return as "unsupported width" (see prep_cyclic).
 */
332 access_width(xdma_config_t *conf, uint32_t *dcm, uint32_t *max_width)
336 *max_width = max(conf->src_width, conf->dst_width);
338 switch (conf->src_width) {
352 switch (conf->dst_width) {
366 switch (*max_width) {
/*
 * pdma_channel_prep_cyclic() fragment: build one hardware descriptor per
 * block for a cyclic transfer. Direction decides which address advances
 * per block and which peripheral request type (data->tx / data->rx from
 * the FDT cells) is used; MEM_TO_MEM uses the software AUTO request.
 * Several lines (conf/xdma lookups, error returns, loop close) are
 * elided in this listing.
 */
384 pdma_channel_prep_cyclic(device_t dev, struct xdma_channel *xchan)
386 struct pdma_fdt_data *data;
387 struct pdma_channel *chan;
388 struct pdma_hwdesc *desc;
389 xdma_controller_t *xdma;
390 struct pdma_softc *sc;
398 sc = device_get_softc(dev);
402 data = (struct pdma_fdt_data *)xdma->data;
/* One 8-byte-aligned descriptor per block (count set elsewhere). */
404 ret = xdma_desc_alloc(xchan, sizeof(struct pdma_hwdesc), 8);
406 device_printf(sc->dev,
407 "%s: Can't allocate descriptors.\n", __func__);
411 chan = (struct pdma_channel *)xchan->chan;
412 /* Ensure we are not in operation */
/* Mark for per-interrupt re-linking; see workaround comment below. */
414 chan->flags = CHAN_DESCR_RELINK;
417 desc = (struct pdma_hwdesc *)xchan->descs;
419 for (i = 0; i < conf->block_num; i++) {
420 if (conf->direction == XDMA_MEM_TO_DEV) {
/* Memory side advances per block; device address is fixed. */
421 desc[i].dsa = conf->src_addr + (i * conf->block_len);
422 desc[i].dta = conf->dst_addr;
423 desc[i].drt = data->tx;
424 desc[i].dcm = DCM_SAI;
425 } else if (conf->direction == XDMA_DEV_TO_MEM) {
/* Device address fixed; memory destination advances per block. */
426 desc[i].dsa = conf->src_addr;
427 desc[i].dta = conf->dst_addr + (i * conf->block_len);
428 desc[i].drt = data->rx;
429 desc[i].dcm = DCM_DAI;
430 } else if (conf->direction == XDMA_MEM_TO_MEM) {
/* Both sides advance; software (AUTO) request. */
431 desc[i].dsa = conf->src_addr + (i * conf->block_len);
432 desc[i].dta = conf->dst_addr + (i * conf->block_len);
433 desc[i].drt = DRT_AUTO;
434 desc[i].dcm = DCM_SAI | DCM_DAI;
437 if (access_width(conf, &dcm, &max_width) != 0) {
439 "%s: can't configure access width\n", __func__);
/* Width bits plus per-descriptor interrupt enable. */
443 desc[i].dcm |= dcm | DCM_TIE;
/* Transfer count is expressed in max_width-sized units. */
444 desc[i].dtc = (conf->block_len / max_width);
447 * PDMA does not provide interrupt after processing each descriptor,
448 * but after processing all the chain only.
449 * As a workaround we do unlink descriptors here, so our chain will
450 * consists of single descriptor only. And then we reconfigure channel
451 * on each interrupt again.
/* Only link descriptors when NOT re-linking per interrupt: set the LINK
 * bit and encode the next descriptor's 16-byte-aligned offset into the
 * top byte of the transfer-count word. */
453 if ((chan->flags & CHAN_DESCR_RELINK) == 0) {
454 if (i != (conf->block_num - 1)) {
455 desc[i].dcm |= DCM_LINK;
456 reg = ((i + 1) * sizeof(struct pdma_hwdesc));
457 desc[i].dtc |= (reg >> 4) << 24;
/*
 * pdma_channel_control() fragment: dispatch xdma control commands for a
 * channel. The visible cases: BEGIN starts the channel via chan_start();
 * TERMINATE's body and the PAUSE/RESUME handling are elided (the TODO
 * marks an unimplemented case).
 */
466 pdma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
468 struct pdma_channel *chan;
469 struct pdma_softc *sc;
471 sc = device_get_softc(dev);
473 chan = (struct pdma_channel *)xchan->chan;
477 chan_start(sc, chan);
479 case XDMA_CMD_TERMINATE:
483 /* TODO: implement me */
/*
 * pdma_ofw_md_data() fragment: allocate per-consumer data decoded from
 * the FDT "dmas" specifier; cells[2] is the hardware channel number
 * (tx/rx request-type assignments and *ptr hand-off elided in this
 * listing).
 * NOTE(review): malloc with M_WAITOK cannot return NULL on FreeBSD, so
 * the failure message below looks unreachable — confirm and simplify.
 */
492 pdma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
494 struct pdma_fdt_data *data;
500 data = malloc(sizeof(struct pdma_fdt_data), M_DEVBUF, (M_WAITOK | M_ZERO));
502 device_printf(dev, "%s: Cant allocate memory\n", __func__);
508 data->chan = cells[2];
/* Dispatch table: newbus device interface plus the xdma backend methods
 * this controller implements (DEVMETHOD_END terminator elided). */
516 static device_method_t pdma_methods[] = {
517 /* Device interface */
518 DEVMETHOD(device_probe, pdma_probe),
519 DEVMETHOD(device_attach, pdma_attach),
520 DEVMETHOD(device_detach, pdma_detach),
/* xdma interface */
523 DEVMETHOD(xdma_channel_alloc, pdma_channel_alloc),
524 DEVMETHOD(xdma_channel_free, pdma_channel_free),
525 DEVMETHOD(xdma_channel_prep_cyclic, pdma_channel_prep_cyclic),
526 DEVMETHOD(xdma_channel_prep_memcpy, pdma_channel_prep_memcpy),
527 DEVMETHOD(xdma_channel_control, pdma_channel_control),
/* xdma FDT interface: decode "dmas" specifier cells for consumers. */
529 DEVMETHOD(xdma_ofw_md_data, pdma_ofw_md_data),
/* Driver declaration (name field elided in this listing). */
535 static driver_t pdma_driver = {
538 sizeof(struct pdma_softc),
541 static devclass_t pdma_devclass;
/* Attach under simplebus early — during the interrupt pass — so DMA is
 * available before ordinary drivers attach. */
543 EARLY_DRIVER_MODULE(pdma, simplebus, pdma_driver, pdma_devclass, 0, 0,
544 BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);