2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5 * Ben Gray <ben.r.gray@gmail.com>.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/kernel.h>
38 #include <sys/interrupt.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/mutex.h>
43 #include <sys/queue.h>
44 #include <sys/taskqueue.h>
45 #include <sys/timetc.h>
46 #include <machine/bus.h>
47 #include <machine/intr.h>
49 #include <dev/ofw/openfirm.h>
50 #include <dev/ofw/ofw_bus.h>
51 #include <dev/ofw/ofw_bus_subr.h>
53 #include <arm/ti/ti_cpuid.h>
54 #include <arm/ti/ti_prcm.h>
55 #include <arm/ti/ti_sdma.h>
56 #include <arm/ti/ti_sdmareg.h>
59 * Kernel functions for using the DMA controller
63 * A DMA transfer block consists of a number of frames (FN). Each frame
64 * consists of a number of elements, and each element can have a size of 8, 16,
67 * OMAP44xx and newer chips support linked list (aka scatter gather) transfers,
68 * where a linked list of source/destination pairs can be placed in memory
69 * for the H/W to process. Earlier chips only allowed you to chain multiple
70 * channels together. However currently this linked list feature is not
71 * supported by the driver.
76 * Data structure per DMA channel.
80 struct ti_sdma_channel {
83 * The configuration registers for the given channel, these are modified
84 * by the set functions and only written to the actual registers when a
85 * transaction is started.
/* NOTE(review): the shadow-register fields themselves (reg_csdp, reg_ccr,
 * reg_cicr — referenced by the functions below) are missing from this
 * extract; confirm against the full file. */
91 /* Set when one of the configuration registers above change */
92 uint32_t need_reg_write;
94 /* Callback function used when an interrupt is tripped on the given channel */
95 void (*callback)(unsigned int ch, uint32_t ch_status, void *data);
97 /* Callback data passed in the callback ... duh */
/* NOTE(review): the callback_data field declaration and the closing brace
 * of this struct are not visible in this extract. */
103 * DMA driver context, allocated and stored globally, this driver is not
104 * intended to ever be unloaded (see ti_sdma_sc).
107 struct ti_sdma_softc {
/* Bus resources: the IRQ line and the memory-mapped register window used
 * by ti_sdma_read_4()/ti_sdma_write_4(). */
109 struct resource* sc_irq_res;
110 struct resource* sc_mem_res;
113 * I guess in theory we should have a mutex per DMA channel for register
114 * modifications. But since we know we are never going to be run on a SMP
115 * system, we can use just the single lock for all channels.
119 /* Stores the H/W revision read from the registers */
123 * Bits in the sc_active_channels data field indicate if the channel has
126 uint32_t sc_active_channels;
128 struct ti_sdma_channel sc_channel[NUM_DMA_CHANNELS];
/* Singleton softc pointer; set once at attach, consulted by every public
 * ti_sdma_* entry point. NOTE(review): sc_dev, sc_mtx and sc_hw_rev field
 * declarations are missing from this extract. */
132 static struct ti_sdma_softc *ti_sdma_sc = NULL;
135 * Macros for driver mutex locking
/* Spin mutex: the same lock is taken from the interrupt handler, so the
 * non-spin mtx_lock() variants must not be used here. */
137 #define TI_SDMA_LOCK(_sc) mtx_lock_spin(&(_sc)->sc_mtx)
138 #define TI_SDMA_UNLOCK(_sc) mtx_unlock_spin(&(_sc)->sc_mtx)
139 #define TI_SDMA_LOCK_INIT(_sc) \
140 mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
142 #define TI_SDMA_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
143 #define TI_SDMA_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
144 #define TI_SDMA_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
147 * Function prototypes
150 static void ti_sdma_intr(void *);
153 * ti_sdma_read_4 - reads a 32-bit value from one of the DMA registers
154 * @sc: DMA device context
155 * @off: The offset of a register from the DMA register address range
159 * 32-bit value read from the register.
161 static inline uint32_t
162 ti_sdma_read_4(struct ti_sdma_softc *sc, bus_size_t off)
164 return bus_read_4(sc->sc_mem_res, off);
168 * ti_sdma_write_4 - writes a 32-bit value to one of the DMA registers
169 * @sc: DMA device context
170 * @off: The offset of a register from the DMA register address range
174 * 32-bit value read from the register.
177 ti_sdma_write_4(struct ti_sdma_softc *sc, bus_size_t off, uint32_t val)
179 bus_write_4(sc->sc_mem_res, off, val);
183 * ti_sdma_is_omap3_rev - returns true if H/W is from OMAP3 series
184 * @sc: DMA device context
188 ti_sdma_is_omap3_rev(struct ti_sdma_softc *sc)
190 return (sc->sc_hw_rev == DMA4_OMAP3_REV);
194 * ti_sdma_is_omap4_rev - returns true if H/W is from OMAP4 series
195 * @sc: DMA device context
199 ti_sdma_is_omap4_rev(struct ti_sdma_softc *sc)
201 return (sc->sc_hw_rev == DMA4_OMAP4_REV);
205 * ti_sdma_intr - interrupt handler for all 4 DMA IRQs
208 * Called when any of the four DMA IRQs are triggered.
211 * DMA registers protected by internal mutex
/* NOTE(review): this extract is missing several lines of this function
 * (braces, continue statements, local declarations); do not treat it as
 * compilable — compare against the full file. */
217 ti_sdma_intr(void *arg)
219 struct ti_sdma_softc *sc = ti_sdma_sc;
223 struct ti_sdma_channel* channel;
/* Walk each of the four IRQ lines the controller can raise. */
227 for (j = 0; j < NUM_DMA_IRQS; j++) {
229 /* Get the flag interrupts (enabled) */
230 intr = ti_sdma_read_4(sc, DMA4_IRQSTATUS_L(j));
/* Mask the raw status with the enable register so only interrupts we
 * actually asked for are serviced. */
231 intr &= ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
232 if (intr == 0x00000000)
235 /* Loop through checking the status bits */
236 for (ch = 0; ch < NUM_DMA_CHANNELS; ch++) {
237 if (intr & (1 << ch)) {
238 channel = &sc->sc_channel[ch];
240 /* Read the CSR register and verify we don't have a spurious IRQ */
241 csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
243 device_printf(sc->sc_dev, "Spurious DMA IRQ for channel "
248 /* Sanity check this channel is active */
249 if ((sc->sc_active_channels & (1 << ch)) == 0) {
250 device_printf(sc->sc_dev, "IRQ %d for a non-activated "
251 "channel %d\n", j, ch);
255 /* Check the status error codes */
256 if (csr & DMA4_CSR_DROP)
257 device_printf(sc->sc_dev, "Synchronization event drop "
258 "occurred during the transfer on channel %u\n",
260 if (csr & DMA4_CSR_SECURE_ERR)
261 device_printf(sc->sc_dev, "Secure transaction error event "
262 "on channel %u\n", ch);
263 if (csr & DMA4_CSR_MISALIGNED_ADRS_ERR)
264 device_printf(sc->sc_dev, "Misaligned address error event "
265 "on channel %u\n", ch);
266 if (csr & DMA4_CSR_TRANS_ERR) {
267 device_printf(sc->sc_dev, "Transaction error event on "
270 * Apparently according to linux code, there is an errata
271 * that says the channel is not disabled upon this error.
272 * They explicitly disable the channel here .. since I
273 * haven't seen the errata, I'm going to ignore for now.
/* Acknowledge the interrupt: clear the per-channel CSR flags, then clear
 * this channel's bit in the per-IRQ-line status register. */
277 /* Clear the status flags for the IRQ */
278 ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
279 ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
281 /* Call the callback for the given channel */
282 if (channel->callback)
283 channel->callback(ch, csr, channel->callback_data);
294 * ti_sdma_activate_channel - activates a DMA channel
295 * @ch: upon return contains the channel allocated
296 * @callback: a callback function to associate with the channel
297 * @data: optional data supplied when the callback is called
299 * Simply activates a channel be enabling and writing default values to the
300 * channel's register set. It doesn't start a transaction, just populates the
301 * internal data structures and sets defaults.
303 * Note this function doesn't enable interrupts, for that you need to call
304 * ti_sdma_enable_channel_irq(). If not using IRQ to detect the end of the
305 * transfer, you can use ti_sdma_status_poll() to detect a change in the
308 * A channel must be activated before any of the other DMA functions can be
312 * DMA registers protected by internal mutex
315 * 0 on success, otherwise an error code
/* NOTE(review): return type, sanity checks, locking calls and several
 * other lines of this function are missing from this extract. */
318 ti_sdma_activate_channel(unsigned int *ch,
319 void (*callback)(unsigned int ch, uint32_t status, void *data),
322 struct ti_sdma_softc *sc = ti_sdma_sc;
323 struct ti_sdma_channel *channel = NULL;
336 /* Check to see if all channels are in use */
337 if (sc->sc_active_channels == 0xffffffff) {
342 /* Find the first non-active channel */
343 for (i = 0; i < NUM_DMA_CHANNELS; i++) {
344 if (!(sc->sc_active_channels & (0x1 << i))) {
/* Claim the channel bit; presumably *ch is set to i here in a line not
 * visible in this extract — TODO confirm. */
345 sc->sc_active_channels |= (0x1 << i);
351 /* Get the channel struct and populate the fields */
352 channel = &sc->sc_channel[*ch];
354 channel->callback = callback;
355 channel->callback_data = data;
/* Force the shadow registers to be pushed to hardware on the next xfer. */
357 channel->need_reg_write = 1;
359 /* Set the default configuration for the DMA channel */
360 channel->reg_csdp = DMA4_CSDP_DATA_TYPE(0x2)
361 | DMA4_CSDP_SRC_BURST_MODE(0)
362 | DMA4_CSDP_DST_BURST_MODE(0)
363 | DMA4_CSDP_SRC_ENDIANISM(0)
364 | DMA4_CSDP_DST_ENDIANISM(0)
365 | DMA4_CSDP_WRITE_MODE(0)
366 | DMA4_CSDP_SRC_PACKED(0)
367 | DMA4_CSDP_DST_PACKED(0);
369 channel->reg_ccr = DMA4_CCR_DST_ADDRESS_MODE(1)
370 | DMA4_CCR_SRC_ADDRESS_MODE(1)
371 | DMA4_CCR_READ_PRIORITY(0)
372 | DMA4_CCR_WRITE_PRIORITY(0)
373 | DMA4_CCR_SYNC_TRIGGER(0)
374 | DMA4_CCR_FRAME_SYNC(0)
375 | DMA4_CCR_BLOCK_SYNC(0);
/* Error interrupts are always armed in the shadow CICR; the completion
 * interrupts are opt-in via ti_sdma_enable_channel_irq(). */
377 channel->reg_cicr = DMA4_CICR_TRANS_ERR_IE
378 | DMA4_CICR_SECURE_ERR_IE
379 | DMA4_CICR_SUPERVISOR_ERR_IE
380 | DMA4_CICR_MISALIGNED_ADRS_ERR_IE;
382 /* Clear all the channel registers, this should abort any transaction */
383 for (addr = DMA4_CCR(*ch); addr <= DMA4_COLOR(*ch); addr += 4)
384 ti_sdma_write_4(sc, addr, 0x00000000);
392 * ti_sdma_deactivate_channel - deactivates a channel
393 * @ch: the channel to deactivate
398 * DMA registers protected by internal mutex
401 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, locking calls and the error/return paths of
 * this function are missing from this extract. */
404 ti_sdma_deactivate_channel(unsigned int ch)
406 struct ti_sdma_softc *sc = ti_sdma_sc;
416 /* First check if the channel is currently active */
417 if ((sc->sc_active_channels & (1 << ch)) == 0) {
422 /* Mark the channel as inactive */
423 sc->sc_active_channels &= ~(1 << ch);
425 /* Disable all DMA interrupts for the channel. */
426 ti_sdma_write_4(sc, DMA4_CICR(ch), 0);
428 /* Make sure the DMA transfer is stopped. */
429 ti_sdma_write_4(sc, DMA4_CCR(ch), 0);
431 /* Clear the CSR register and IRQ status register */
432 ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
/* Ack this channel's bit on every IRQ line — any of the four lines may
 * hold a pending status bit for this channel. */
433 for (j = 0; j < NUM_DMA_IRQS; j++) {
434 ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
437 /* Clear all the channel registers, this should abort any transaction */
438 for (addr = DMA4_CCR(ch); addr <= DMA4_COLOR(ch); addr += 4)
439 ti_sdma_write_4(sc, addr, 0x00000000);
447 * ti_sdma_disable_channel_irq - disables IRQ's on the given channel
448 * @ch: the channel to disable IRQ's on
450 * Disable interrupt generation for the given channel.
453 * DMA registers protected by internal mutex
456 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, locking and error-return lines are missing
 * from this extract. */
459 ti_sdma_disable_channel_irq(unsigned int ch)
461 struct ti_sdma_softc *sc = ti_sdma_sc;
471 if ((sc->sc_active_channels & (1 << ch)) == 0) {
476 /* Disable all the individual error conditions */
477 sc->sc_channel[ch].reg_cicr = 0x0000;
478 ti_sdma_write_4(sc, DMA4_CICR(ch), 0x0000);
480 /* Disable the channel interrupt enable */
/* Read-modify-write each IRQ line's enable mask to drop this channel. */
481 for (j = 0; j < NUM_DMA_IRQS; j++) {
482 irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
483 irq_enable &= ~(1 << ch);
485 ti_sdma_write_4(sc, DMA4_IRQENABLE_L(j), irq_enable);
488 /* Indicate the registers need to be rewritten on the next transaction */
489 sc->sc_channel[ch].need_reg_write = 1;
497 * ti_sdma_enable_channel_irq - enables IRQ's on the given channel
498 * @ch: the channel to enable IRQ's on
499 * @flags: bitmask of interrupt types to enable
501 * Flags can be a bitmask of the following options:
503 * DMA_IRQ_FLAG_HALF_FRAME_COMPL
504 * DMA_IRQ_FLAG_FRAME_COMPL
505 * DMA_IRQ_FLAG_START_LAST_FRAME
506 * DMA_IRQ_FLAG_BLOCK_COMPL
507 * DMA_IRQ_FLAG_ENDOF_PKT
512 * DMA registers protected by internal mutex
515 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, locking and error-return lines are missing
 * from this extract. */
518 ti_sdma_enable_channel_irq(unsigned int ch, uint32_t flags)
520 struct ti_sdma_softc *sc = ti_sdma_sc;
529 if ((sc->sc_active_channels & (1 << ch)) == 0) {
534 /* Always enable the error interrupts if we have interrupts enabled */
535 flags |= DMA4_CICR_TRANS_ERR_IE | DMA4_CICR_SECURE_ERR_IE |
536 DMA4_CICR_SUPERVISOR_ERR_IE | DMA4_CICR_MISALIGNED_ADRS_ERR_IE;
538 sc->sc_channel[ch].reg_cicr = flags;
540 /* Write the values to the register */
541 ti_sdma_write_4(sc, DMA4_CICR(ch), flags);
543 /* Enable the channel interrupt enable */
/* Only IRQ line 0 is armed here — the driver routes all channel
 * completion interrupts through DMA4_IRQENABLE_L(0). */
544 irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(0));
545 irq_enable |= (1 << ch);
547 ti_sdma_write_4(sc, DMA4_IRQENABLE_L(0), irq_enable);
549 /* Indicate the registers need to be rewritten on the next transaction */
550 sc->sc_channel[ch].need_reg_write = 1;
558 * ti_sdma_get_channel_status - returns the status of a given channel
559 * @ch: the channel number to get the status of
560 * @status: upon return will contain the status bitmask, see below for possible
570 * DMA_STATUS_TRANS_ERR
571 * DMA_STATUS_SECURE_ERR
572 * DMA_STATUS_SUPERVISOR_ERR
573 * DMA_STATUS_MISALIGNED_ADRS_ERR
574 * DMA_STATUS_DRAIN_END
578 * DMA registers protected by internal mutex
581 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, locking, the error-return path and the line
 * storing csr into *status are missing from this extract. */
584 ti_sdma_get_channel_status(unsigned int ch, uint32_t *status)
586 struct ti_sdma_softc *sc = ti_sdma_sc;
595 if ((sc->sc_active_channels & (1 << ch)) == 0) {
/* Snapshot the raw channel status register. */
602 csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
611 * ti_sdma_start_xfer - starts a DMA transfer
612 * @ch: the channel number to set the endianness of
613 * @src_paddr: the source physical address
614 * @dst_paddr: the destination physical address
615 * @frmcnt: the number of frames per block
616 * @elmcnt: the number of elements in a frame, an element is either an 8, 16
617 * or 32-bit value as defined by ti_sdma_set_xfer_burst()
621 * DMA registers protected by internal mutex
624 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): src_paddr is `unsigned int` while dst_paddr is
 * `unsigned long` — inconsistent widths for two physical addresses;
 * verify against callers before changing. Return type, locking, the
 * need_reg_write guards and the CCR enable-bit line are missing from
 * this extract. */
627 ti_sdma_start_xfer(unsigned int ch, unsigned int src_paddr,
628 unsigned long dst_paddr,
629 unsigned int frmcnt, unsigned int elmcnt)
631 struct ti_sdma_softc *sc = ti_sdma_sc;
632 struct ti_sdma_channel *channel;
641 if ((sc->sc_active_channels & (1 << ch)) == 0) {
646 channel = &sc->sc_channel[ch];
/* Program the channel following the DMA4 programming sequence, steps
 * (a) through (f) below. */
648 /* a) Write the CSDP register */
649 ti_sdma_write_4(sc, DMA4_CSDP(ch),
650 channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));
652 /* b) Set the number of element per frame CEN[23:0] */
653 ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);
655 /* c) Set the number of frame per block CFN[15:0] */
656 ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);
658 /* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
659 ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
660 ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);
662 /* e) Write the CCR register */
663 ti_sdma_write_4(sc, DMA4_CCR(ch), channel->reg_ccr);
665 /* f) - Set the source element index increment CSEI[15:0] */
666 ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);
668 /* - Set the source frame index increment CSFI[15:0] */
669 ti_sdma_write_4(sc, DMA4_CSF(ch), 0x0001);
671 /* - Set the destination element index increment CDEI[15:0]*/
672 ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);
674 /* - Set the destination frame index increment CDFI[31:0] */
675 ti_sdma_write_4(sc, DMA4_CDF(ch), 0x0001);
677 /* Clear the status register */
678 ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);
680 /* Write the start-bit and away we go */
/* Read-modify-write of CCR; the line OR-ing in the enable bit is not
 * visible in this extract. */
681 ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
683 ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);
685 /* Clear the reg write flag */
686 channel->need_reg_write = 0;
694 * ti_sdma_start_xfer_packet - starts a packet DMA transfer
695 * @ch: the channel number to use for the transfer
696 * @src_paddr: the source physical address
697 * @dst_paddr: the destination physical address
698 * @frmcnt: the number of frames to transfer
699 * @elmcnt: the number of elements in a frame, an element is either an 8, 16
700 * or 32-bit value as defined by ti_sdma_set_xfer_burst()
701 * @pktsize: the number of elements in each transfer packet
703 * The @frmcnt and @elmcnt define the overall number of bytes to transfer,
704 * typically @frmcnt is 1 and @elmcnt contains the total number of elements.
705 * @pktsize is the size of each individual packet, there might be multiple
706 * packets per transfer. i.e. for the following with element size of 32-bits
708 * frmcnt = 1, elmcnt = 512, pktsize = 128
710 * Total transfer bytes = 1 * 512 = 512 elements or 2048 bytes
711 * Packets transferred = 512 / 128 = 4
715 * DMA registers protected by internal mutex
718 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, locking, the remaining need_reg_write guards
 * and the CCR enable-bit line are missing from this extract. */
721 ti_sdma_start_xfer_packet(unsigned int ch, unsigned int src_paddr,
722 unsigned long dst_paddr, unsigned int frmcnt,
723 unsigned int elmcnt, unsigned int pktsize)
725 struct ti_sdma_softc *sc = ti_sdma_sc;
726 struct ti_sdma_channel *channel;
735 if ((sc->sc_active_channels & (1 << ch)) == 0) {
740 channel = &sc->sc_channel[ch];
742 /* a) Write the CSDP register */
/* CSDP only needs rewriting if a set_* call dirtied the shadow copy. */
743 if (channel->need_reg_write)
744 ti_sdma_write_4(sc, DMA4_CSDP(ch),
745 channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));
747 /* b) Set the number of elements to transfer CEN[23:0] */
748 ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);
750 /* c) Set the number of frames to transfer CFN[15:0] */
751 ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);
753 /* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
754 ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
755 ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);
757 /* e) Write the CCR register */
/* DMA4_CCR_PACKET_TRANS marks this as a packet-mode transfer. */
758 ti_sdma_write_4(sc, DMA4_CCR(ch),
759 channel->reg_ccr | DMA4_CCR_PACKET_TRANS);
761 /* f) - Set the source element index increment CSEI[15:0] */
762 ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);
764 /* - Set the packet size, this is dependent on the sync source */
/* Packet size goes in the source frame index (CSF) when source-synced,
 * otherwise in the destination frame index (CDF); the `else` line is not
 * visible in this extract. */
765 if (channel->reg_ccr & DMA4_CCR_SEL_SRC_DST_SYNC(1))
766 ti_sdma_write_4(sc, DMA4_CSF(ch), pktsize);
768 ti_sdma_write_4(sc, DMA4_CDF(ch), pktsize);
770 /* - Set the destination element index increment CDEI[15:0] */
771 ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);
773 /* Clear the status register */
774 ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);
776 /* Write the start-bit and away we go */
777 ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
779 ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);
781 /* Clear the reg write flag */
782 channel->need_reg_write = 0;
790 * ti_sdma_stop_xfer - stops any currently active transfers
791 * @ch: the channel number to set the endianness of
793 * This function call is effectively a NOP if no transaction is in progress.
796 * DMA registers protected by internal mutex
799 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, locking calls and the error/return paths are
 * missing from this extract. */
802 ti_sdma_stop_xfer(unsigned int ch)
804 struct ti_sdma_softc *sc = ti_sdma_sc;
813 if ((sc->sc_active_channels & (1 << ch)) == 0) {
818 /* Disable all DMA interrupts for the channel. */
819 ti_sdma_write_4(sc, DMA4_CICR(ch), 0);
821 /* Make sure the DMA transfer is stopped. */
/* Clearing CCR drops the enable bit, halting any in-flight transfer. */
822 ti_sdma_write_4(sc, DMA4_CCR(ch), 0);
824 /* Clear the CSR register and IRQ status register */
825 ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
826 for (j = 0; j < NUM_DMA_IRQS; j++) {
827 ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
830 /* Configuration registers need to be re-written on the next xfer */
831 sc->sc_channel[ch].need_reg_write = 1;
839 * ti_sdma_set_xfer_endianess - sets the endianness of subsequent transfers
840 * @ch: the channel number to set the endianness of
841 * @src: the source endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
842 * @dst: the destination endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
846 * DMA registers protected by internal mutex
849 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, locking and error-return lines are missing
 * from this extract. */
852 ti_sdma_set_xfer_endianess(unsigned int ch, unsigned int src, unsigned int dst)
854 struct ti_sdma_softc *sc = ti_sdma_sc;
862 if ((sc->sc_active_channels & (1 << ch)) == 0) {
/* Clear-then-set each single-bit endianness field in the shadow CSDP;
 * the hardware register is rewritten on the next transfer. */
867 sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_ENDIANISM(1);
868 sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_ENDIANISM(src);
870 sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_ENDIANISM(1);
871 sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_ENDIANISM(dst);
873 sc->sc_channel[ch].need_reg_write = 1;
881 * ti_sdma_set_xfer_burst - sets the source and destination element size
882 * @ch: the channel number to set the burst settings of
883 * @src: the source burst mode (either DMA_BURST_NONE, DMA_BURST_16, DMA_BURST_32
885 * @dst: the destination burst mode (either DMA_BURST_NONE, DMA_BURST_16,
886 * DMA_BURST_32 or DMA_BURST_64)
888 * This function sets the size of the elements for all subsequent transfers.
891 * DMA registers protected by internal mutex
894 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, locking and error-return lines are missing
 * from this extract. */
897 ti_sdma_set_xfer_burst(unsigned int ch, unsigned int src, unsigned int dst)
899 struct ti_sdma_softc *sc = ti_sdma_sc;
907 if ((sc->sc_active_channels & (1 << ch)) == 0) {
/* Clear-then-set the 2-bit burst-mode fields in the shadow CSDP. */
912 sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_BURST_MODE(0x3);
913 sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_BURST_MODE(src);
915 sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_BURST_MODE(0x3);
916 sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_BURST_MODE(dst);
918 sc->sc_channel[ch].need_reg_write = 1;
926 * ti_sdma_set_xfer_data_type - sets the element data type for transfers
927 * @ch: the channel number to set the data type of
928 * @type: the xfer data type (either DMA_DATA_8BITS_SCALAR, DMA_DATA_16BITS_SCALAR
929 * or DMA_DATA_32BITS_SCALAR)
933 * DMA registers protected by internal mutex
936 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, locking and error-return lines are missing
 * from this extract. */
939 ti_sdma_set_xfer_data_type(unsigned int ch, unsigned int type)
941 struct ti_sdma_softc *sc = ti_sdma_sc;
949 if ((sc->sc_active_channels & (1 << ch)) == 0) {
/* Clear-then-set the 2-bit data-type field in the shadow CSDP. */
954 sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DATA_TYPE(0x3);
955 sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DATA_TYPE(type);
957 sc->sc_channel[ch].need_reg_write = 1;
965 * ti_sdma_set_callback - sets the interrupt callback for a channel
966 * @ch: the channel to attach the callback to
971 * DMA registers protected by internal mutex
974 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, the callback/data parameter doc lines,
 * locking and error-return lines are missing from this extract. */
977 ti_sdma_set_callback(unsigned int ch,
978 void (*callback)(unsigned int ch, uint32_t status, void *data),
981 struct ti_sdma_softc *sc = ti_sdma_sc;
989 if ((sc->sc_active_channels & (1 << ch)) == 0) {
/* Replace the callback pair invoked from ti_sdma_intr(). */
994 sc->sc_channel[ch].callback = callback;
995 sc->sc_channel[ch].callback_data = data;
997 sc->sc_channel[ch].need_reg_write = 1;
1005 * ti_sdma_sync_params - sets channel sync settings
1006 * @ch: the channel number to set the sync on
1007 * @trigger: the number of the sync trigger, this depends on what other H/W
1008 * module is triggering/receiving the DMA transactions
1009 * @mode: flags describing the sync mode to use, it may have one or more of
1010 * the following bits set; TI_SDMA_SYNC_FRAME,
1011 * TI_SDMA_SYNC_BLOCK, TI_SDMA_SYNC_TRIG_ON_SRC.
1016 * DMA registers protected by internal mutex
1019 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, locking, error-return and the `else` lines of
 * the three conditionals below are missing from this extract. */
1022 ti_sdma_sync_params(unsigned int ch, unsigned int trigger, unsigned int mode)
1024 struct ti_sdma_softc *sc = ti_sdma_sc;
1033 if ((sc->sc_active_channels & (1 << ch)) == 0) {
1038 ccr = sc->sc_channel[ch].reg_ccr;
/* The trigger field is programmed as (trigger + 1) — hardware trigger
 * numbering is offset by one from the caller's numbering. */
1040 ccr &= ~DMA4_CCR_SYNC_TRIGGER(0x7F);
1041 ccr |= DMA4_CCR_SYNC_TRIGGER(trigger + 1);
1043 if (mode & TI_SDMA_SYNC_FRAME)
1044 ccr |= DMA4_CCR_FRAME_SYNC(1);
1046 ccr &= ~DMA4_CCR_FRAME_SYNC(1);
1048 if (mode & TI_SDMA_SYNC_BLOCK)
1049 ccr |= DMA4_CCR_BLOCK_SYNC(1);
1051 ccr &= ~DMA4_CCR_BLOCK_SYNC(1);
1053 if (mode & TI_SDMA_SYNC_TRIG_ON_SRC)
1054 ccr |= DMA4_CCR_SEL_SRC_DST_SYNC(1);
1056 ccr &= ~DMA4_CCR_SEL_SRC_DST_SYNC(1);
1058 sc->sc_channel[ch].reg_ccr = ccr;
1060 sc->sc_channel[ch].need_reg_write = 1;
1068 * ti_sdma_set_addr_mode - sets the addressing mode for transfers
1069 * @ch: the channel number to set the addressing mode of
1070 * @src_mode: the xfer source addressing mode (either DMA_ADDR_CONSTANT,
1071 * DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
1072 * DMA_ADDR_DOUBLE_INDEX)
1073 * @dst_mode: the xfer destination addressing mode (either DMA_ADDR_CONSTANT,
1074 * DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
1075 * DMA_ADDR_DOUBLE_INDEX)
1079 * DMA registers protected by internal mutex
1082 * EH_HANDLED or EH_NOT_HANDLED
/* NOTE(review): return type, locking and error-return lines are missing
 * from this extract. */
1085 ti_sdma_set_addr_mode(unsigned int ch, unsigned int src_mode,
1086 unsigned int dst_mode)
1088 struct ti_sdma_softc *sc = ti_sdma_sc;
1097 if ((sc->sc_active_channels & (1 << ch)) == 0) {
1102 ccr = sc->sc_channel[ch].reg_ccr;
/* Clear-then-set the 2-bit address-mode fields in the shadow CCR. */
1104 ccr &= ~DMA4_CCR_SRC_ADDRESS_MODE(0x3);
1105 ccr |= DMA4_CCR_SRC_ADDRESS_MODE(src_mode);
1107 ccr &= ~DMA4_CCR_DST_ADDRESS_MODE(0x3);
1108 ccr |= DMA4_CCR_DST_ADDRESS_MODE(dst_mode);
1110 sc->sc_channel[ch].reg_ccr = ccr;
1112 sc->sc_channel[ch].need_reg_write = 1;
1120 * ti_sdma_probe - driver probe function
1121 * @dev: dma device handle
1129 ti_sdma_probe(device_t dev)
1132 if (!ofw_bus_status_okay(dev))
1135 if (!ofw_bus_is_compatible(dev, "ti,omap4430-sdma"))
1138 device_set_desc(dev, "TI sDMA Controller");
1143 * ti_sdma_attach - driver attach function
1144 * @dev: dma device handle
1146 * Initialises memory mapping/pointers to the DMA register set and requests
1147 * IRQs. This is effectively the setup function for the driver.
1150 * 0 on success or a negative error code failure.
/* NOTE(review): local declarations (rid, i, err, ihl), the sc_dev
 * assignment, rid initialisations, the failure-path returns and the final
 * `ti_sdma_sc = sc; return (0);` lines are missing from this extract. */
1153 ti_sdma_attach(device_t dev)
1155 struct ti_sdma_softc *sc = device_get_softc(dev);
1156 unsigned int timeout;
1162 /* Setup the basics */
1165 /* No channels active at the moment */
1166 sc->sc_active_channels = 0x00000000;
1168 /* Mutex to protect the shared data structures */
1169 TI_SDMA_LOCK_INIT(sc);
1171 /* Get the memory resource for the register mapping */
1173 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
1174 if (sc->sc_mem_res == NULL)
1175 panic("%s: Cannot map registers", device_get_name(dev));
1177 /* Enable the interface and functional clocks */
1178 ti_prcm_clk_enable(SDMA_CLK);
1180 /* Read the sDMA revision register and sanity check it's known */
1181 sc->sc_hw_rev = ti_sdma_read_4(sc, DMA4_REVISION);
1182 device_printf(dev, "sDMA revision %08x\n", sc->sc_hw_rev);
1184 if (!ti_sdma_is_omap4_rev(sc) && !ti_sdma_is_omap3_rev(sc)) {
1185 device_printf(sc->sc_dev, "error - unknown sDMA H/W revision\n");
1189 /* Disable all interrupts */
1190 for (i = 0; i < NUM_DMA_IRQS; i++) {
1191 ti_sdma_write_4(sc, DMA4_IRQENABLE_L(i), 0x00000000);
1194 /* Soft-reset is only supported on pre-OMAP44xx devices */
1195 if (ti_sdma_is_omap3_rev(sc)) {
/* Writing bit 1 of OCP_SYSCONFIG requests a module soft reset. */
1198 ti_sdma_write_4(sc, DMA4_OCP_SYSCONFIG, 0x0002);
1200 /* Set the timeout to 100ms*/
1201 timeout = (hz < 10) ? 1 : ((100 * hz) / 1000);
1203 /* Wait for DMA reset to complete */
/* SYSSTATUS bit 0 reads 1 once the reset has finished. */
1204 while ((ti_sdma_read_4(sc, DMA4_SYSSTATUS) & 0x1) == 0x0) {
1206 /* Sleep for a tick */
1207 pause("DMARESET", 1);
1209 if (timeout-- == 0) {
1210 device_printf(sc->sc_dev, "sDMA reset operation timed out\n");
1217 * Install interrupt handlers for the for possible interrupts. Any channel
1218 * can trip one of the four IRQs
1221 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1222 RF_ACTIVE | RF_SHAREABLE);
1223 if (sc->sc_irq_res == NULL)
1224 panic("Unable to setup the dma irq handler.\n");
1226 err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
1227 NULL, ti_sdma_intr, NULL, &ihl);
1229 panic("%s: Cannot register IRQ", device_get_name(dev));
1231 /* Store the DMA structure globally ... this driver should never be unloaded */
/* newbus glue: only probe and attach are implemented — the driver is
 * never detached (see the comment on ti_sdma_sc above). */
1237 static device_method_t ti_sdma_methods[] = {
1238 DEVMETHOD(device_probe, ti_sdma_probe),
1239 DEVMETHOD(device_attach, ti_sdma_attach),
1243 static driver_t ti_sdma_driver = {
1246 sizeof(struct ti_sdma_softc),
1248 static devclass_t ti_sdma_devclass;
/* Attach below simplebus; depends on ti_prcm for the clock enable call
 * made in ti_sdma_attach(). */
1250 DRIVER_MODULE(ti_sdma, simplebus, ti_sdma_driver, ti_sdma_devclass, 0, 0);
1251 MODULE_DEPEND(ti_sdma, ti_prcm, 1, 1, 1);