2 * SPDX-License-Identifier: BSD-2-Clause
5 * Ben Gray <ben.r.gray@gmail.com>.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
34 #include <sys/kernel.h>
36 #include <sys/interrupt.h>
37 #include <sys/module.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/timetc.h>
44 #include <machine/bus.h>
45 #include <machine/intr.h>
47 #include <dev/ofw/openfirm.h>
48 #include <dev/ofw/ofw_bus.h>
49 #include <dev/ofw/ofw_bus_subr.h>
51 #include <arm/ti/ti_cpuid.h>
52 #include <arm/ti/ti_sysc.h>
53 #include <arm/ti/ti_sdma.h>
54 #include <arm/ti/ti_sdmareg.h>
57 * Kernel functions for using the DMA controller
61 * A DMA transfer block consists of a number of frames (FN). Each frame
62 * consists of a number of elements, and each element can have a size of 8, 16,
65 * OMAP44xx and newer chips support linked list (aka scatter gather) transfers,
66 * where a linked list of source/destination pairs can be placed in memory
67 * for the H/W to process. Earlier chips only allowed you to chain multiple
68 * channels together. However currently this linked list feature is not
69 * supported by the driver.
74 * Data structure per DMA channel.
78 struct ti_sdma_channel {
80 * The configuration registers for the given channel, these are modified
81 * by the set functions and only written to the actual registers when a
82 * transaction is started.
88 /* Set when one of the configuration registers above change */
89 uint32_t need_reg_write;
91 /* Callback function used when an interrupt is tripped on the given channel */
92 void (*callback)(unsigned int ch, uint32_t ch_status, void *data);
94 /* Callback data passed in the callback ... duh */
100 * DMA driver context, allocated and stored globally, this driver is not
101 * intetned to ever be unloaded (see ti_sdma_sc).
104 struct ti_sdma_softc {
106 struct resource* sc_irq_res;
107 struct resource* sc_mem_res;
110 * I guess in theory we should have a mutex per DMA channel for register
111 * modifications. But since we know we are never going to be run on a SMP
112 * system, we can use just the single lock for all channels.
116 /* Stores the H/W revision read from the registers */
120 * Bits in the sc_active_channels data field indicate if the channel has
123 uint32_t sc_active_channels;
125 struct ti_sdma_channel sc_channel[NUM_DMA_CHANNELS];
129 static struct ti_sdma_softc *ti_sdma_sc = NULL;
/**
 * Macros for driver mutex locking (spin mutex: the lock is also taken from
 * the interrupt handler, ti_sdma_intr).
 */
#define	TI_SDMA_LOCK(_sc)		mtx_lock_spin(&(_sc)->sc_mtx)
#define	TI_SDMA_UNLOCK(_sc)		mtx_unlock_spin(&(_sc)->sc_mtx)
#define	TI_SDMA_LOCK_INIT(_sc) \
	mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
	    "ti_sdma", MTX_SPIN)
#define	TI_SDMA_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_mtx)
#define	TI_SDMA_ASSERT_LOCKED(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define	TI_SDMA_ASSERT_UNLOCKED(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)
/**
 * Function prototypes
 */
static void ti_sdma_intr(void *);
150 * ti_sdma_read_4 - reads a 32-bit value from one of the DMA registers
151 * @sc: DMA device context
152 * @off: The offset of a register from the DMA register address range
156 * 32-bit value read from the register.
158 static inline uint32_t
159 ti_sdma_read_4(struct ti_sdma_softc *sc, bus_size_t off)
161 return bus_read_4(sc->sc_mem_res, off);
165 * ti_sdma_write_4 - writes a 32-bit value to one of the DMA registers
166 * @sc: DMA device context
167 * @off: The offset of a register from the DMA register address range
171 * 32-bit value read from the register.
174 ti_sdma_write_4(struct ti_sdma_softc *sc, bus_size_t off, uint32_t val)
176 bus_write_4(sc->sc_mem_res, off, val);
180 * ti_sdma_is_omap3_rev - returns true if H/W is from OMAP3 series
181 * @sc: DMA device context
185 ti_sdma_is_omap3_rev(struct ti_sdma_softc *sc)
187 return (sc->sc_hw_rev == DMA4_OMAP3_REV);
191 * ti_sdma_is_omap4_rev - returns true if H/W is from OMAP4 series
192 * @sc: DMA device context
196 ti_sdma_is_omap4_rev(struct ti_sdma_softc *sc)
198 return (sc->sc_hw_rev == DMA4_OMAP4_REV);
202 * ti_sdma_intr - interrupt handler for all 4 DMA IRQs
205 * Called when any of the four DMA IRQs are triggered.
208 * DMA registers protected by internal mutex
214 ti_sdma_intr(void *arg)
216 struct ti_sdma_softc *sc = ti_sdma_sc;
220 struct ti_sdma_channel* channel;
224 for (j = 0; j < NUM_DMA_IRQS; j++) {
225 /* Get the flag interrupts (enabled) */
226 intr = ti_sdma_read_4(sc, DMA4_IRQSTATUS_L(j));
227 intr &= ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
228 if (intr == 0x00000000)
231 /* Loop through checking the status bits */
232 for (ch = 0; ch < NUM_DMA_CHANNELS; ch++) {
233 if (intr & (1 << ch)) {
234 channel = &sc->sc_channel[ch];
236 /* Read the CSR regsiter and verify we don't have a spurious IRQ */
237 csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
239 device_printf(sc->sc_dev, "Spurious DMA IRQ for channel "
244 /* Sanity check this channel is active */
245 if ((sc->sc_active_channels & (1 << ch)) == 0) {
246 device_printf(sc->sc_dev, "IRQ %d for a non-activated "
247 "channel %d\n", j, ch);
251 /* Check the status error codes */
252 if (csr & DMA4_CSR_DROP)
253 device_printf(sc->sc_dev, "Synchronization event drop "
254 "occurred during the transfer on channel %u\n",
256 if (csr & DMA4_CSR_SECURE_ERR)
257 device_printf(sc->sc_dev, "Secure transaction error event "
258 "on channel %u\n", ch);
259 if (csr & DMA4_CSR_MISALIGNED_ADRS_ERR)
260 device_printf(sc->sc_dev, "Misaligned address error event "
261 "on channel %u\n", ch);
262 if (csr & DMA4_CSR_TRANS_ERR) {
263 device_printf(sc->sc_dev, "Transaction error event on "
266 * Apparently according to linux code, there is an errata
267 * that says the channel is not disabled upon this error.
268 * They explicitly disable the channel here .. since I
269 * haven't seen the errata, I'm going to ignore for now.
273 /* Clear the status flags for the IRQ */
274 ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
275 ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
277 /* Call the callback for the given channel */
278 if (channel->callback)
279 channel->callback(ch, csr, channel->callback_data);
290 * ti_sdma_activate_channel - activates a DMA channel
291 * @ch: upon return contains the channel allocated
292 * @callback: a callback function to associate with the channel
293 * @data: optional data supplied when the callback is called
295 * Simply activates a channel be enabling and writing default values to the
296 * channel's register set. It doesn't start a transaction, just populates the
297 * internal data structures and sets defaults.
299 * Note this function doesn't enable interrupts, for that you need to call
300 * ti_sdma_enable_channel_irq(). If not using IRQ to detect the end of the
301 * transfer, you can use ti_sdma_status_poll() to detect a change in the
304 * A channel must be activated before any of the other DMA functions can be
308 * DMA registers protected by internal mutex
311 * 0 on success, otherwise an error code
314 ti_sdma_activate_channel(unsigned int *ch,
315 void (*callback)(unsigned int ch, uint32_t status, void *data),
318 struct ti_sdma_softc *sc = ti_sdma_sc;
319 struct ti_sdma_channel *channel = NULL;
332 /* Check to see if all channels are in use */
333 if (sc->sc_active_channels == 0xffffffff) {
338 /* Find the first non-active channel */
339 for (i = 0; i < NUM_DMA_CHANNELS; i++) {
340 if (!(sc->sc_active_channels & (0x1 << i))) {
341 sc->sc_active_channels |= (0x1 << i);
347 /* Get the channel struct and populate the fields */
348 channel = &sc->sc_channel[*ch];
350 channel->callback = callback;
351 channel->callback_data = data;
353 channel->need_reg_write = 1;
355 /* Set the default configuration for the DMA channel */
356 channel->reg_csdp = DMA4_CSDP_DATA_TYPE(0x2)
357 | DMA4_CSDP_SRC_BURST_MODE(0)
358 | DMA4_CSDP_DST_BURST_MODE(0)
359 | DMA4_CSDP_SRC_ENDIANISM(0)
360 | DMA4_CSDP_DST_ENDIANISM(0)
361 | DMA4_CSDP_WRITE_MODE(0)
362 | DMA4_CSDP_SRC_PACKED(0)
363 | DMA4_CSDP_DST_PACKED(0);
365 channel->reg_ccr = DMA4_CCR_DST_ADDRESS_MODE(1)
366 | DMA4_CCR_SRC_ADDRESS_MODE(1)
367 | DMA4_CCR_READ_PRIORITY(0)
368 | DMA4_CCR_WRITE_PRIORITY(0)
369 | DMA4_CCR_SYNC_TRIGGER(0)
370 | DMA4_CCR_FRAME_SYNC(0)
371 | DMA4_CCR_BLOCK_SYNC(0);
373 channel->reg_cicr = DMA4_CICR_TRANS_ERR_IE
374 | DMA4_CICR_SECURE_ERR_IE
375 | DMA4_CICR_SUPERVISOR_ERR_IE
376 | DMA4_CICR_MISALIGNED_ADRS_ERR_IE;
378 /* Clear all the channel registers, this should abort any transaction */
379 for (addr = DMA4_CCR(*ch); addr <= DMA4_COLOR(*ch); addr += 4)
380 ti_sdma_write_4(sc, addr, 0x00000000);
388 * ti_sdma_deactivate_channel - deactivates a channel
389 * @ch: the channel to deactivate
394 * DMA registers protected by internal mutex
397 * EH_HANDLED or EH_NOT_HANDLED
400 ti_sdma_deactivate_channel(unsigned int ch)
402 struct ti_sdma_softc *sc = ti_sdma_sc;
412 /* First check if the channel is currently active */
413 if ((sc->sc_active_channels & (1 << ch)) == 0) {
418 /* Mark the channel as inactive */
419 sc->sc_active_channels &= ~(1 << ch);
421 /* Disable all DMA interrupts for the channel. */
422 ti_sdma_write_4(sc, DMA4_CICR(ch), 0);
424 /* Make sure the DMA transfer is stopped. */
425 ti_sdma_write_4(sc, DMA4_CCR(ch), 0);
427 /* Clear the CSR register and IRQ status register */
428 ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
429 for (j = 0; j < NUM_DMA_IRQS; j++) {
430 ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
433 /* Clear all the channel registers, this should abort any transaction */
434 for (addr = DMA4_CCR(ch); addr <= DMA4_COLOR(ch); addr += 4)
435 ti_sdma_write_4(sc, addr, 0x00000000);
443 * ti_sdma_disable_channel_irq - disables IRQ's on the given channel
444 * @ch: the channel to disable IRQ's on
446 * Disable interrupt generation for the given channel.
449 * DMA registers protected by internal mutex
452 * EH_HANDLED or EH_NOT_HANDLED
455 ti_sdma_disable_channel_irq(unsigned int ch)
457 struct ti_sdma_softc *sc = ti_sdma_sc;
467 if ((sc->sc_active_channels & (1 << ch)) == 0) {
472 /* Disable all the individual error conditions */
473 sc->sc_channel[ch].reg_cicr = 0x0000;
474 ti_sdma_write_4(sc, DMA4_CICR(ch), 0x0000);
476 /* Disable the channel interrupt enable */
477 for (j = 0; j < NUM_DMA_IRQS; j++) {
478 irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
479 irq_enable &= ~(1 << ch);
481 ti_sdma_write_4(sc, DMA4_IRQENABLE_L(j), irq_enable);
484 /* Indicate the registers need to be rewritten on the next transaction */
485 sc->sc_channel[ch].need_reg_write = 1;
493 * ti_sdma_disable_channel_irq - enables IRQ's on the given channel
494 * @ch: the channel to enable IRQ's on
495 * @flags: bitmask of interrupt types to enable
497 * Flags can be a bitmask of the following options:
499 * DMA_IRQ_FLAG_HALF_FRAME_COMPL
500 * DMA_IRQ_FLAG_FRAME_COMPL
501 * DMA_IRQ_FLAG_START_LAST_FRAME
502 * DMA_IRQ_FLAG_BLOCK_COMPL
503 * DMA_IRQ_FLAG_ENDOF_PKT
508 * DMA registers protected by internal mutex
511 * EH_HANDLED or EH_NOT_HANDLED
514 ti_sdma_enable_channel_irq(unsigned int ch, uint32_t flags)
516 struct ti_sdma_softc *sc = ti_sdma_sc;
525 if ((sc->sc_active_channels & (1 << ch)) == 0) {
530 /* Always enable the error interrupts if we have interrupts enabled */
531 flags |= DMA4_CICR_TRANS_ERR_IE | DMA4_CICR_SECURE_ERR_IE |
532 DMA4_CICR_SUPERVISOR_ERR_IE | DMA4_CICR_MISALIGNED_ADRS_ERR_IE;
534 sc->sc_channel[ch].reg_cicr = flags;
536 /* Write the values to the register */
537 ti_sdma_write_4(sc, DMA4_CICR(ch), flags);
539 /* Enable the channel interrupt enable */
540 irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(0));
541 irq_enable |= (1 << ch);
543 ti_sdma_write_4(sc, DMA4_IRQENABLE_L(0), irq_enable);
545 /* Indicate the registers need to be rewritten on the next transaction */
546 sc->sc_channel[ch].need_reg_write = 1;
554 * ti_sdma_get_channel_status - returns the status of a given channel
555 * @ch: the channel number to get the status of
556 * @status: upon return will contain the status bitmask, see below for possible
566 * DMA_STATUS_TRANS_ERR
567 * DMA_STATUS_SECURE_ERR
568 * DMA_STATUS_SUPERVISOR_ERR
569 * DMA_STATUS_MISALIGNED_ADRS_ERR
570 * DMA_STATUS_DRAIN_END
574 * DMA registers protected by internal mutex
577 * EH_HANDLED or EH_NOT_HANDLED
580 ti_sdma_get_channel_status(unsigned int ch, uint32_t *status)
582 struct ti_sdma_softc *sc = ti_sdma_sc;
591 if ((sc->sc_active_channels & (1 << ch)) == 0) {
598 csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
607 * ti_sdma_start_xfer - starts a DMA transfer
608 * @ch: the channel number to set the endianness of
609 * @src_paddr: the source phsyical address
610 * @dst_paddr: the destination phsyical address
611 * @frmcnt: the number of frames per block
612 * @elmcnt: the number of elements in a frame, an element is either an 8, 16
613 * or 32-bit value as defined by ti_sdma_set_xfer_burst()
617 * DMA registers protected by internal mutex
620 * EH_HANDLED or EH_NOT_HANDLED
623 ti_sdma_start_xfer(unsigned int ch, unsigned int src_paddr,
624 unsigned long dst_paddr,
625 unsigned int frmcnt, unsigned int elmcnt)
627 struct ti_sdma_softc *sc = ti_sdma_sc;
628 struct ti_sdma_channel *channel;
637 if ((sc->sc_active_channels & (1 << ch)) == 0) {
642 channel = &sc->sc_channel[ch];
644 /* a) Write the CSDP register */
645 ti_sdma_write_4(sc, DMA4_CSDP(ch),
646 channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));
648 /* b) Set the number of element per frame CEN[23:0] */
649 ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);
651 /* c) Set the number of frame per block CFN[15:0] */
652 ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);
654 /* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
655 ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
656 ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);
658 /* e) Write the CCR register */
659 ti_sdma_write_4(sc, DMA4_CCR(ch), channel->reg_ccr);
661 /* f) - Set the source element index increment CSEI[15:0] */
662 ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);
664 /* - Set the source frame index increment CSFI[15:0] */
665 ti_sdma_write_4(sc, DMA4_CSF(ch), 0x0001);
667 /* - Set the destination element index increment CDEI[15:0]*/
668 ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);
670 /* - Set the destination frame index increment CDFI[31:0] */
671 ti_sdma_write_4(sc, DMA4_CDF(ch), 0x0001);
673 /* Clear the status register */
674 ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);
676 /* Write the start-bit and away we go */
677 ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
679 ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);
681 /* Clear the reg write flag */
682 channel->need_reg_write = 0;
690 * ti_sdma_start_xfer_packet - starts a packet DMA transfer
691 * @ch: the channel number to use for the transfer
692 * @src_paddr: the source physical address
693 * @dst_paddr: the destination physical address
694 * @frmcnt: the number of frames to transfer
695 * @elmcnt: the number of elements in a frame, an element is either an 8, 16
696 * or 32-bit value as defined by ti_sdma_set_xfer_burst()
697 * @pktsize: the number of elements in each transfer packet
699 * The @frmcnt and @elmcnt define the overall number of bytes to transfer,
700 * typically @frmcnt is 1 and @elmcnt contains the total number of elements.
701 * @pktsize is the size of each individual packet, there might be multiple
702 * packets per transfer. i.e. for the following with element size of 32-bits
704 * frmcnt = 1, elmcnt = 512, pktsize = 128
706 * Total transfer bytes = 1 * 512 = 512 elements or 2048 bytes
707 * Packets transferred = 128 / 512 = 4
711 * DMA registers protected by internal mutex
714 * EH_HANDLED or EH_NOT_HANDLED
717 ti_sdma_start_xfer_packet(unsigned int ch, unsigned int src_paddr,
718 unsigned long dst_paddr, unsigned int frmcnt,
719 unsigned int elmcnt, unsigned int pktsize)
721 struct ti_sdma_softc *sc = ti_sdma_sc;
722 struct ti_sdma_channel *channel;
731 if ((sc->sc_active_channels & (1 << ch)) == 0) {
736 channel = &sc->sc_channel[ch];
738 /* a) Write the CSDP register */
739 if (channel->need_reg_write)
740 ti_sdma_write_4(sc, DMA4_CSDP(ch),
741 channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));
743 /* b) Set the number of elements to transfer CEN[23:0] */
744 ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);
746 /* c) Set the number of frames to transfer CFN[15:0] */
747 ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);
749 /* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
750 ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
751 ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);
753 /* e) Write the CCR register */
754 ti_sdma_write_4(sc, DMA4_CCR(ch),
755 channel->reg_ccr | DMA4_CCR_PACKET_TRANS);
757 /* f) - Set the source element index increment CSEI[15:0] */
758 ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);
760 /* - Set the packet size, this is dependent on the sync source */
761 if (channel->reg_ccr & DMA4_CCR_SEL_SRC_DST_SYNC(1))
762 ti_sdma_write_4(sc, DMA4_CSF(ch), pktsize);
764 ti_sdma_write_4(sc, DMA4_CDF(ch), pktsize);
766 /* - Set the destination frame index increment CDFI[31:0] */
767 ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);
769 /* Clear the status register */
770 ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);
772 /* Write the start-bit and away we go */
773 ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
775 ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);
777 /* Clear the reg write flag */
778 channel->need_reg_write = 0;
786 * ti_sdma_stop_xfer - stops any currently active transfers
787 * @ch: the channel number to set the endianness of
789 * This function call is effectively a NOP if no transaction is in progress.
792 * DMA registers protected by internal mutex
795 * EH_HANDLED or EH_NOT_HANDLED
798 ti_sdma_stop_xfer(unsigned int ch)
800 struct ti_sdma_softc *sc = ti_sdma_sc;
809 if ((sc->sc_active_channels & (1 << ch)) == 0) {
814 /* Disable all DMA interrupts for the channel. */
815 ti_sdma_write_4(sc, DMA4_CICR(ch), 0);
817 /* Make sure the DMA transfer is stopped. */
818 ti_sdma_write_4(sc, DMA4_CCR(ch), 0);
820 /* Clear the CSR register and IRQ status register */
821 ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
822 for (j = 0; j < NUM_DMA_IRQS; j++) {
823 ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
826 /* Configuration registers need to be re-written on the next xfer */
827 sc->sc_channel[ch].need_reg_write = 1;
835 * ti_sdma_set_xfer_endianess - sets the endianness of subsequent transfers
836 * @ch: the channel number to set the endianness of
837 * @src: the source endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
838 * @dst: the destination endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
842 * DMA registers protected by internal mutex
845 * EH_HANDLED or EH_NOT_HANDLED
848 ti_sdma_set_xfer_endianess(unsigned int ch, unsigned int src, unsigned int dst)
850 struct ti_sdma_softc *sc = ti_sdma_sc;
858 if ((sc->sc_active_channels & (1 << ch)) == 0) {
863 sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_ENDIANISM(1);
864 sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_ENDIANISM(src);
866 sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_ENDIANISM(1);
867 sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_ENDIANISM(dst);
869 sc->sc_channel[ch].need_reg_write = 1;
877 * ti_sdma_set_xfer_burst - sets the source and destination element size
878 * @ch: the channel number to set the burst settings of
879 * @src: the source endianness (either DMA_BURST_NONE, DMA_BURST_16, DMA_BURST_32
881 * @dst: the destination endianness (either DMA_BURST_NONE, DMA_BURST_16,
882 * DMA_BURST_32 or DMA_BURST_64)
884 * This function sets the size of the elements for all subsequent transfers.
887 * DMA registers protected by internal mutex
890 * EH_HANDLED or EH_NOT_HANDLED
893 ti_sdma_set_xfer_burst(unsigned int ch, unsigned int src, unsigned int dst)
895 struct ti_sdma_softc *sc = ti_sdma_sc;
903 if ((sc->sc_active_channels & (1 << ch)) == 0) {
908 sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_BURST_MODE(0x3);
909 sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_BURST_MODE(src);
911 sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_BURST_MODE(0x3);
912 sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_BURST_MODE(dst);
914 sc->sc_channel[ch].need_reg_write = 1;
922 * ti_sdma_set_xfer_data_type - driver attach function
923 * @ch: the channel number to set the endianness of
924 * @type: the xfer data type (either DMA_DATA_8BITS_SCALAR, DMA_DATA_16BITS_SCALAR
925 * or DMA_DATA_32BITS_SCALAR)
929 * DMA registers protected by internal mutex
932 * EH_HANDLED or EH_NOT_HANDLED
935 ti_sdma_set_xfer_data_type(unsigned int ch, unsigned int type)
937 struct ti_sdma_softc *sc = ti_sdma_sc;
945 if ((sc->sc_active_channels & (1 << ch)) == 0) {
950 sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DATA_TYPE(0x3);
951 sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DATA_TYPE(type);
953 sc->sc_channel[ch].need_reg_write = 1;
961 * ti_sdma_set_callback - driver attach function
962 * @dev: dma device handle
967 * DMA registers protected by internal mutex
970 * EH_HANDLED or EH_NOT_HANDLED
973 ti_sdma_set_callback(unsigned int ch,
974 void (*callback)(unsigned int ch, uint32_t status, void *data),
977 struct ti_sdma_softc *sc = ti_sdma_sc;
985 if ((sc->sc_active_channels & (1 << ch)) == 0) {
990 sc->sc_channel[ch].callback = callback;
991 sc->sc_channel[ch].callback_data = data;
993 sc->sc_channel[ch].need_reg_write = 1;
1001 * ti_sdma_sync_params - sets channel sync settings
1002 * @ch: the channel number to set the sync on
1003 * @trigger: the number of the sync trigger, this depends on what other H/W
1004 * module is triggering/receiving the DMA transactions
1005 * @mode: flags describing the sync mode to use, it may have one or more of
1006 * the following bits set; TI_SDMA_SYNC_FRAME,
1007 * TI_SDMA_SYNC_BLOCK, TI_SDMA_SYNC_TRIG_ON_SRC.
1012 * DMA registers protected by internal mutex
1015 * EH_HANDLED or EH_NOT_HANDLED
1018 ti_sdma_sync_params(unsigned int ch, unsigned int trigger, unsigned int mode)
1020 struct ti_sdma_softc *sc = ti_sdma_sc;
1029 if ((sc->sc_active_channels & (1 << ch)) == 0) {
1034 ccr = sc->sc_channel[ch].reg_ccr;
1036 ccr &= ~DMA4_CCR_SYNC_TRIGGER(0x7F);
1037 ccr |= DMA4_CCR_SYNC_TRIGGER(trigger + 1);
1039 if (mode & TI_SDMA_SYNC_FRAME)
1040 ccr |= DMA4_CCR_FRAME_SYNC(1);
1042 ccr &= ~DMA4_CCR_FRAME_SYNC(1);
1044 if (mode & TI_SDMA_SYNC_BLOCK)
1045 ccr |= DMA4_CCR_BLOCK_SYNC(1);
1047 ccr &= ~DMA4_CCR_BLOCK_SYNC(1);
1049 if (mode & TI_SDMA_SYNC_TRIG_ON_SRC)
1050 ccr |= DMA4_CCR_SEL_SRC_DST_SYNC(1);
1052 ccr &= ~DMA4_CCR_SEL_SRC_DST_SYNC(1);
1054 sc->sc_channel[ch].reg_ccr = ccr;
1056 sc->sc_channel[ch].need_reg_write = 1;
1064 * ti_sdma_set_addr_mode - driver attach function
1065 * @ch: the channel number to set the endianness of
1066 * @rd_mode: the xfer source addressing mode (either DMA_ADDR_CONSTANT,
1067 * DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
1068 * DMA_ADDR_DOUBLE_INDEX)
1069 * @wr_mode: the xfer destination addressing mode (either DMA_ADDR_CONSTANT,
1070 * DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
1071 * DMA_ADDR_DOUBLE_INDEX)
1075 * DMA registers protected by internal mutex
1078 * EH_HANDLED or EH_NOT_HANDLED
1081 ti_sdma_set_addr_mode(unsigned int ch, unsigned int src_mode,
1082 unsigned int dst_mode)
1084 struct ti_sdma_softc *sc = ti_sdma_sc;
1093 if ((sc->sc_active_channels & (1 << ch)) == 0) {
1098 ccr = sc->sc_channel[ch].reg_ccr;
1100 ccr &= ~DMA4_CCR_SRC_ADDRESS_MODE(0x3);
1101 ccr |= DMA4_CCR_SRC_ADDRESS_MODE(src_mode);
1103 ccr &= ~DMA4_CCR_DST_ADDRESS_MODE(0x3);
1104 ccr |= DMA4_CCR_DST_ADDRESS_MODE(dst_mode);
1106 sc->sc_channel[ch].reg_ccr = ccr;
1108 sc->sc_channel[ch].need_reg_write = 1;
1116 * ti_sdma_probe - driver probe function
1117 * @dev: dma device handle
1125 ti_sdma_probe(device_t dev)
1128 if (!ofw_bus_status_okay(dev))
1131 if (!ofw_bus_is_compatible(dev, "ti,omap4430-sdma"))
1134 device_set_desc(dev, "TI sDMA Controller");
1139 * ti_sdma_attach - driver attach function
1140 * @dev: dma device handle
1142 * Initialises memory mapping/pointers to the DMA register set and requests
1143 * IRQs. This is effectively the setup function for the driver.
1146 * 0 on success or a negative error code failure.
1149 ti_sdma_attach(device_t dev)
1151 struct ti_sdma_softc *sc = device_get_softc(dev);
1152 unsigned int timeout;
1158 /* Setup the basics */
1161 /* No channels active at the moment */
1162 sc->sc_active_channels = 0x00000000;
1164 /* Mutex to protect the shared data structures */
1165 TI_SDMA_LOCK_INIT(sc);
1167 /* Get the memory resource for the register mapping */
1169 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
1170 if (sc->sc_mem_res == NULL)
1171 panic("%s: Cannot map registers", device_get_name(dev));
1173 /* Enable the interface and functional clocks */
1174 ti_sysc_clock_enable(device_get_parent(dev));
1176 /* Read the sDMA revision register and sanity check it's known */
1177 sc->sc_hw_rev = ti_sdma_read_4(sc,
1178 ti_sysc_get_rev_address_offset_host(device_get_parent(dev)));
1179 device_printf(dev, "sDMA revision %08x\n", sc->sc_hw_rev);
1181 if (!ti_sdma_is_omap4_rev(sc) && !ti_sdma_is_omap3_rev(sc)) {
1182 device_printf(sc->sc_dev, "error - unknown sDMA H/W revision\n");
1186 /* Disable all interrupts */
1187 for (i = 0; i < NUM_DMA_IRQS; i++) {
1188 ti_sdma_write_4(sc, DMA4_IRQENABLE_L(i), 0x00000000);
1191 /* Soft-reset is only supported on pre-OMAP44xx devices */
1192 if (ti_sdma_is_omap3_rev(sc)) {
1194 ti_sdma_write_4(sc, DMA4_OCP_SYSCONFIG, 0x0002);
1196 /* Set the timeout to 100ms*/
1197 timeout = (hz < 10) ? 1 : ((100 * hz) / 1000);
1199 /* Wait for DMA reset to complete */
1200 while ((ti_sdma_read_4(sc, DMA4_SYSSTATUS) & 0x1) == 0x0) {
1201 /* Sleep for a tick */
1202 pause("DMARESET", 1);
1204 if (timeout-- == 0) {
1205 device_printf(sc->sc_dev, "sDMA reset operation timed out\n");
1212 * Install interrupt handlers for the for possible interrupts. Any channel
1213 * can trip one of the four IRQs
1216 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1217 RF_ACTIVE | RF_SHAREABLE);
1218 if (sc->sc_irq_res == NULL)
1219 panic("Unable to setup the dma irq handler.\n");
1221 err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
1222 NULL, ti_sdma_intr, NULL, &ihl);
1224 panic("%s: Cannot register IRQ", device_get_name(dev));
1226 /* Store the DMA structure globally ... this driver should never be unloaded */
1232 static device_method_t ti_sdma_methods[] = {
1233 DEVMETHOD(device_probe, ti_sdma_probe),
1234 DEVMETHOD(device_attach, ti_sdma_attach),
1238 static driver_t ti_sdma_driver = {
1241 sizeof(struct ti_sdma_softc),
1244 DRIVER_MODULE(ti_sdma, simplebus, ti_sdma_driver, 0, 0);
1245 MODULE_DEPEND(ti_sdma, ti_sysc, 1, 1, 1);