2 *******************************************************************************
3 Copyright (C) 2015 Annapurna Labs Ltd.
5 This file may be licensed under the terms of the Annapurna Labs Commercial License Agreement.
8 Alternatively, this file can be distributed under the terms of the GNU General
9 Public License V2 as published by the Free Software Foundation and can be
10 found at http://www.gnu.org/licenses/gpl-2.0.html
12 Alternatively, redistribution and use in source and binary forms, with or
13 without modification, are permitted provided that the following conditions are
16 * Redistributions of source code must retain the above copyright notice,
17 this list of conditions and the following disclaimer.
19 * Redistributions in binary form must reproduce the above copyright
20 notice, this list of conditions and the following disclaimer in
21 the documentation and/or other materials provided with the
24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
25 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
28 ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
31 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *******************************************************************************/
39 * @file al_hal_udma_main.c
41 * @brief Universal DMA HAL driver for main functions (initialization, data path)
45 #include <al_hal_udma.h>
46 #include <al_hal_udma_config.h>
48 #define AL_UDMA_Q_RST_TOUT 10000 /* Queue reset timeout [uSecs] */
50 #define UDMA_STATE_IDLE 0x0
51 #define UDMA_STATE_NORMAL 0x1
52 #define UDMA_STATE_ABORT 0x2
53 #define UDMA_STATE_RESERVED 0x3
55 const char *const al_udma_states_name[] = {
63 #define AL_UDMA_INITIAL_RING_ID 1
66 #define AL_UDMA_Q_FLAGS_IGNORE_RING_ID AL_BIT(0)
67 #define AL_UDMA_Q_FLAGS_NO_COMP_UPDATE AL_BIT(1)
68 #define AL_UDMA_Q_FLAGS_EN_COMP_COAL AL_BIT(2)
71 static void al_udma_set_defaults(struct al_udma *udma)
74 uint8_t rev_id = udma->rev_id;
76 if (udma->type == UDMA_TX) {
77 struct unit_regs* tmp_unit_regs =
78 (struct unit_regs*)udma->udma_regs;
80 /* Setting the data fifo depth to 4K (256 strips of 16B)
81 * This allows the UDMA to have 16 outstanding writes */
82 if (rev_id >= AL_UDMA_REV_ID_2) {
83 al_reg_write32_masked(&tmp_unit_regs->m2s.m2s_rd.data_cfg,
84 UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK,
85 256 << UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_SHIFT);
88 if (rev_id == AL_UDMA_REV_ID_0)
89 /* disable AXI timeout for M0*/
90 al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 0);
92 /* set AXI timeout to 1M (~2.6 ms) */
93 al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 1000000);
95 al_reg_write32(&tmp_unit_regs->m2s.m2s_comp.cfg_application_ack
96 , 0); /* Ack time out */
99 if (rev_id == AL_UDMA_REV_ID_0) {
100 tmp = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
101 tmp &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
102 tmp |= 4 << UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT;
103 al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1
108 if (udma->type == UDMA_RX) {
110 &udma->udma_regs->s2m.s2m_comp.cfg_application_ack, 0);
116 * misc queue configurations
118 * @param udma_q udma queue data structure
122 static int al_udma_q_config(struct al_udma_q *udma_q)
127 if (udma_q->udma->type == UDMA_TX) {
128 reg_addr = &udma_q->q_regs->m2s_q.rlimit.mask;
130 val = al_reg_read32(reg_addr);
132 val &= ~UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_PAUSE_DMB;
133 al_reg_write32(reg_addr, val);
139 * set the queue's completion configuration register
141 * @param udma_q udma queue data structure
145 static int al_udma_q_config_compl(struct al_udma_q *udma_q)
150 if (udma_q->udma->type == UDMA_TX)
151 reg_addr = &udma_q->q_regs->m2s_q.comp_cfg;
153 reg_addr = &udma_q->q_regs->s2m_q.comp_cfg;
155 val = al_reg_read32(reg_addr);
157 if (udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE)
158 val &= ~UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
160 val |= UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
162 if (udma_q->flags & AL_UDMA_Q_FLAGS_EN_COMP_COAL)
163 val &= ~UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
165 val |= UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
167 al_reg_write32(reg_addr, val);
169 /* set the completion queue size */
170 if (udma_q->udma->type == UDMA_RX) {
172 &udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c);
173 val &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
174 /* the register expects it to be in words */
175 val |= (udma_q->cdesc_size >> 2)
176 & UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
177 al_reg_write32(&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c
184 * reset the queues pointers (Head, Tail, etc) and set the base addresses
186 * @param udma_q udma queue data structure
188 static int al_udma_q_set_pointers(struct al_udma_q *udma_q)
190 /* reset the descriptors ring pointers */
191 /* assert descriptor base address aligned. */
192 al_assert((AL_ADDR_LOW(udma_q->desc_phy_base) &
193 ~UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK) == 0);
194 al_reg_write32(&udma_q->q_regs->rings.drbp_low,
195 AL_ADDR_LOW(udma_q->desc_phy_base));
196 al_reg_write32(&udma_q->q_regs->rings.drbp_high,
197 AL_ADDR_HIGH(udma_q->desc_phy_base));
199 al_reg_write32(&udma_q->q_regs->rings.drl, udma_q->size);
201 /* if completion ring update disabled */
202 if (udma_q->cdesc_base_ptr == NULL) {
203 udma_q->flags |= AL_UDMA_Q_FLAGS_NO_COMP_UPDATE;
205 /* reset the completion descriptors ring pointers */
206 /* assert completion base address aligned. */
207 al_assert((AL_ADDR_LOW(udma_q->cdesc_phy_base) &
208 ~UDMA_M2S_Q_TCRBP_LOW_ADDR_MASK) == 0);
209 al_reg_write32(&udma_q->q_regs->rings.crbp_low,
210 AL_ADDR_LOW(udma_q->cdesc_phy_base));
211 al_reg_write32(&udma_q->q_regs->rings.crbp_high,
212 AL_ADDR_HIGH(udma_q->cdesc_phy_base));
214 al_udma_q_config_compl(udma_q);
219 * enable/disable udma queue
221 * @param udma_q udma queue data structure
222 * @param enable none zero value enables the queue, zero means disable
226 static int al_udma_q_enable(struct al_udma_q *udma_q, int enable)
228 uint32_t reg = al_reg_read32(&udma_q->q_regs->rings.cfg);
231 reg |= (UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
232 udma_q->status = AL_QUEUE_ENABLED;
234 reg &= ~(UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
235 udma_q->status = AL_QUEUE_DISABLED;
237 al_reg_write32(&udma_q->q_regs->rings.cfg, reg);
242 /************************ API functions ***************************************/
244 /* Initializations functions */
246 * Initialize the udma engine
248 int al_udma_init(struct al_udma *udma, struct al_udma_params *udma_params)
254 if (udma_params->num_of_queues > DMA_MAX_Q) {
255 al_err("udma: invalid num_of_queues parameter\n");
259 udma->type = udma_params->type;
260 udma->num_of_queues = udma_params->num_of_queues;
261 udma->gen_regs = &udma_params->udma_regs_base->gen;
263 if (udma->type == UDMA_TX)
264 udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->m2s;
266 udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->s2m;
268 udma->rev_id = al_udma_get_revision(udma_params->udma_regs_base);
270 if (udma_params->name == NULL)
273 udma->name = udma_params->name;
275 udma->state = UDMA_DISABLE;
276 for (i = 0; i < DMA_MAX_Q; i++) {
277 udma->udma_q[i].status = AL_QUEUE_NOT_INITIALIZED;
279 /* initialize configuration registers to correct values */
280 al_udma_set_defaults(udma);
281 al_dbg("udma [%s] initialized. base %p\n", udma->name,
287 * Initialize the udma queue data structure
289 int al_udma_q_init(struct al_udma *udma, uint32_t qid,
290 struct al_udma_q_params *q_params)
292 struct al_udma_q *udma_q;
297 if (qid >= udma->num_of_queues) {
298 al_err("udma: invalid queue id (%d)\n", qid);
302 if (udma->udma_q[qid].status == AL_QUEUE_ENABLED) {
303 al_err("udma: queue (%d) already enabled!\n", qid);
307 if (q_params->size < AL_UDMA_MIN_Q_SIZE) {
308 al_err("udma: queue (%d) size too small\n", qid);
312 if (q_params->size > AL_UDMA_MAX_Q_SIZE) {
313 al_err("udma: queue (%d) size too large\n", qid);
317 if (q_params->size & (q_params->size - 1)) {
318 al_err("udma: queue (%d) size (%d) must be power of 2\n",
319 q_params->size, qid);
323 udma_q = &udma->udma_q[qid];
324 /* set the queue's regs base address */
325 if (udma->type == UDMA_TX)
326 udma_q->q_regs = (union udma_q_regs __iomem *)
327 &udma->udma_regs->m2s.m2s_q[qid];
329 udma_q->q_regs = (union udma_q_regs __iomem *)
330 &udma->udma_regs->s2m.s2m_q[qid];
332 udma_q->adapter_rev_id = q_params->adapter_rev_id;
333 udma_q->size = q_params->size;
334 udma_q->size_mask = q_params->size - 1;
335 udma_q->desc_base_ptr = q_params->desc_base;
336 udma_q->desc_phy_base = q_params->desc_phy_base;
337 udma_q->cdesc_base_ptr = q_params->cdesc_base;
338 udma_q->cdesc_phy_base = q_params->cdesc_phy_base;
339 udma_q->cdesc_size = q_params->cdesc_size;
341 udma_q->next_desc_idx = 0;
342 udma_q->next_cdesc_idx = 0;
343 udma_q->end_cdesc_ptr = (uint8_t *) udma_q->cdesc_base_ptr +
344 (udma_q->size - 1) * udma_q->cdesc_size;
345 udma_q->comp_head_idx = 0;
346 udma_q->comp_head_ptr = (union al_udma_cdesc *)udma_q->cdesc_base_ptr;
347 udma_q->desc_ring_id = AL_UDMA_INITIAL_RING_ID;
348 udma_q->comp_ring_id = AL_UDMA_INITIAL_RING_ID;
350 udma_q->desc_ctrl_bits = AL_UDMA_INITIAL_RING_ID <<
351 AL_M2S_DESC_RING_ID_SHIFT;
353 udma_q->pkt_crnt_descs = 0;
355 udma_q->status = AL_QUEUE_DISABLED;
359 /* start hardware configuration: */
360 al_udma_q_config(udma_q);
361 /* reset the queue pointers */
362 al_udma_q_set_pointers(udma_q);
365 al_udma_q_enable(udma_q, 1);
367 al_dbg("udma [%s %d]: %s q init. size 0x%x\n"
368 " desc ring info: phys base 0x%llx virt base %p\n"
369 " cdesc ring info: phys base 0x%llx virt base %p "
371 udma_q->udma->name, udma_q->qid,
372 udma->type == UDMA_TX ? "Tx" : "Rx",
374 (unsigned long long)q_params->desc_phy_base,
376 (unsigned long long)q_params->cdesc_phy_base,
377 q_params->cdesc_base,
378 q_params->cdesc_size);
386 int al_udma_q_reset(struct al_udma_q *udma_q)
388 unsigned int remaining_time = AL_UDMA_Q_RST_TOUT;
389 uint32_t *status_reg;
392 uint32_t *q_sw_ctrl_reg;
396 /* De-assert scheduling and prefetch */
397 al_udma_q_enable(udma_q, 0);
399 /* Wait for scheduling and prefetch to stop */
400 status_reg = &udma_q->q_regs->rings.status;
402 while (remaining_time) {
403 uint32_t status = al_reg_read32(status_reg);
405 if (!(status & (UDMA_M2S_Q_STATUS_PREFETCH |
406 UDMA_M2S_Q_STATUS_SCHEDULER)))
413 if (!remaining_time) {
414 al_err("udma [%s %d]: %s timeout waiting for prefetch and "
415 "scheduler disable\n", udma_q->udma->name, udma_q->qid,
420 /* Wait for the completion queue to reach to the same pointer as the
421 * prefetch stopped at ([TR]DCP == [TR]CRHP) */
422 dcp_reg = &udma_q->q_regs->rings.dcp;
423 crhp_reg = &udma_q->q_regs->rings.crhp;
425 while (remaining_time) {
426 uint32_t dcp = al_reg_read32(dcp_reg);
427 uint32_t crhp = al_reg_read32(crhp_reg);
436 if (!remaining_time) {
437 al_err("udma [%s %d]: %s timeout waiting for dcp==crhp\n",
438 udma_q->udma->name, udma_q->qid, __func__);
442 /* Assert the queue reset */
443 if (udma_q->udma->type == UDMA_TX)
444 q_sw_ctrl_reg = &udma_q->q_regs->m2s_q.q_sw_ctrl;
446 q_sw_ctrl_reg = &udma_q->q_regs->s2m_q.q_sw_ctrl;
448 al_reg_write32(q_sw_ctrl_reg, UDMA_M2S_Q_SW_CTRL_RST_Q);
454 * return (by reference) a pointer to a specific queue date structure.
456 int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid,
457 struct al_udma_q **q_handle)
463 if (unlikely(qid >= udma->num_of_queues)) {
464 al_err("udma [%s]: invalid queue id (%d)\n", udma->name, qid);
467 *q_handle = &udma->udma_q[qid];
472 * Change the UDMA's state
474 int al_udma_state_set(struct al_udma *udma, enum al_udma_state state)
478 al_assert(udma != NULL);
479 if (state == udma->state)
480 al_dbg("udma [%s]: requested state identical to "
481 "current state (%d)\n", udma->name, state);
483 al_dbg("udma [%s]: change state from (%s) to (%s)\n",
484 udma->name, al_udma_states_name[udma->state],
485 al_udma_states_name[state]);
490 reg |= UDMA_M2S_CHANGE_STATE_DIS;
493 reg |= UDMA_M2S_CHANGE_STATE_NORMAL;
496 reg |= UDMA_M2S_CHANGE_STATE_ABORT;
499 al_err("udma: invalid state (%d)\n", state);
503 if (udma->type == UDMA_TX)
504 al_reg_write32(&udma->udma_regs->m2s.m2s.change_state, reg);
506 al_reg_write32(&udma->udma_regs->s2m.s2m.change_state, reg);
513 * return the current UDMA hardware state
515 enum al_udma_state al_udma_state_get(struct al_udma *udma)
523 if (udma->type == UDMA_TX)
524 state_reg = al_reg_read32(&udma->udma_regs->m2s.m2s.state);
526 state_reg = al_reg_read32(&udma->udma_regs->s2m.s2m.state);
528 comp_ctrl = AL_REG_FIELD_GET(state_reg,
529 UDMA_M2S_STATE_COMP_CTRL_MASK,
530 UDMA_M2S_STATE_COMP_CTRL_SHIFT);
531 stream_if = AL_REG_FIELD_GET(state_reg,
532 UDMA_M2S_STATE_STREAM_IF_MASK,
533 UDMA_M2S_STATE_STREAM_IF_SHIFT);
534 data_rd = AL_REG_FIELD_GET(state_reg,
535 UDMA_M2S_STATE_DATA_RD_CTRL_MASK,
536 UDMA_M2S_STATE_DATA_RD_CTRL_SHIFT);
537 desc_pref = AL_REG_FIELD_GET(state_reg,
538 UDMA_M2S_STATE_DESC_PREF_MASK,
539 UDMA_M2S_STATE_DESC_PREF_SHIFT);
541 al_assert(comp_ctrl != UDMA_STATE_RESERVED);
542 al_assert(stream_if != UDMA_STATE_RESERVED);
543 al_assert(data_rd != UDMA_STATE_RESERVED);
544 al_assert(desc_pref != UDMA_STATE_RESERVED);
546 /* if any of the states is abort then return abort */
547 if ((comp_ctrl == UDMA_STATE_ABORT) || (stream_if == UDMA_STATE_ABORT)
548 || (data_rd == UDMA_STATE_ABORT)
549 || (desc_pref == UDMA_STATE_ABORT))
552 /* if any of the states is normal then return normal */
553 if ((comp_ctrl == UDMA_STATE_NORMAL)
554 || (stream_if == UDMA_STATE_NORMAL)
555 || (data_rd == UDMA_STATE_NORMAL)
556 || (desc_pref == UDMA_STATE_NORMAL))
567 * get next completed packet from completion ring of the queue
569 uint32_t al_udma_cdesc_packet_get(
570 struct al_udma_q *udma_q,
571 volatile union al_udma_cdesc **cdesc)
574 volatile union al_udma_cdesc *curr;
577 /* this function requires the completion ring update */
578 al_assert(!(udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE));
580 /* comp_head points to the last comp desc that was processed */
581 curr = udma_q->comp_head_ptr;
582 comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
584 /* check if the completion descriptor is new */
585 if (unlikely(al_udma_new_cdesc(udma_q, comp_flags) == AL_FALSE))
587 /* if new desc found, increment the current packets descriptors */
588 count = udma_q->pkt_crnt_descs + 1;
589 while (!cdesc_is_last(comp_flags)) {
590 curr = al_cdesc_next_update(udma_q, curr);
591 comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
592 if (unlikely(al_udma_new_cdesc(udma_q, comp_flags)
594 /* the current packet here doesn't have all */
595 /* descriptors completed. log the current desc */
596 /* location and number of completed descriptors so */
597 /* far. then return */
598 udma_q->pkt_crnt_descs = count;
599 udma_q->comp_head_ptr = curr;
603 /* check against max descs per packet. */
604 al_assert(count <= udma_q->size);
606 /* return back the first descriptor of the packet */
607 *cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);
608 udma_q->pkt_crnt_descs = 0;
609 udma_q->comp_head_ptr = al_cdesc_next_update(udma_q, curr);
611 al_dbg("udma [%s %d]: packet completed. first desc %p (ixd 0x%x)"
612 " descs %d\n", udma_q->udma->name, udma_q->qid, *cdesc,
613 udma_q->next_cdesc_idx, count);
618 /** @} end of UDMA group */