2 *******************************************************************************
3 Copyright (C) 2015 Annapurna Labs Ltd.
5 This file may be licensed under the terms of the Annapurna Labs Commercial
8 Alternatively, this file can be distributed under the terms of the GNU General
9 Public License V2 as published by the Free Software Foundation and can be
10 found at http://www.gnu.org/licenses/gpl-2.0.html
12 Alternatively, redistribution and use in source and binary forms, with or
13 without modification, are permitted provided that the following conditions are
16 * Redistributions of source code must retain the above copyright notice,
17 this list of conditions and the following disclaimer.
19 * Redistributions in binary form must reproduce the above copyright
20 notice, this list of conditions and the following disclaimer in
21 the documentation and/or other materials provided with the
24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
25 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
28 ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
31 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *******************************************************************************/
39 * @file al_hal_udma_main.c
41 * @brief Universal DMA HAL driver for main functions (initialization, data path)
45 #include <al_hal_udma.h>
46 #include <al_hal_udma_config.h>
48 #define AL_UDMA_Q_RST_TOUT 10000 /* Queue reset timeout [uSecs] */
50 #define UDMA_STATE_IDLE 0x0
51 #define UDMA_STATE_NORMAL 0x1
52 #define UDMA_STATE_ABORT 0x2
53 #define UDMA_STATE_RESERVED 0x3
55 const char *const al_udma_states_name[] = {
63 #define AL_UDMA_INITIAL_RING_ID 1
66 #define AL_UDMA_Q_FLAGS_IGNORE_RING_ID AL_BIT(0)
67 #define AL_UDMA_Q_FLAGS_NO_COMP_UPDATE AL_BIT(1)
68 #define AL_UDMA_Q_FLAGS_EN_COMP_COAL AL_BIT(2)
/*
 * al_udma_set_defaults - program engine-wide default register values.
 *
 * @param udma	UDMA handle; udma->type and udma->rev_id must already be set.
 *
 * For a Tx (M2S) engine: widens the read-data FIFO (rev 2+), sets the AXI
 * timeout, and zeroes the completion application-ack timeout.  For an Rx
 * (S2M) engine: zeroes the completion application-ack timeout only.
 */
71 static void al_udma_set_defaults(struct al_udma *udma)
73 uint8_t rev_id = udma->rev_id;
75 if (udma->type == UDMA_TX) {
/* M2S registers live at the start of the unit register file; cast is safe
 * only because udma_regs points at the m2s block for a Tx engine. */
76 struct unit_regs* tmp_unit_regs =
77 (struct unit_regs*)udma->udma_regs;
79 /* Setting the data fifo depth to 4K (256 strips of 16B)
80 * This allows the UDMA to have 16 outstanding writes */
81 if (rev_id >= AL_UDMA_REV_ID_2) {
82 al_reg_write32_masked(&tmp_unit_regs->m2s.m2s_rd.data_cfg,
83 UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK,
84 256 << UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_SHIFT);
87 /* set AXI timeout to 1M (~2.6 ms) */
88 al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 1000000);
/* Disable the completion application-ack timeout (0 = no timeout). */
90 al_reg_write32(&tmp_unit_regs->m2s.m2s_comp.cfg_application_ack
91 , 0); /* Ack time out */
93 if (udma->type == UDMA_RX) {
/* Same ack-timeout disable on the S2M side. */
95 &udma->udma_regs->s2m.s2m_comp.cfg_application_ack, 0);
101 * misc queue configurations
103 * @param udma_q udma queue data structure
107 static int al_udma_q_config(struct al_udma_q *udma_q)
112 if (udma_q->udma->type == UDMA_TX) {
/* Tx only: read-modify-write the rate-limit mask register to clear the
 * internal-pause-on-DMB bit, so descriptor memory barriers do not pause
 * the queue's rate limiter. */
113 reg_addr = &udma_q->q_regs->m2s_q.rlimit.mask;
115 val = al_reg_read32(reg_addr);
117 val &= ~UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_PAUSE_DMB;
118 al_reg_write32(reg_addr, val);
124 * set the queue's completion configuration register
126 * @param udma_q udma queue data structure
130 static int al_udma_q_config_compl(struct al_udma_q *udma_q)
/* Pick the per-queue completion config register for this direction;
 * the M2S bit definitions are reused for the S2M register layout. */
135 if (udma_q->udma->type == UDMA_TX)
136 reg_addr = &udma_q->q_regs->m2s_q.comp_cfg;
138 reg_addr = &udma_q->q_regs->s2m_q.comp_cfg;
140 val = al_reg_read32(reg_addr);
/* Ring update: disabled when the queue was created without a completion
 * ring (AL_UDMA_Q_FLAGS_NO_COMP_UPDATE), enabled otherwise. */
142 if (udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE)
143 val &= ~UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
145 val |= UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
/* Coalescing bit is a *disable* bit, hence the inverted logic. */
147 if (udma_q->flags & AL_UDMA_Q_FLAGS_EN_COMP_COAL)
148 val &= ~UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
150 val |= UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
152 al_reg_write32(reg_addr, val);
154 /* set the completion queue size */
155 if (udma_q->udma->type == UDMA_RX) {
157 &udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c);
158 val &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
159 /* the register expects it to be in words */
160 val |= (udma_q->cdesc_size >> 2)
161 & UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
162 al_reg_write32(&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c
169 * reset the queues pointers (Head, Tail, etc) and set the base addresses
171 * @param udma_q udma queue data structure
173 static int al_udma_q_set_pointers(struct al_udma_q *udma_q)
175 /* reset the descriptors ring pointers */
176 /* assert descriptor base address aligned. */
177 al_assert((AL_ADDR_LOW(udma_q->desc_phy_base) &
178 ~UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK) == 0);
/* Program descriptor ring base (split into low/high 32-bit halves)
 * and ring length. */
179 al_reg_write32(&udma_q->q_regs->rings.drbp_low,
180 AL_ADDR_LOW(udma_q->desc_phy_base));
181 al_reg_write32(&udma_q->q_regs->rings.drbp_high,
182 AL_ADDR_HIGH(udma_q->desc_phy_base));
184 al_reg_write32(&udma_q->q_regs->rings.drl, udma_q->size);
186 /* if completion ring update disabled */
187 if (udma_q->cdesc_base_ptr == NULL) {
/* No completion ring supplied: record that so the completion config
 * (written below via al_udma_q_config_compl) disables ring update. */
188 udma_q->flags |= AL_UDMA_Q_FLAGS_NO_COMP_UPDATE;
190 /* reset the completion descriptors ring pointers */
191 /* assert completion base address aligned. */
192 al_assert((AL_ADDR_LOW(udma_q->cdesc_phy_base) &
193 ~UDMA_M2S_Q_TCRBP_LOW_ADDR_MASK) == 0);
194 al_reg_write32(&udma_q->q_regs->rings.crbp_low,
195 AL_ADDR_LOW(udma_q->cdesc_phy_base));
196 al_reg_write32(&udma_q->q_regs->rings.crbp_high,
197 AL_ADDR_HIGH(udma_q->cdesc_phy_base));
/* Apply the completion configuration last, after bases are set. */
199 al_udma_q_config_compl(udma_q);
204 * enable/disable udma queue
206 * @param udma_q udma queue data structure
207 * @param enable none zero value enables the queue, zero means disable
211 static int al_udma_q_enable(struct al_udma_q *udma_q, int enable)
/* Read-modify-write the queue config: prefetch + scheduling bits are
 * set together on enable and cleared together on disable; the software
 * status field is kept in sync with the hardware bits. */
213 uint32_t reg = al_reg_read32(&udma_q->q_regs->rings.cfg);
216 reg |= (UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
217 udma_q->status = AL_QUEUE_ENABLED;
219 reg &= ~(UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
220 udma_q->status = AL_QUEUE_DISABLED;
222 al_reg_write32(&udma_q->q_regs->rings.cfg, reg);
227 /************************ API functions ***************************************/
229 /* Initializations functions */
231 * Initialize the udma engine
/*
 * Validates the parameters, copies them into the handle, selects the
 * M2S or S2M register file by engine type, reads the hardware revision,
 * marks all queues uninitialized, and programs default register values.
 */
233 int al_udma_init(struct al_udma *udma, struct al_udma_params *udma_params)
239 if (udma_params->num_of_queues > DMA_MAX_Q) {
240 al_err("udma: invalid num_of_queues parameter\n");
244 udma->type = udma_params->type;
245 udma->num_of_queues = udma_params->num_of_queues;
246 udma->gen_regs = &udma_params->udma_regs_base->gen;
/* The handle keeps a direction-specific view of the register file. */
248 if (udma->type == UDMA_TX)
249 udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->m2s;
251 udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->s2m;
253 udma->rev_id = al_udma_get_revision(udma_params->udma_regs_base);
255 if (udma_params->name == NULL)
258 udma->name = udma_params->name;
260 udma->state = UDMA_DISABLE;
/* Every queue slot starts out not-initialized; al_udma_q_init()
 * brings individual queues up later. */
261 for (i = 0; i < DMA_MAX_Q; i++) {
262 udma->udma_q[i].status = AL_QUEUE_NOT_INITIALIZED;
264 /* initialize configuration registers to correct values */
265 al_udma_set_defaults(udma);
266 al_dbg("udma [%s] initialized. base %p\n", udma->name,
272 * Initialize the udma queue data structure
/*
 * Validates qid and the requested ring size (bounds and power-of-2),
 * fills in the queue data structure (register base, ring pointers,
 * software indices, initial ring-id/phase), then configures the
 * hardware (misc config, ring pointers) and enables the queue.
 *
 * Fix: the power-of-2 error message prints the queue id first and the
 * size second ("queue (%d) size (%d)"), but the arguments were passed
 * as (size, qid) — swapped to (qid, size) to match the format string.
 */
274 int al_udma_q_init(struct al_udma *udma, uint32_t qid,
275 struct al_udma_q_params *q_params)
277 struct al_udma_q *udma_q;
282 if (qid >= udma->num_of_queues) {
283 al_err("udma: invalid queue id (%d)\n", qid);
287 if (udma->udma_q[qid].status == AL_QUEUE_ENABLED) {
288 al_err("udma: queue (%d) already enabled!\n", qid);
292 if (q_params->size < AL_UDMA_MIN_Q_SIZE) {
293 al_err("udma: queue (%d) size too small\n", qid);
297 if (q_params->size > AL_UDMA_MAX_Q_SIZE) {
298 al_err("udma: queue (%d) size too large\n", qid);
302 if (q_params->size & (q_params->size - 1)) {
303 al_err("udma: queue (%d) size (%d) must be power of 2\n",
304 qid, q_params->size);
308 udma_q = &udma->udma_q[qid];
309 /* set the queue's regs base address */
310 if (udma->type == UDMA_TX)
311 udma_q->q_regs = (union udma_q_regs __iomem *)
312 &udma->udma_regs->m2s.m2s_q[qid];
314 udma_q->q_regs = (union udma_q_regs __iomem *)
315 &udma->udma_regs->s2m.s2m_q[qid];
/* Copy ring geometry; size_mask relies on size being a power of 2. */
317 udma_q->adapter_rev_id = q_params->adapter_rev_id;
318 udma_q->size = q_params->size;
319 udma_q->size_mask = q_params->size - 1;
320 udma_q->desc_base_ptr = q_params->desc_base;
321 udma_q->desc_phy_base = q_params->desc_phy_base;
322 udma_q->cdesc_base_ptr = q_params->cdesc_base;
323 udma_q->cdesc_phy_base = q_params->cdesc_phy_base;
324 udma_q->cdesc_size = q_params->cdesc_size;
/* Reset software indices; end_cdesc_ptr caches the last completion
 * descriptor's address to avoid recomputing it on wrap-around. */
326 udma_q->next_desc_idx = 0;
327 udma_q->next_cdesc_idx = 0;
328 udma_q->end_cdesc_ptr = (uint8_t *) udma_q->cdesc_base_ptr +
329 (udma_q->size - 1) * udma_q->cdesc_size;
330 udma_q->comp_head_idx = 0;
331 udma_q->comp_head_ptr = (union al_udma_cdesc *)udma_q->cdesc_base_ptr;
/* Ring-id (phase) starts at AL_UDMA_INITIAL_RING_ID for both rings. */
332 udma_q->desc_ring_id = AL_UDMA_INITIAL_RING_ID;
333 udma_q->comp_ring_id = AL_UDMA_INITIAL_RING_ID;
335 udma_q->desc_ctrl_bits = AL_UDMA_INITIAL_RING_ID <<
336 AL_M2S_DESC_RING_ID_SHIFT;
338 udma_q->pkt_crnt_descs = 0;
340 udma_q->status = AL_QUEUE_DISABLED;
344 /* start hardware configuration: */
345 al_udma_q_config(udma_q);
346 /* reset the queue pointers */
347 al_udma_q_set_pointers(udma_q);
350 al_udma_q_enable(udma_q, 1);
352 al_dbg("udma [%s %d]: %s q init. size 0x%x\n"
353 " desc ring info: phys base 0x%llx virt base %p)",
354 udma_q->udma->name, udma_q->qid,
355 udma->type == UDMA_TX ? "Tx" : "Rx",
357 (unsigned long long)q_params->desc_phy_base,
358 q_params->desc_base);
359 al_dbg(" cdesc ring info: phys base 0x%llx virt base %p entry size 0x%x",
360 (unsigned long long)q_params->cdesc_phy_base,
361 q_params->cdesc_base,
362 q_params->cdesc_size);
/*
 * al_udma_q_reset - safely reset a single queue.
 *
 * Sequence: disable the queue, wait (bounded by AL_UDMA_Q_RST_TOUT) for
 * the prefetch and scheduler engines to drain, wait for the completion
 * head to catch up with the descriptor current pointer (DCP == CRHP),
 * then assert the per-queue software reset bit.
 */
370 int al_udma_q_reset(struct al_udma_q *udma_q)
372 unsigned int remaining_time = AL_UDMA_Q_RST_TOUT;
373 uint32_t *status_reg;
376 uint32_t *q_sw_ctrl_reg;
380 /* De-assert scheduling and prefetch */
381 al_udma_q_enable(udma_q, 0);
383 /* Wait for scheduling and prefetch to stop */
384 status_reg = &udma_q->q_regs->rings.status;
386 while (remaining_time) {
387 uint32_t status = al_reg_read32(status_reg);
389 if (!(status & (UDMA_M2S_Q_STATUS_PREFETCH |
390 UDMA_M2S_Q_STATUS_SCHEDULER)))
/* Timeout is a hard error: resetting while engines run is unsafe. */
397 if (!remaining_time) {
398 al_err("udma [%s %d]: %s timeout waiting for prefetch and "
399 "scheduler disable\n", udma_q->udma->name, udma_q->qid,
404 /* Wait for the completion queue to reach to the same pointer as the
405 * prefetch stopped at ([TR]DCP == [TR]CRHP) */
406 dcp_reg = &udma_q->q_regs->rings.dcp;
407 crhp_reg = &udma_q->q_regs->rings.crhp;
409 while (remaining_time) {
410 uint32_t dcp = al_reg_read32(dcp_reg);
411 uint32_t crhp = al_reg_read32(crhp_reg);
420 if (!remaining_time) {
421 al_err("udma [%s %d]: %s timeout waiting for dcp==crhp\n",
422 udma_q->udma->name, udma_q->qid, __func__);
426 /* Assert the queue reset */
427 if (udma_q->udma->type == UDMA_TX)
428 q_sw_ctrl_reg = &udma_q->q_regs->m2s_q.q_sw_ctrl;
430 q_sw_ctrl_reg = &udma_q->q_regs->s2m_q.q_sw_ctrl;
432 al_reg_write32(q_sw_ctrl_reg, UDMA_M2S_Q_SW_CTRL_RST_Q);
438 * return (by reference) a pointer to a specific queue date structure.
/*
 * @param udma     UDMA handle
 * @param qid      queue index; must be below udma->num_of_queues
 * @param q_handle out: set to the queue structure on success
 */
440 int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid,
441 struct al_udma_q **q_handle)
447 if (unlikely(qid >= udma->num_of_queues)) {
448 al_err("udma [%s]: invalid queue id (%d)\n", udma->name, qid);
451 *q_handle = &udma->udma_q[qid];
456 * Change the UDMA's state
/*
 * Builds a change-state command from the requested al_udma_state and
 * writes it to the direction-specific change_state register.  A request
 * for the current state is only logged, not treated as an error.
 */
458 int al_udma_state_set(struct al_udma *udma, enum al_udma_state state)
462 al_assert(udma != NULL);
463 if (state == udma->state)
464 al_dbg("udma [%s]: requested state identical to "
465 "current state (%d)\n", udma->name, state);
467 al_dbg("udma [%s]: change state from (%s) to (%s)\n",
468 udma->name, al_udma_states_name[udma->state],
469 al_udma_states_name[state]);
/* Map requested state to the corresponding change-state command bit;
 * any other value is rejected as invalid. */
474 reg |= UDMA_M2S_CHANGE_STATE_DIS;
477 reg |= UDMA_M2S_CHANGE_STATE_NORMAL;
480 reg |= UDMA_M2S_CHANGE_STATE_ABORT;
483 al_err("udma: invalid state (%d)\n", state);
487 if (udma->type == UDMA_TX)
488 al_reg_write32(&udma->udma_regs->m2s.m2s.change_state, reg);
490 al_reg_write32(&udma->udma_regs->s2m.s2m.change_state, reg);
497 * return the current UDMA hardware state
/*
 * Reads the direction-specific state register and extracts the four
 * sub-unit state fields (completion control, stream interface, data
 * read control, descriptor prefetch).  The overall state reported is
 * the "strongest" of the four: abort wins over normal, normal wins
 * over idle.
 */
499 enum al_udma_state al_udma_state_get(struct al_udma *udma)
507 if (udma->type == UDMA_TX)
508 state_reg = al_reg_read32(&udma->udma_regs->m2s.m2s.state);
510 state_reg = al_reg_read32(&udma->udma_regs->s2m.s2m.state);
512 comp_ctrl = AL_REG_FIELD_GET(state_reg,
513 UDMA_M2S_STATE_COMP_CTRL_MASK,
514 UDMA_M2S_STATE_COMP_CTRL_SHIFT);
515 stream_if = AL_REG_FIELD_GET(state_reg,
516 UDMA_M2S_STATE_STREAM_IF_MASK,
517 UDMA_M2S_STATE_STREAM_IF_SHIFT);
518 data_rd = AL_REG_FIELD_GET(state_reg,
519 UDMA_M2S_STATE_DATA_RD_CTRL_MASK,
520 UDMA_M2S_STATE_DATA_RD_CTRL_SHIFT);
521 desc_pref = AL_REG_FIELD_GET(state_reg,
522 UDMA_M2S_STATE_DESC_PREF_MASK,
523 UDMA_M2S_STATE_DESC_PREF_SHIFT);
/* Hardware should never report the reserved encoding for any field. */
525 al_assert(comp_ctrl != UDMA_STATE_RESERVED);
526 al_assert(stream_if != UDMA_STATE_RESERVED);
527 al_assert(data_rd != UDMA_STATE_RESERVED);
528 al_assert(desc_pref != UDMA_STATE_RESERVED);
530 /* if any of the states is abort then return abort */
531 if ((comp_ctrl == UDMA_STATE_ABORT) || (stream_if == UDMA_STATE_ABORT)
532 || (data_rd == UDMA_STATE_ABORT)
533 || (desc_pref == UDMA_STATE_ABORT))
536 /* if any of the states is normal then return normal */
537 if ((comp_ctrl == UDMA_STATE_NORMAL)
538 || (stream_if == UDMA_STATE_NORMAL)
539 || (data_rd == UDMA_STATE_NORMAL)
540 || (desc_pref == UDMA_STATE_NORMAL))
551 * get next completed packet from completion ring of the queue
/*
 * Walks completion descriptors starting at comp_head_ptr, using the
 * ring-id/phase check (al_udma_new_cdesc) to detect new entries and the
 * "last" flag to find the packet boundary.  Returns the first completed
 * descriptor of the packet via *cdesc.  If only part of a packet has
 * completed, the partial progress (count and head pointer) is saved in
 * the queue so a later call resumes where this one stopped.
 */
553 uint32_t al_udma_cdesc_packet_get(
554 struct al_udma_q *udma_q,
555 volatile union al_udma_cdesc **cdesc)
558 volatile union al_udma_cdesc *curr;
561 /* this function requires the completion ring update */
562 al_assert(!(udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE));
564 /* comp_head points to the last comp desc that was processed */
565 curr = udma_q->comp_head_ptr;
/* Descriptors are little-endian in memory; convert before testing. */
566 comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
568 /* check if the completion descriptor is new */
569 if (unlikely(al_udma_new_cdesc(udma_q, comp_flags) == AL_FALSE))
571 /* if new desc found, increment the current packets descriptors */
572 count = udma_q->pkt_crnt_descs + 1;
573 while (!cdesc_is_last(comp_flags)) {
574 curr = al_cdesc_next_update(udma_q, curr);
575 comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
576 if (unlikely(al_udma_new_cdesc(udma_q, comp_flags)
578 /* the current packet here doesn't have all */
579 /* descriptors completed. log the current desc */
580 /* location and number of completed descriptors so */
581 /* far. then return */
582 udma_q->pkt_crnt_descs = count;
583 udma_q->comp_head_ptr = curr;
587 /* check against max descs per packet. */
588 al_assert(count <= udma_q->size);
590 /* return back the first descriptor of the packet */
591 *cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);
/* Packet fully consumed: clear partial-progress state and advance the
 * head past the packet's last descriptor. */
592 udma_q->pkt_crnt_descs = 0;
593 udma_q->comp_head_ptr = al_cdesc_next_update(udma_q, curr);
595 al_dbg("udma [%s %d]: packet completed. first desc %p (ixd 0x%x)"
596 " descs %d\n", udma_q->udma->name, udma_q->qid, *cdesc,
597 udma_q->next_cdesc_idx, count);
602 /** @} end of UDMA group */