2 *******************************************************************************
3 Copyright (C) 2015 Annapurna Labs Ltd.
5 This file may be licensed under the terms of the Annapurna Labs Commercial
8 Alternatively, this file can be distributed under the terms of the GNU General
9 Public License V2 as published by the Free Software Foundation and can be
10 found at http://www.gnu.org/licenses/gpl-2.0.html
12 Alternatively, redistribution and use in source and binary forms, with or
13 without modification, are permitted provided that the following conditions are
16 * Redistributions of source code must retain the above copyright notice,
17 this list of conditions and the following disclaimer.
19 * Redistributions in binary form must reproduce the above copyright
20 notice, this list of conditions and the following disclaimer in
21 the documentation and/or other materials provided with the
24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
25 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
28 ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
31 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *******************************************************************************/
38 * @defgroup group_udma_config UDMA Config
39 * @ingroup group_udma_api
42 * @file al_hal_udma_config.h
44 * @brief C Header file for the Universal DMA HAL driver for configuration APIs
48 #ifndef __AL_HAL_UDMA_CONFIG_H__
49 #define __AL_HAL_UDMA_CONFIG_H__
51 #include <al_hal_udma.h>
60 /** Scheduling mode */
/** Scheduling mode (M2S queue arbitration) */
enum al_udma_sch_mode {
	STRICT,			/* Strict priority */
	SRR,			/* Simple Round Robin */
	DWRR			/* Deficit Weighted Round Robin */
};
67 /** AXI configuration */
68 struct al_udma_axi_conf {
69 uint32_t axi_timeout; /* Timeout for AXI transactions */
70 uint8_t arb_promotion; /* arbitration promotion */
71 al_bool swap_8_bytes; /* enable 8 bytes swap instead of 4 bytes */
72 al_bool swap_s2m_data;
73 al_bool swap_s2m_desc;
74 al_bool swap_m2s_data;
75 al_bool swap_m2s_desc;
78 /** UDMA AXI M2S configuration */
79 struct al_udma_axi_submaster {
80 uint8_t id; /* AXI ID */
90 /** UDMA AXI M2S configuration */
91 struct al_udma_m2s_axi_conf {
92 struct al_udma_axi_submaster comp_write;
93 struct al_udma_axi_submaster data_read;
94 struct al_udma_axi_submaster desc_read;
95 al_bool break_on_max_boundary; /* Data read break on max boundary */
96 uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
97 uint8_t ostand_max_data_read;
98 uint8_t ostand_max_desc_read;
99 uint8_t ostand_max_comp_req;
100 uint8_t ostand_max_comp_write;
103 /** UDMA AXI S2M configuration */
104 struct al_udma_s2m_axi_conf {
105 struct al_udma_axi_submaster data_write;
106 struct al_udma_axi_submaster desc_read;
107 struct al_udma_axi_submaster comp_write;
108 al_bool break_on_max_boundary; /* Data read break on max boundary */
109 uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
110 uint8_t ostand_max_data_req;
111 uint8_t ostand_max_data_write;
112 uint8_t ostand_max_comp_req;
113 uint8_t ostand_max_comp_write;
114 uint8_t ostand_max_desc_read;
115 uint8_t ack_fifo_depth; /* size of the stream application ack fifo */
118 /** M2S error logging */
119 struct al_udma_err_log {
120 uint32_t error_status;
124 /** M2S max packet size configuration */
125 struct al_udma_m2s_pkt_len_conf {
126 uint32_t max_pkt_size;
127 al_bool encode_64k_as_zero;
130 /** M2S Descriptor Prefetch configuration */
131 struct al_udma_m2s_desc_pref_conf {
132 uint8_t desc_fifo_depth;
133 enum al_udma_sch_mode sch_mode; /* Scheduling mode
134 * (either strict or RR) */
136 uint8_t max_desc_per_packet; /* max number of descriptors to
138 /* in one burst (5b) */
140 uint8_t min_burst_above_thr; /* min burst size when fifo above
143 uint8_t min_burst_below_thr; /* min burst size when fifo below
146 uint8_t max_pkt_limit; /* maximum number of packets in the data
147 * read FIFO, defined based on header
150 uint16_t data_fifo_depth; /* maximum number of data beats in the
152 * defined based on header FIFO size
156 /** S2M Descriptor Prefetch configuration */
157 struct al_udma_s2m_desc_pref_conf {
158 uint8_t desc_fifo_depth;
159 enum al_udma_sch_mode sch_mode; /* Scheduling mode *
160 * (either strict or RR)
163 al_bool q_promotion; /* enable promotion */
164 al_bool force_promotion; /* force promotion */
165 al_bool en_pref_prediction; /* enable prefetch prediction */
166 uint8_t promotion_th; /* Threshold for queue promotion */
169 uint8_t min_burst_above_thr; /* min burst size when fifo above
172 uint8_t min_burst_below_thr; /* min burst size when fifo below
175 uint8_t a_full_thr; /* almost full threshold */
178 /** S2M Data write configuration */
179 struct al_udma_s2m_data_write_conf {
180 uint16_t data_fifo_depth; /* maximum number of data beats in the
181 * data write FIFO, defined based on
184 uint8_t max_pkt_limit; /* maximum number of packets in the
185 * data write FIFO,defined based on
189 uint32_t desc_wait_timer; /* waiting time for the host to write
190 * new descriptor to the queue
191 * (for the current packet in process)
193 uint32_t flags; /* bitwise of flags of s2m
194 * data_cfg_2 register
198 /** S2M Completion configuration */
199 struct al_udma_s2m_completion_conf {
200 uint8_t desc_size; /* Size of completion descriptor
203 al_bool cnt_words; /* Completion fifo in use counter:
204 * AL_TRUE words, AL_FALSE descriptors
206 al_bool q_promotion; /* Enable promotion of the current
207 * unack in progress */
208 /* in the completion write scheduler */
209 al_bool force_rr; /* force RR arbitration in the
212 // uint8_t ack_fifo_depth; /* size of the stream application ack fifo */
213 uint8_t q_free_min; /* minimum number of free completion
216 /* to qualify for promotion */
218 uint16_t comp_fifo_depth; /* Size of completion fifo in words */
219 uint16_t unack_fifo_depth; /* Size of unacked fifo in descs */
220 uint32_t timeout; /* Ack timeout from stream interface */
223 /** M2S UDMA DWRR configuration */
224 struct al_udma_m2s_dwrr_conf {
229 uint32_t deficit_init_val;
232 /** M2S DMA Rate Limitation mode */
233 struct al_udma_m2s_rlimit_mode {
235 uint16_t short_cycle_sz;
236 uint32_t token_init_val;
239 /** M2S Stream/Q Rate Limitation */
240 struct al_udma_m2s_rlimit_cfg {
241 uint32_t max_burst_sz; /* maximum number of accumulated bytes in the
244 uint16_t long_cycle_sz; /* number of short cycles between token fill */
245 uint32_t long_cycle; /* number of bits to add in each long cycle */
246 uint32_t short_cycle; /* number of bits to add in each cycle */
247 uint32_t mask; /* mask the different types of rate limiters */
250 enum al_udma_m2s_rlimit_action {
251 AL_UDMA_STRM_RLIMIT_ENABLE,
252 AL_UDMA_STRM_RLIMIT_PAUSE,
253 AL_UDMA_STRM_RLIMIT_RESET
256 /** M2S UDMA Q scheduling configuration */
257 struct al_udma_m2s_q_dwrr_conf {
258 uint32_t max_deficit_cnt_sz; /*maximum number of accumulated bytes
259 * in the deficit counter
261 al_bool strict; /* bypass DWRR */
267 /** M2S UDMA / UDMA Q scheduling configuration */
268 struct al_udma_m2s_sc {
269 enum al_udma_sch_mode sch_mode; /* Scheduling Mode */
270 struct al_udma_m2s_dwrr_conf dwrr; /* DWRR configuration */
273 /** UDMA / UDMA Q rate limitation configuration */
274 struct al_udma_m2s_rlimit {
275 struct al_udma_m2s_rlimit_mode rlimit_mode;
276 /* rate limitation enablers */
278 struct al_udma_tkn_bkt_conf token_bkt; /* Token Bucket configuration */
282 /** UDMA Data read configuration */
283 struct al_udma_m2s_data_rd_conf {
284 uint8_t max_rd_d_beats; /* max burst size for reading data
285 * (in AXI beats-128b) (5b)
287 uint8_t max_rd_d_out_req; /* max number of outstanding data
290 uint16_t max_rd_d_out_beats; /* max num. of data read beats (10b) */
293 /** M2S UDMA completion and application timeouts */
294 struct al_udma_m2s_comp_timeouts {
295 enum al_udma_sch_mode sch_mode; /* Scheduling mode
296 * (either strict or RR)
298 al_bool enable_q_promotion;
299 uint8_t unack_fifo_depth; /* unacked desc fifo size */
300 uint8_t comp_fifo_depth; /* desc fifo size */
301 uint32_t coal_timeout; /* (24b) */
302 uint32_t app_timeout; /* (24b) */
305 /** S2M UDMA per queue completion configuration */
306 struct al_udma_s2m_q_comp_conf {
307 al_bool dis_comp_coal; /* disable completion coalescing */
308 al_bool en_comp_ring_update; /* enable writing completion descs */
309 uint32_t comp_timer; /* completion coalescing timer */
310 al_bool en_hdr_split; /* enable header split */
311 al_bool force_hdr_split; /* force header split */
312 uint16_t hdr_split_size; /* size used for the header split */
313 uint8_t q_qos; /* queue QoS */
316 /** UDMA per queue VMID control configuration */
317 struct al_udma_gen_vmid_q_conf {
318 /* Enable usage of the VMID per queue according to 'vmid' */
321 /* Enable usage of the VMID from the descriptor buffer address 63:48 */
324 /* VMID to be applied when 'queue_en' is asserted */
327 /* VMADDR to be applied to msbs when 'desc_en' is asserted.
328 * Relevant for revisions >= AL_UDMA_REV_ID_REV2 */
332 /** UDMA VMID control configuration */
333 struct al_udma_gen_vmid_conf {
334 /* TX queue configuration */
335 struct al_udma_gen_vmid_q_conf tx_q_conf[DMA_MAX_Q];
337 /* RX queue configuration */
338 struct al_udma_gen_vmid_q_conf rx_q_conf[DMA_MAX_Q];
341 /** UDMA VMID MSIX control configuration */
342 struct al_udma_gen_vmid_msix_conf {
343 /* Enable write to all VMID_n registers in the MSI-X Controller */
346 /* use VMID_n [7:0] from MSI-X Controller for MSI-X message */
350 /** UDMA per Tx queue advanced VMID control configuration */
351 struct al_udma_gen_vmid_advanced_tx_q_conf {
352 /**********************************************************************
354 **********************************************************************/
355 /* Tx data VMID enable */
356 al_bool tx_q_data_vmid_en;
359 * For Tx data reads, replacement bits for the original address.
360 * The number of bits replaced is determined according to
363 unsigned int tx_q_addr_hi;
366 * For Tx data reads, 6 bits serving the number of bits taken from the
367 * extra register on account of bits coming from the original address
369 * When 'tx_q_addr_hi_sel'=32 all of 'tx_q_addr_hi' will be taken.
370 * When 'tx_q_addr_hi_sel'=0 none of it will be taken, and when any
371 * value in between, it will start from the MSB bit and sweep down as
372 * many bits as needed. For example if 'tx_q_addr_hi_sel'=8, the final
373 * address [63:56] will carry 'tx_q_addr_hi'[31:24] while [55:32] will
374 * carry the original buffer address[55:32].
376 unsigned int tx_q_addr_hi_sel;
380 * Masked per bit with 'tx_q_data_vmid_mask'
382 unsigned int tx_q_data_vmid;
385 * Tx data read VMID mask
386 * Each '1' selects from the buffer address, each '0' selects from
389 unsigned int tx_q_data_vmid_mask;
391 /**********************************************************************
393 **********************************************************************/
394 /* Tx prefetch VMID enable */
395 al_bool tx_q_prefetch_vmid_en;
397 /* Tx prefetch VMID */
398 unsigned int tx_q_prefetch_vmid;
400 /**********************************************************************
402 **********************************************************************/
403 /* Tx completion VMID enable */
404 al_bool tx_q_compl_vmid_en;
406 /* Tx completion VMID */
407 unsigned int tx_q_compl_vmid;
410 /** UDMA per Rx queue advanced VMID control configuration */
411 struct al_udma_gen_vmid_advanced_rx_q_conf {
412 /**********************************************************************
414 **********************************************************************/
415 /* Rx data VMID enable */
416 al_bool rx_q_data_vmid_en;
419 * For Rx data writes, replacement bits for the original address.
420 * The number of bits replaced is determined according to
423 unsigned int rx_q_addr_hi;
426 * For Rx data writes, 6 bits serving the number of bits taken from the
427 * extra register on account of bits coming from the original address
430 unsigned int rx_q_addr_hi_sel;
434 * Masked per bit with 'rx_q_data_vmid_mask'
436 unsigned int rx_q_data_vmid;
438 /* Rx data write VMID mask */
439 unsigned int rx_q_data_vmid_mask;
441 /**********************************************************************
442 * Rx Data Buffer 2 VMID
443 **********************************************************************/
444 /* Rx data buff2 VMID enable */
445 al_bool rx_q_data_buff2_vmid_en;
448 * For Rx data buff2 writes, replacement bits for the original address.
449 * The number of bits replaced is determined according to
450 * 'rx_q_data_buff2_addr_hi_sel'
452 unsigned int rx_q_data_buff2_addr_hi;
455 * For Rx data buff2 writes, 6 bits serving the number of bits taken
456 * from the extra register on account of bits coming from the original
459 unsigned int rx_q_data_buff2_addr_hi_sel;
462 * Rx data buff2 write VMID
463 * Masked per bit with 'rx_q_data_buff2_mask'
465 unsigned int rx_q_data_buff2_vmid;
467 /* Rx data buff2 write VMID mask */
468 unsigned int rx_q_data_buff2_mask;
470 /**********************************************************************
472 **********************************************************************/
473 /* Rx DDP write VMID enable */
474 al_bool rx_q_ddp_vmid_en;
477 * For Rx DDP writes, replacement bits for the original address.
478 * The number of bits replaced is determined according to
479 * 'rx_q_ddp_addr_hi_sel'
481 unsigned int rx_q_ddp_addr_hi;
484 * For Rx DDP writes, 6 bits serving the number of bits taken from the
485 * extra register on account of bits coming from the original address
488 unsigned int rx_q_ddp_addr_hi_sel;
492 * Masked per bit with 'rx_q_ddp_mask'
494 unsigned int rx_q_ddp_vmid;
496 /* Rx DDP write VMID mask */
497 unsigned int rx_q_ddp_mask;
499 /**********************************************************************
501 **********************************************************************/
502 /* Rx prefetch VMID enable */
503 al_bool rx_q_prefetch_vmid_en;
505 /* Rx prefetch VMID */
506 unsigned int rx_q_prefetch_vmid;
508 /**********************************************************************
510 **********************************************************************/
511 /* Rx completion VMID enable */
512 al_bool rx_q_compl_vmid_en;
514 /* Rx completion VMID */
515 unsigned int rx_q_compl_vmid;
519 * Header split, buffer 2 per queue configuration
520 * When header split is enabled, Buffer_2 is used as an address for the header
521 * data. Buffer_2 is defined as 32-bits in the RX descriptor and it is defined
522 * that the MSB ([63:32]) of Buffer_1 is used as address [63:32] for the header
525 struct al_udma_gen_hdr_split_buff2_q_conf {
527 * MSB of the 64-bit address (bits [63:32]) that can be used for header
528 * split for this queue
530 unsigned int addr_msb;
533 * Determine how to select the MSB (bits [63:32]) of the address when
534 * header split is enabled (4 bits, one per byte)
536 * [0] - selector for bits [39:32]
537 * [1] - selector for bits [47:40]
538 * [2] - selector for bits [55:48]
539 * [3] - selector for bits [63:56]
541 * 0 - Use Buffer_1 (legacy operation)
542 * 1 - Use the queue configuration 'addr_msb'
544 unsigned int add_msb_sel;
/** Report UDMA error status - to be used for abort handling */
void al_udma_err_report(struct al_udma *udma);

/** Get UDMA statistics - TBD */
void al_udma_stats_get(struct al_udma *udma);
/* Misc configurations */

/** Configure AXI configuration */
int al_udma_axi_set(struct udma_gen_axi *axi_regs,
		    struct al_udma_axi_conf *axi);

/** Configure UDMA AXI M2S configuration */
int al_udma_m2s_axi_set(struct al_udma *udma,
			struct al_udma_m2s_axi_conf *axi_m2s);

/** Configure UDMA AXI S2M configuration */
int al_udma_s2m_axi_set(struct al_udma *udma,
			struct al_udma_s2m_axi_conf *axi_s2m);

/** Configure M2S packet len */
int al_udma_m2s_packet_size_cfg_set(struct al_udma *udma,
				    struct al_udma_m2s_pkt_len_conf *conf);

/** Configure M2S UDMA descriptor prefetch */
int al_udma_m2s_pref_set(struct al_udma *udma,
			 struct al_udma_m2s_desc_pref_conf *conf);
int al_udma_m2s_pref_get(struct al_udma *udma,
			 struct al_udma_m2s_desc_pref_conf *conf);

/* Set m2s packet's max descriptors (including meta descriptors) */
#define AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET 31
int al_udma_m2s_max_descs_set(struct al_udma *udma, uint8_t max_descs);

/* Set s2m packets' max descriptors */
#define AL_UDMA_S2M_MAX_ALLOWED_DESCS_PER_PACKET 31
int al_udma_s2m_max_descs_set(struct al_udma *udma, uint8_t max_descs);
/* Configure S2M UDMA descriptor prefetch */
int al_udma_s2m_pref_set(struct al_udma *udma,
			 struct al_udma_s2m_desc_pref_conf *conf);
/*
 * NOTE(review): this re-declares the M2S getter (already declared above)
 * under the S2M section; an S2M getter (al_udma_s2m_pref_get) was likely
 * intended - verify against the implementation before renaming. The
 * duplicate declaration itself is harmless.
 */
int al_udma_m2s_pref_get(struct al_udma *udma,
			 struct al_udma_m2s_desc_pref_conf *conf);
591 /* Configure S2M UDMA data write */
592 int al_udma_s2m_data_write_set(struct al_udma *udma,
593 struct al_udma_s2m_data_write_conf *conf);
595 /* Configure the s2m full line write feature */
596 int al_udma_s2m_full_line_write_set(struct al_udma *umda, al_bool enable);
598 /* Configure S2M UDMA completion */
599 int al_udma_s2m_completion_set(struct al_udma *udma,
600 struct al_udma_s2m_completion_conf *conf);
602 /* Configure the M2S UDMA scheduling mode */
603 int al_udma_m2s_sc_set(struct al_udma *udma,
604 struct al_udma_m2s_dwrr_conf *sched);
606 /* Configure the M2S UDMA rate limitation */
607 int al_udma_m2s_rlimit_set(struct al_udma *udma,
608 struct al_udma_m2s_rlimit_mode *mode);
609 int al_udma_m2s_rlimit_reset(struct al_udma *udma);
611 /* Configure the M2S Stream rate limitation */
612 int al_udma_m2s_strm_rlimit_set(struct al_udma *udma,
613 struct al_udma_m2s_rlimit_cfg *conf);
614 int al_udma_m2s_strm_rlimit_act(struct al_udma *udma,
615 enum al_udma_m2s_rlimit_action act);
617 /* Configure the M2S UDMA Q rate limitation */
618 int al_udma_m2s_q_rlimit_set(struct al_udma_q *udma_q,
619 struct al_udma_m2s_rlimit_cfg *conf);
620 int al_udma_m2s_q_rlimit_act(struct al_udma_q *udma_q,
621 enum al_udma_m2s_rlimit_action act);
623 /* Configure the M2S UDMA Q scheduling mode */
624 int al_udma_m2s_q_sc_set(struct al_udma_q *udma_q,
625 struct al_udma_m2s_q_dwrr_conf *conf);
626 int al_udma_m2s_q_sc_pause(struct al_udma_q *udma_q, al_bool set);
627 int al_udma_m2s_q_sc_reset(struct al_udma_q *udma_q);
629 /* M2S UDMA completion and application timeouts */
630 int al_udma_m2s_comp_timeouts_set(struct al_udma *udma,
631 struct al_udma_m2s_comp_timeouts *conf);
632 int al_udma_m2s_comp_timeouts_get(struct al_udma *udma,
633 struct al_udma_m2s_comp_timeouts *conf);
635 /* UDMA get revision */
636 static INLINE unsigned int al_udma_get_revision(struct unit_regs __iomem *unit_regs)
638 return (al_reg_read32(&unit_regs->gen.dma_misc.revision)
639 & UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_MASK) >>
640 UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_SHIFT;
644 * S2M UDMA Configure the expected behavior of Rx/S2M UDMA when there are no Rx Descriptors.
647 * @param drop_packet when set to true, the UDMA will drop packet.
648 * @param gen_interrupt when set to true, the UDMA will generate
649 * no_desc_hint interrupt when a packet received and the UDMA
650 * doesn't find enough free descriptors for it.
651 * @param wait_for_desc_timeout timeout in SB cycles to wait for new
652 * descriptors before dropping the packets.
654 * - The hint interrupt is raised immediately without waiting
656 * - value 0 means wait for ever.
659 * - When get_interrupt is set, the API won't program the iofic to unmask this
660 * interrupt, in this case the callee should take care for doing that unmask
661 * using the al_udma_iofic_config() API.
663 * - The hardware's default configuration is: no drop packet, generate hint
665 * - This API must be called once and before enabling the UDMA
667 * @return 0 if no error found.
669 int al_udma_s2m_no_desc_cfg_set(struct al_udma *udma, al_bool drop_packet, al_bool gen_interrupt, uint32_t wait_for_desc_timeout);
672 * S2M UDMA configure a queue's completion update
675 * @param enable set to true to enable completion update
677 * completion update better be disabled for tx queues as those descriptors
678 * doesn't carry useful information, thus disabling it saves DMA accesses.
680 * @return 0 if no error found.
682 int al_udma_s2m_q_compl_updade_config(struct al_udma_q *udma_q, al_bool enable);
685 * S2M UDMA configure a queue's completion descriptors coalescing
688 * @param enable set to true to enable completion coalescing
689 * @param coal_timeout in South Bridge cycles.
691 * @return 0 if no error found.
693 int al_udma_s2m_q_compl_coal_config(struct al_udma_q *udma_q, al_bool enable, uint32_t coal_timeout);
/**
 * S2M UDMA configure completion descriptors write burst parameters
 *
 * @param udma udma handle
 * @param burst_size completion descriptors write burst size in bytes.
 *
 * @return 0 if no error found.
 */
/* NOTE(review): parameter-list tail reconstructed from the @param list
 * (original line lost in extraction) - verify against the implementation. */
int al_udma_s2m_compl_desc_burst_config(struct al_udma *udma, uint16_t
		burst_size);
706 * S2M UDMA configure a queue's completion header split
709 * @param enable set to true to enable completion header split
710 * @param force_hdr_split the header split length will be taken from the queue configuration
711 * @param hdr_len header split length.
713 * @return 0 if no error found.
715 int al_udma_s2m_q_compl_hdr_split_config(struct al_udma_q *udma_q,
717 al_bool force_hdr_split,
720 /* S2M UDMA per queue completion configuration */
721 int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q,
722 struct al_udma_s2m_q_comp_conf *conf);
724 /** UDMA VMID control configuration */
725 void al_udma_gen_vmid_conf_set(
726 struct unit_regs __iomem *unit_regs,
727 struct al_udma_gen_vmid_conf *conf);
729 /** UDMA VMID MSIX control configuration */
730 void al_udma_gen_vmid_msix_conf_set(
731 struct unit_regs __iomem *unit_regs,
732 struct al_udma_gen_vmid_msix_conf *conf);
/* NOTE(review): the first-parameter lines of the three declarations below
 * were lost in extraction; reconstructed as 'struct al_udma_q *udma_q' per
 * the file's per-queue API pattern - verify against the implementation. */

/** UDMA VMID control advanced Tx queue configuration */
void al_udma_gen_vmid_advanced_tx_q_conf(
	struct al_udma_q *udma_q,
	struct al_udma_gen_vmid_advanced_tx_q_conf *conf);

/** UDMA VMID control advanced Rx queue configuration */
void al_udma_gen_vmid_advanced_rx_q_conf(
	struct al_udma_q *udma_q,
	struct al_udma_gen_vmid_advanced_rx_q_conf *conf);

/** UDMA header split buffer 2 Rx queue configuration */
void al_udma_gen_hdr_split_buff2_rx_q_conf(
	struct al_udma_q *udma_q,
	struct al_udma_gen_hdr_split_buff2_q_conf *conf);
754 /** @} end of UDMA config group */
755 #endif /* __AL_HAL_UDMA_CONFIG_H__ */