/***********************license start***************
 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *
 *     * Neither the name of Cavium Networks nor the names of
 *       its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written
 *       permission.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 *
 * For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/
/**
 * @file
 *
 * Interface to the PCI / PCIe DMA engines. These are only available
 * on chips with PCI / PCIe.
 *
 * <hr>$Revision: 41586 $<hr>
 */
#include "executive-config.h"
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-cmd-queue.h"
#include "cvmx-dma-engine.h"

#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/**
 * Return the number of DMA engines supported by this chip
 *
 * @return Number of DMA engines
 */
int cvmx_dma_engine_get_num(void)
{
    if (octeon_has_feature(OCTEON_FEATURE_PCIE))
    {
        /* CN52XX pass 1 supports fewer engines than later PCIe chips */
        if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
            return 4;
        else
            return 5;
    }
    else
        return 2;
}
/**
 * Initialize the DMA engines for use
 *
 * @return Zero on success, negative on failure
 */
int cvmx_dma_engine_initialize(void)
{
    cvmx_npei_dmax_ibuff_saddr_t dmax_ibuff_saddr;
    int engine;

    for (engine=0; engine < cvmx_dma_engine_get_num(); engine++)
    {
        cvmx_cmd_queue_result_t result;
        result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_DMA(engine),
                                           0, CVMX_FPA_OUTPUT_BUFFER_POOL,
                                           CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
        if (result != CVMX_CMD_QUEUE_SUCCESS)
            return -1;
        dmax_ibuff_saddr.u64 = 0;
        dmax_ibuff_saddr.s.saddr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine))) >> 7;
        if (octeon_has_feature(OCTEON_FEATURE_PCIE))
            cvmx_write_csr(CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(engine), dmax_ibuff_saddr.u64);
        else
        {
            /* PCI chips use separate high and low priority queues */
            if (engine)
                cvmx_write_csr(CVMX_NPI_HIGHP_IBUFF_SADDR, dmax_ibuff_saddr.u64);
            else
                cvmx_write_csr(CVMX_NPI_LOWP_IBUFF_SADDR, dmax_ibuff_saddr.u64);
        }
    }
    if (octeon_has_feature(OCTEON_FEATURE_PCIE))
    {
        cvmx_npei_dma_control_t dma_control;
        dma_control.u64 = 0;
        if (cvmx_dma_engine_get_num() >= 5)
            dma_control.s.dma4_enb = 1;
        dma_control.s.dma3_enb = 1;
        dma_control.s.dma2_enb = 1;
        dma_control.s.dma1_enb = 1;
        dma_control.s.dma0_enb = 1;
        dma_control.s.o_mode = 1; /* Pull NS and RO from this register, not the pointers */
        //dma_control.s.dwb_denb = 1;
        //dma_control.s.dwb_ichk = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
        dma_control.s.fpa_que = CVMX_FPA_OUTPUT_BUFFER_POOL;
        dma_control.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
        cvmx_write_csr(CVMX_PEXP_NPEI_DMA_CONTROL, dma_control.u64);
        /* As a workaround for errata PCIE-811 we only allow a single
            outstanding DMA read over PCIe at a time. This limits performance,
            but works in all cases. If you need higher performance, remove
            this code and implement the more complicated workaround documented
            in the errata. This only affects CN56XX pass 2.0 chips */
        if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_0))
        {
            cvmx_npei_dma_pcie_req_num_t pcie_req_num;
            pcie_req_num.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM);
            pcie_req_num.s.dma_cnt = 1;
            cvmx_write_csr(CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM, pcie_req_num.u64);
        }
    }
    else
    {
        cvmx_npi_dma_control_t dma_control;
        dma_control.u64 = 0;
        //dma_control.s.dwb_denb = 1;
        //dma_control.s.dwb_ichk = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
        dma_control.s.o_add1 = 1;
        dma_control.s.fpa_que = CVMX_FPA_OUTPUT_BUFFER_POOL;
        dma_control.s.hp_enb = 1;
        dma_control.s.lp_enb = 1;
        dma_control.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
        cvmx_write_csr(CVMX_NPI_DMA_CONTROL, dma_control.u64);
    }
    return 0;
}
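
/* Illustrative bring-up sketch (hypothetical helper, not part of the original
   file): the command queues above take their chunks from
   CVMX_FPA_OUTPUT_BUFFER_POOL, so that FPA pool must already be populated by
   the application's normal FPA setup before cvmx_dma_engine_initialize() is
   called. Teardown follows the reverse order. */
#if 0
static int example_dma_engine_bringup(void)
{
    if (cvmx_dma_engine_initialize())
        return -1;
    /* ... queue transfers with cvmx_dma_engine_transfer()/_submit() ... */
    return cvmx_dma_engine_shutdown();
}
#endif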
/**
 * Shutdown all DMA engines. The engines must be idle when this
 * function is called.
 *
 * @return Zero on success, negative on failure
 */
int cvmx_dma_engine_shutdown(void)
{
    int engine;

    for (engine=0; engine < cvmx_dma_engine_get_num(); engine++)
    {
        if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_DMA(engine)))
        {
            cvmx_dprintf("ERROR: cvmx_dma_engine_shutdown: Engine not idle.\n");
            return -1;
        }
    }
    if (octeon_has_feature(OCTEON_FEATURE_PCIE))
    {
        cvmx_npei_dma_control_t dma_control;
        dma_control.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMA_CONTROL);
        if (cvmx_dma_engine_get_num() >= 5)
            dma_control.s.dma4_enb = 0;
        dma_control.s.dma3_enb = 0;
        dma_control.s.dma2_enb = 0;
        dma_control.s.dma1_enb = 0;
        dma_control.s.dma0_enb = 0;
        cvmx_write_csr(CVMX_PEXP_NPEI_DMA_CONTROL, dma_control.u64);
        /* Make sure the disable completes */
        cvmx_read_csr(CVMX_PEXP_NPEI_DMA_CONTROL);
    }
    else
    {
        cvmx_npi_dma_control_t dma_control;
        dma_control.u64 = cvmx_read_csr(CVMX_NPI_DMA_CONTROL);
        dma_control.s.hp_enb = 0;
        dma_control.s.lp_enb = 0;
        cvmx_write_csr(CVMX_NPI_DMA_CONTROL, dma_control.u64);
        /* Make sure the disable completes */
        cvmx_read_csr(CVMX_NPI_DMA_CONTROL);
    }
    for (engine=0; engine < cvmx_dma_engine_get_num(); engine++)
    {
        cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_DMA(engine));
        if (octeon_has_feature(OCTEON_FEATURE_PCIE))
            cvmx_write_csr(CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(engine), 0);
        else
        {
            if (engine)
                cvmx_write_csr(CVMX_NPI_HIGHP_IBUFF_SADDR, 0);
            else
                cvmx_write_csr(CVMX_NPI_LOWP_IBUFF_SADDR, 0);
        }
    }
    return 0;
}
/**
 * Submit a series of DMA commands to the DMA engines.
 *
 * @param engine  Engine to submit to (0-4)
 * @param header  Command header
 * @param num_buffers
 *                The number of data pointers
 * @param buffers Command data pointers
 *
 * @return Zero on success, negative on failure
 */
int cvmx_dma_engine_submit(int engine, cvmx_dma_engine_header_t header, int num_buffers, cvmx_dma_engine_buffer_t buffers[])
{
    cvmx_cmd_queue_result_t result;
    int cmd_count = 1;
    uint64_t cmds[num_buffers + 1];
    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
    {
        /* Check for Errata PCIe-604 */
        if ((header.s.nfst > 11) || (header.s.nlst > 11) || (header.s.nfst + header.s.nlst > 15))
        {
            cvmx_dprintf("DMA engine submit too large\n");
            return -1;
        }
    }

    cmds[0] = header.u64;
    while (num_buffers--)
    {
        cmds[cmd_count++] = buffers->u64;
        buffers++;
    }
    /* Due to errata PCIE-13315, it is necessary to have the queue lock while we
        ring the doorbell for the DMA engines. This prevents doorbells from
        possibly arriving out of order with respect to the command queue
        entries */
    __cvmx_cmd_queue_lock(CVMX_CMD_QUEUE_DMA(engine), __cvmx_cmd_queue_get_state(CVMX_CMD_QUEUE_DMA(engine)));
    result = cvmx_cmd_queue_write(CVMX_CMD_QUEUE_DMA(engine), 0, cmd_count, cmds);
    /* This SYNCWS is needed since the command queue didn't do locking, which
        normally implies the SYNCWS. This one makes sure the command queue
        updates make it to L2 before we ring the doorbell */
    CVMX_SYNCWS;
    /* A syncw isn't needed here since the command queue did one as part of the queue unlock */
    if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS))
    {
        if (octeon_has_feature(OCTEON_FEATURE_PCIE))
        {
            /* DMA doorbells are 32bit writes in little endian space. This means we need to xor the address with 4 */
            cvmx_write64_uint32(CVMX_PEXP_NPEI_DMAX_DBELL(engine)^4, cmd_count);
        }
        else
        {
            if (engine)
                cvmx_write_csr(CVMX_NPI_HIGHP_DBELL, cmd_count);
            else
                cvmx_write_csr(CVMX_NPI_LOWP_DBELL, cmd_count);
        }
    }
    /* Here is the unlock for the above errata workaround */
    __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_get_state(CVMX_CMD_QUEUE_DMA(engine)));
    return result;
}
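
/* Illustrative sketch (hypothetical helper, not part of the SDK): submit a
   single-segment internal copy directly through cvmx_dma_engine_submit().
   It assumes both sides fit in one 8191 byte internal pointer, and it leaves
   all header fields other than type/nfst/nlst at their reset values; a real
   transfer may need completion/notification fields set as well. Larger or
   scattered transfers should use cvmx_dma_engine_transfer() below, or build
   longer first/last pointer lists the same way. */
#if 0
static int example_submit_internal_copy(int engine, uint64_t src_phys, uint64_t dest_phys, int len)
{
    cvmx_dma_engine_header_t header;
    cvmx_dma_engine_buffer_t buffers[2];

    header.u64 = 0;
    header.s.type = CVMX_DMA_ENGINE_TRANSFER_INTERNAL;
    header.s.nfst = 1;               /* one "first" (source) pointer */
    header.s.nlst = 1;               /* one "last" (destination) pointer */

    buffers[0].u64 = 0;
    buffers[0].internal.size = len;  /* must be <= 8191 for a single chunk */
    buffers[0].internal.addr = src_phys;
    buffers[1].u64 = 0;
    buffers[1].internal.size = len;
    buffers[1].internal.addr = dest_phys;

    return cvmx_dma_engine_submit(engine, header, 2, buffers);
}
#endif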
/**
 * @INTERNAL
 * Function used by cvmx_dma_engine_transfer() to build the
 * internal address list.
 *
 * @param buffers Location to store the list
 * @param address Address to build list for
 * @param size    Length of the memory pointed to by address
 *
 * @return Number of internal pointer chunks created
 */
static inline int __cvmx_dma_engine_build_internal_pointers(cvmx_dma_engine_buffer_t *buffers, uint64_t address, int size)
{
    int segments = 0;
    while (size)
    {
        /* Each internal chunk can contain a maximum of 8191 bytes */
        int chunk = size;
        if (chunk > 8191)
            chunk = 8191;
        buffers[segments].u64 = 0;
        buffers[segments].internal.size = chunk;
        buffers[segments].internal.addr = address;
        address += chunk;
        size -= chunk;
        segments++;
    }
    return segments;
}
/**
 * @INTERNAL
 * Function used by cvmx_dma_engine_transfer() to build the PCI / PCIe address
 * list.
 *
 * @param buffers Location to store the list
 * @param address Address to build list for
 * @param size    Length of the memory pointed to by address
 *
 * @return Number of PCI / PCIe address chunks created. The number of words used
 *         will be segments + (segments-1)/4 + 1.
 */
static inline int __cvmx_dma_engine_build_external_pointers(cvmx_dma_engine_buffer_t *buffers, uint64_t address, int size)
{
    const int MAX_SIZE = 65535;
    int segments = 0;
    while (size)
    {
        /* Each block of 4 PCI / PCIe pointers uses one dword for lengths followed by
            up to 4 addresses. This then repeats if more data is needed */
        buffers[0].u64 = 0;
        if (size <= MAX_SIZE)
        {
            /* Only one more segment needed */
            buffers[0].pcie_length.len0 = size;
            buffers[1].u64 = address;
            size = 0;
            segments++;
        }
        else if (size <= MAX_SIZE * 2)
        {
            /* Two more segments needed */
            buffers[0].pcie_length.len0 = MAX_SIZE;
            buffers[0].pcie_length.len1 = size - MAX_SIZE;
            buffers[1].u64 = address;
            address += MAX_SIZE;
            buffers[2].u64 = address;
            size = 0;
            segments += 2;
        }
        else if (size <= MAX_SIZE * 3)
        {
            /* Three more segments needed */
            buffers[0].pcie_length.len0 = MAX_SIZE;
            buffers[0].pcie_length.len1 = MAX_SIZE;
            buffers[0].pcie_length.len2 = size - MAX_SIZE * 2;
            buffers[1].u64 = address;
            address += MAX_SIZE;
            buffers[2].u64 = address;
            address += MAX_SIZE;
            buffers[3].u64 = address;
            size = 0;
            segments += 3;
        }
        else if (size <= MAX_SIZE * 4)
        {
            /* Four more segments needed */
            buffers[0].pcie_length.len0 = MAX_SIZE;
            buffers[0].pcie_length.len1 = MAX_SIZE;
            buffers[0].pcie_length.len2 = MAX_SIZE;
            buffers[0].pcie_length.len3 = size - MAX_SIZE * 3;
            buffers[1].u64 = address;
            address += MAX_SIZE;
            buffers[2].u64 = address;
            address += MAX_SIZE;
            buffers[3].u64 = address;
            address += MAX_SIZE;
            buffers[4].u64 = address;
            size = 0;
            segments += 4;
        }
        else
        {
            /* Five or more segments are needed */
            buffers[0].pcie_length.len0 = MAX_SIZE;
            buffers[0].pcie_length.len1 = MAX_SIZE;
            buffers[0].pcie_length.len2 = MAX_SIZE;
            buffers[0].pcie_length.len3 = MAX_SIZE;
            buffers[1].u64 = address;
            address += MAX_SIZE;
            buffers[2].u64 = address;
            address += MAX_SIZE;
            buffers[3].u64 = address;
            address += MAX_SIZE;
            buffers[4].u64 = address;
            address += MAX_SIZE;
            size -= MAX_SIZE * 4;
            segments += 4;
            buffers += 5;
        }
    }
    return segments;
}
/**
 * Build the first and last pointers based on a DMA engine header
 * and submit them to the engine. The purpose of this function is
 * to simplify the building of DMA engine commands by automatically
 * converting a simple address and size into the appropriate internal
 * or PCI / PCIe address list. This function does not support gather lists,
 * so you will need to build your own lists in that case.
 *
 * @param engine Engine to submit to (0-4)
 * @param header DMA Command header. Note that the nfst and nlst fields do not
 *               need to be filled in. All other fields must be set properly.
 * @param first_address
 *               Address to use for the first pointers. In the case of INTERNAL,
 *               INBOUND, and OUTBOUND this is an Octeon memory address. In the
 *               case of EXTERNAL, this is the source PCI / PCIe address.
 * @param last_address
 *               Address to use for the last pointers. In the case of EXTERNAL,
 *               INBOUND, and OUTBOUND this is a PCI / PCIe address. In the
 *               case of INTERNAL, this is the Octeon memory destination address.
 * @param size   Size of the transfer to perform.
 *
 * @return Zero on success, negative on failure
 */
int cvmx_dma_engine_transfer(int engine, cvmx_dma_engine_header_t header,
                             uint64_t first_address, uint64_t last_address,
                             int size)
{
    cvmx_dma_engine_buffer_t buffers[32];
    int words = 0;
    switch (header.s.type)
    {
        case CVMX_DMA_ENGINE_TRANSFER_INTERNAL:
            header.s.nfst = __cvmx_dma_engine_build_internal_pointers(buffers, first_address, size);
            words += header.s.nfst;
            header.s.nlst = __cvmx_dma_engine_build_internal_pointers(buffers + words, last_address, size);
            words += header.s.nlst;
            break;
        case CVMX_DMA_ENGINE_TRANSFER_INBOUND:
        case CVMX_DMA_ENGINE_TRANSFER_OUTBOUND:
            header.s.nfst = __cvmx_dma_engine_build_internal_pointers(buffers, first_address, size);
            words += header.s.nfst;
            header.s.nlst = __cvmx_dma_engine_build_external_pointers(buffers + words, last_address, size);
            words += header.s.nlst + ((header.s.nlst-1) >> 2) + 1;
            break;
        case CVMX_DMA_ENGINE_TRANSFER_EXTERNAL:
            header.s.nfst = __cvmx_dma_engine_build_external_pointers(buffers, first_address, size);
            words += header.s.nfst + ((header.s.nfst-1) >> 2) + 1;
            header.s.nlst = __cvmx_dma_engine_build_external_pointers(buffers + words, last_address, size);
            words += header.s.nlst + ((header.s.nlst-1) >> 2) + 1;
            break;
    }
    return cvmx_dma_engine_submit(engine, header, words, buffers);
}

#endif
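
/* Illustrative usage sketch (hypothetical function, not part of this file):
   queue an outbound copy of "len" bytes from a local buffer to a PCI / PCIe
   bus address using cvmx_dma_engine_transfer(). Any header fields beyond
   "type" that a given chip requires (completion notification, port selection,
   and so on) are assumed to be filled in by the caller. */
#if 0
static int example_outbound_copy(int engine, void *local, uint64_t pcie_addr, int len)
{
    cvmx_dma_engine_header_t header;
    header.u64 = 0;
    header.s.type = CVMX_DMA_ENGINE_TRANSFER_OUTBOUND; /* Octeon memory -> PCI / PCIe */
    /* nfst/nlst are computed by cvmx_dma_engine_transfer() from "len" */
    return cvmx_dma_engine_transfer(engine, header,
                                    cvmx_ptr_to_phys(local), pcie_addr, len);
}
#endif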