1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
44 * Interface to the hardware Fetch and Add Unit.
46 * <hr>$Revision: 70030 $<hr>
49 #ifndef __CVMX_FAU_H__
50 #define __CVMX_FAU_H__
52 #ifndef CVMX_DONT_INCLUDE_CONFIG
53 #include "cvmx-config.h"
/* FAU register handles. Each is a byte offset into the FAU register file
   (0 <= reg < 2048); the required step/alignment depends on access width. */
55 typedef int cvmx_fau_reg_64_t;  /* 64-bit FAU register: step by 8 */
56 typedef int cvmx_fau_reg_32_t;  /* 32-bit FAU register: step by 4 */
57 typedef int cvmx_fau_reg_16_t;  /* 16-bit FAU register: step by 2 */
58 typedef int cvmx_fau_reg_8_t;   /* 8-bit FAU register */
66 * Octeon Fetch and Add Unit (FAU)
/* Base I/O address of the FAU, plus the (high_bit,low_bit) field positions
   consumed by cvmx_build_bits() when composing FAU load/store/IOBDMA words. */
69 #define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
70 #define CVMX_FAU_BITS_SCRADDR 63,56 /* scratchpad response address (IOBDMA) */
71 #define CVMX_FAU_BITS_LEN 55,48 /* response length (IOBDMA) */
72 #define CVMX_FAU_BITS_INEVAL 35,14 /* 22-bit signed increment value */
73 #define CVMX_FAU_BITS_TAGWAIT 13,13 /* wait-for-tag-switch flag (load form) */
/* NOTE(review): NOADD occupies the same bit as TAGWAIT — presumably the load
   and store encodings overlap at bit 13; confirm against the OCTEON HRM. */
74 #define CVMX_FAU_BITS_NOADD 13,13 /* overwrite-instead-of-add flag (store form) */
75 #define CVMX_FAU_BITS_SIZE 12,11 /* operation size (cvmx_fau_op_size_t) */
76 #define CVMX_FAU_BITS_REGISTER 10,0 /* FAU register byte offset */
/* Values for the CVMX_FAU_BITS_SIZE field: width of the atomic operation.
   NOTE(review): the enum's opening/closing lines are elided in this extract. */
80 CVMX_FAU_OP_SIZE_8 = 0,
81 CVMX_FAU_OP_SIZE_16 = 1,
82 CVMX_FAU_OP_SIZE_32 = 2,
83 CVMX_FAU_OP_SIZE_64 = 3
87 * Tagwait return definition. If a timeout occurs, the error
88 * bit will be set. Otherwise the value of the register before
89 * the update will be returned.
/* NOTE(review): the union/struct body of this 64-bit tagwait result type is
   not visible in this extract. */
95 } cvmx_fau_tagwait64_t;
98 * Tagwait return definition. If a timeout occurs, the error
99 * bit will be set. Otherwise the value of the register before
100 * the update will be returned.
/* NOTE(review): body elided in this extract (32-bit variant). */
106 } cvmx_fau_tagwait32_t;
109 * Tagwait return definition. If a timeout occurs, the error
110 * bit will be set. Otherwise the value of the register before
111 * the update will be returned.
/* NOTE(review): body elided in this extract (16-bit variant). */
117 } cvmx_fau_tagwait16_t;
120 * Tagwait return definition. If a timeout occurs, the error
121 * bit will be set. Otherwise the value of the register before
122 * the update will be returned.
/* NOTE(review): body elided in this extract (8-bit variant). */
128 } cvmx_fau_tagwait8_t;
131 * Asynchronous tagwait return definition. If a timeout occurs,
132 * the error bit will be set. Otherwise the value of the
133 * register before the update will be returned.
/* NOTE(review): only the 63-bit data field is visible; the invalid/error bit
   declaration and the enclosing union are elided in this extract. */
139 uint64_t data :63; /* unpredictable if invalid is set */
141 } cvmx_fau_async_tagwait_result_t;
145 * Builds a store I/O address for writing to the FAU
147 * @param noadd 0 = Store value is atomically added to the current value
148 * 1 = Store value is atomically written over the current value
149 * @param reg FAU atomic register to access. 0 <= reg < 2048.
150 * - Step by 2 for 16 bit access.
151 * - Step by 4 for 32 bit access.
152 * - Step by 8 for 64 bit access.
153 * @return Address to store for atomic update
155 static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
157 return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
158 cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
159 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
164 * Builds a I/O address for accessing the FAU
166 * @param tagwait Should the atomic add wait for the current tag switch
167 * operation to complete.
169 * - 1 = Wait for tag switch to complete
170 * @param reg FAU atomic register to access. 0 <= reg < 2048.
171 * - Step by 2 for 16 bit access.
172 * - Step by 4 for 32 bit access.
173 * - Step by 8 for 64 bit access.
174 * @param value Signed value to add.
175 * Note: When performing 32 and 64 bit access, only the low
176 * 22 bits are available.
177 * @return Address to read from for atomic update
179 static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg, int64_t value)
181 return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
182 cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
183 cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
184 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
188 * Perform an atomic 64 bit add
190 * @param reg FAU atomic register to access. 0 <= reg < 2048.
191 * - Step by 8 for 64 bit access.
192 * @param value Signed value to add.
193 * Note: Only the low 22 bits are available.
194 * @return Value of the register before the update
196 static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
198 return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
202 * Perform an atomic 32 bit add
204 * @param reg FAU atomic register to access. 0 <= reg < 2048.
205 * - Step by 4 for 32 bit access.
206 * @param value Signed value to add.
207 * Note: Only the low 22 bits are available.
208 * @return Value of the register before the update
210 static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
212 return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
216 * Perform an atomic 16 bit add
218 * @param reg FAU atomic register to access. 0 <= reg < 2048.
219 * - Step by 2 for 16 bit access.
220 * @param value Signed value to add.
221 * @return Value of the register before the update
223 static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
225 return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
229 * Perform an atomic 8 bit add
231 * @param reg FAU atomic register to access. 0 <= reg < 2048.
232 * @param value Signed value to add.
233 * @return Value of the register before the update
235 static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
237 return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
241 * Perform an atomic 64 bit add after the current tag switch
244 * @param reg FAU atomic register to access. 0 <= reg < 2048.
245 * - Step by 8 for 64 bit access.
246 * @param value Signed value to add.
247 * Note: Only the low 22 bits are available.
248 * @return If a timeout occurs, the error bit will be set. Otherwise
249 * the value of the register before the update will be
252 static inline cvmx_fau_tagwait64_t cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
257 cvmx_fau_tagwait64_t t;
/* Issue the tagwait (tagwait=1) fetch-and-add; the raw 64-bit response is
   captured into 'result'.
   NOTE(review): the declaration of 'result' (presumably a union of uint64_t
   and cvmx_fau_tagwait64_t) and the return statement are elided in this
   extract — confirm against the full header. */
259 result.i64 = cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
264 * Perform an atomic 32 bit add after the current tag switch
267 * @param reg FAU atomic register to access. 0 <= reg < 2048.
268 * - Step by 4 for 32 bit access.
269 * @param value Signed value to add.
270 * Note: Only the low 22 bits are available.
271 * @return If a timeout occurs, the error bit will be set. Otherwise
272 * the value of the register before the update will be
275 static inline cvmx_fau_tagwait32_t cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
280 cvmx_fau_tagwait32_t t;
/* Issue the tagwait (tagwait=1) fetch-and-add; the raw 32-bit response is
   captured into 'result'.
   NOTE(review): the declaration of 'result' and the return statement are
   elided in this extract — confirm against the full header. */
282 result.i32 = cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
287 * Perform an atomic 16 bit add after the current tag switch
290 * @param reg FAU atomic register to access. 0 <= reg < 2048.
291 * - Step by 2 for 16 bit access.
292 * @param value Signed value to add.
293 * @return If a timeout occurs, the error bit will be set. Otherwise
294 * the value of the register before the update will be
297 static inline cvmx_fau_tagwait16_t cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
302 cvmx_fau_tagwait16_t t;
/* Issue the tagwait (tagwait=1) fetch-and-add; the raw 16-bit response is
   captured into 'result'.
   NOTE(review): the declaration of 'result' and the return statement are
   elided in this extract — confirm against the full header. */
304 result.i16 = cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
309 * Perform an atomic 8 bit add after the current tag switch
312 * @param reg FAU atomic register to access. 0 <= reg < 2048.
313 * @param value Signed value to add.
314 * @return If a timeout occurs, the error bit will be set. Otherwise
315 * the value of the register before the update will be
318 static inline cvmx_fau_tagwait8_t cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
323 cvmx_fau_tagwait8_t t;
/* Issue the tagwait (tagwait=1) fetch-and-add; the raw 8-bit response is
   captured into 'result'.
   NOTE(review): the declaration of 'result' and the return statement are
   elided in this extract — confirm against the full header. */
325 result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
331 * Builds I/O data for async operations
333 * @param scraddr Scratch pad byte addres to write to. Must be 8 byte aligned
334 * @param value Signed value to add.
335 * Note: When performing 32 and 64 bit access, only the low
336 * 22 bits are available.
337 * @param tagwait Should the atomic add wait for the current tag switch
338 * operation to complete.
340 * - 1 = Wait for tag switch to complete
341 * @param size The size of the operation:
342 * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
343 * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
344 * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
345 * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
346 * @param reg FAU atomic register to access. 0 <= reg < 2048.
347 * - Step by 2 for 16 bit access.
348 * - Step by 4 for 32 bit access.
349 * - Step by 8 for 64 bit access.
350 * @return Data to write using cvmx_send_single
352 static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value, uint64_t tagwait,
353 cvmx_fau_op_size_t size, uint64_t reg)
355 return (CVMX_FAU_LOAD_IO_ADDRESS |
356 cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr>>3) |
357 cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
358 cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
359 cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
360 cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
361 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
365 * Perform an async atomic 64 bit add. The old value is
366 * placed in the scratch memory at byte address scraddr.
368 * @param scraddr Scratch memory byte address to put response in.
369 * Must be 8 byte aligned.
370 * @param reg FAU atomic register to access. 0 <= reg < 2048.
371 * - Step by 8 for 64 bit access.
372 * @param value Signed value to add.
373 * Note: Only the low 22 bits are available.
374 * @return Placed in the scratch pad register
376 static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
378 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
382 * Perform an async atomic 32 bit add. The old value is
383 * placed in the scratch memory at byte address scraddr.
385 * @param scraddr Scratch memory byte address to put response in.
386 * Must be 8 byte aligned.
387 * @param reg FAU atomic register to access. 0 <= reg < 2048.
388 * - Step by 4 for 32 bit access.
389 * @param value Signed value to add.
390 * Note: Only the low 22 bits are available.
391 * @return Placed in the scratch pad register
393 static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
395 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
399 * Perform an async atomic 16 bit add. The old value is
400 * placed in the scratch memory at byte address scraddr.
402 * @param scraddr Scratch memory byte address to put response in.
403 * Must be 8 byte aligned.
404 * @param reg FAU atomic register to access. 0 <= reg < 2048.
405 * - Step by 2 for 16 bit access.
406 * @param value Signed value to add.
407 * @return Placed in the scratch pad register
409 static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
411 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
415 * Perform an async atomic 8 bit add. The old value is
416 * placed in the scratch memory at byte address scraddr.
418 * @param scraddr Scratch memory byte address to put response in.
419 * Must be 8 byte aligned.
420 * @param reg FAU atomic register to access. 0 <= reg < 2048.
421 * @param value Signed value to add.
422 * @return Placed in the scratch pad register
424 static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
426 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
430 * Perform an async atomic 64 bit add after the current tag
433 * @param scraddr Scratch memory byte address to put response in.
434 * Must be 8 byte aligned.
435 * If a timeout occurs, the error bit (63) will be set. Otherwise
436 * the value of the register before the update will be
438 * @param reg FAU atomic register to access. 0 <= reg < 2048.
439 * - Step by 8 for 64 bit access.
440 * @param value Signed value to add.
441 * Note: Only the low 22 bits are available.
442 * @return Placed in the scratch pad register
444 static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
446 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
450 * Perform an async atomic 32 bit add after the current tag
453 * @param scraddr Scratch memory byte address to put response in.
454 * Must be 8 byte aligned.
455 * If a timeout occurs, the error bit (63) will be set. Otherwise
456 * the value of the register before the update will be
458 * @param reg FAU atomic register to access. 0 <= reg < 2048.
459 * - Step by 4 for 32 bit access.
460 * @param value Signed value to add.
461 * Note: Only the low 22 bits are available.
462 * @return Placed in the scratch pad register
464 static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
466 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
470 * Perform an async atomic 16 bit add after the current tag
473 * @param scraddr Scratch memory byte address to put response in.
474 * Must be 8 byte aligned.
475 * If a timeout occurs, the error bit (63) will be set. Otherwise
476 * the value of the register before the update will be
478 * @param reg FAU atomic register to access. 0 <= reg < 2048.
479 * - Step by 2 for 16 bit access.
480 * @param value Signed value to add.
481 * @return Placed in the scratch pad register
483 static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
485 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
489 * Perform an async atomic 8 bit add after the current tag
492 * @param scraddr Scratch memory byte address to put response in.
493 * Must be 8 byte aligned.
494 * If a timeout occurs, the error bit (63) will be set. Otherwise
495 * the value of the register before the update will be
497 * @param reg FAU atomic register to access. 0 <= reg < 2048.
498 * @param value Signed value to add.
499 * @return Placed in the scratch pad register
501 static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
503 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
507 * Perform an atomic 64 bit add
509 * @param reg FAU atomic register to access. 0 <= reg < 2048.
510 * - Step by 8 for 64 bit access.
511 * @param value Signed value to add.
513 static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
515 cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
519 * Perform an atomic 32 bit add
521 * @param reg FAU atomic register to access. 0 <= reg < 2048.
522 * - Step by 4 for 32 bit access.
523 * @param value Signed value to add.
525 static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
527 cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
531 * Perform an atomic 16 bit add
533 * @param reg FAU atomic register to access. 0 <= reg < 2048.
534 * - Step by 2 for 16 bit access.
535 * @param value Signed value to add.
537 static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
539 cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
543 * Perform an atomic 8 bit add
545 * @param reg FAU atomic register to access. 0 <= reg < 2048.
546 * @param value Signed value to add.
548 static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
550 cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
554 * Perform an atomic 64 bit write
556 * @param reg FAU atomic register to access. 0 <= reg < 2048.
557 * - Step by 8 for 64 bit access.
558 * @param value Signed value to write.
560 static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
562 cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
566 * Perform an atomic 32 bit write
568 * @param reg FAU atomic register to access. 0 <= reg < 2048.
569 * - Step by 4 for 32 bit access.
570 * @param value Signed value to write.
572 static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
574 cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
578 * Perform an atomic 16 bit write
580 * @param reg FAU atomic register to access. 0 <= reg < 2048.
581 * - Step by 2 for 16 bit access.
582 * @param value Signed value to write.
584 static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
586 cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
590 * Perform an atomic 8 bit write
592 * @param reg FAU atomic register to access. 0 <= reg < 2048.
593 * @param value Signed value to write.
595 static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
597 cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
604 #endif /* __CVMX_FAU_H__ */