1 /***********************license start***************
2 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
24 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
25 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
26 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
27 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
28 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
29 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
30 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
31 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
32 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
35 * For any questions regarding licensing please contact marketing@caviumnetworks.com
37 ***********************license end**************************************/
47 * Interface to the hardware Fetch and Add Unit.
49 * <hr>$Revision: 41586 $<hr>
52 #ifndef __CVMX_FAU_H__
53 #define __CVMX_FAU_H__
55 #ifndef CVMX_DONT_INCLUDE_CONFIG
56 #include "cvmx-config.h"
58 typedef int cvmx_fau_reg_64_t;
59 typedef int cvmx_fau_reg_32_t;
60 typedef int cvmx_fau_reg_16_t;
61 typedef int cvmx_fau_reg_8_t;
/*
 * Octeon Fetch and Add Unit (FAU)
 */

/** Base I/O address for FAU load operations */
#define CVMX_FAU_LOAD_IO_ADDRESS    cvmx_build_io_address(0x1e, 0)
/* Bit fields (msb,lsb) of an FAU I/O address / IOBDMA command word */
#define CVMX_FAU_BITS_SCRADDR       63,56  /* scratchpad destination word (IOBDMA) */
#define CVMX_FAU_BITS_LEN           55,48  /* transfer length, always 1 (IOBDMA) */
#define CVMX_FAU_BITS_INEVAL        35,14  /* signed increment value */
#define CVMX_FAU_BITS_TAGWAIT       13,13  /* loads: wait for tag switch first */
#define CVMX_FAU_BITS_NOADD         13,13  /* stores: same bit means overwrite, not add */
#define CVMX_FAU_BITS_SIZE          12,11  /* operand size, cvmx_fau_op_size_t (IOBDMA) */
#define CVMX_FAU_BITS_REGISTER      10,0   /* FAU register byte offset, 0..2047 */
/**
 * Operand sizes for FAU operations, as encoded in the
 * CVMX_FAU_BITS_SIZE field of an IOBDMA command word.
 */
typedef enum {
    CVMX_FAU_OP_SIZE_8  = 0,
    CVMX_FAU_OP_SIZE_16 = 1,
    CVMX_FAU_OP_SIZE_32 = 2,
    CVMX_FAU_OP_SIZE_64 = 3
} cvmx_fau_op_size_t;
/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t error : 1;   /* set when the tag-switch wait timed out */
    int64_t  value : 63;  /* register value before the update */
} cvmx_fau_tagwait64_t;
/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t error : 1;   /* set when the tag-switch wait timed out */
    int32_t  value : 31;  /* register value before the update */
} cvmx_fau_tagwait32_t;
/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t error : 1;   /* set when the tag-switch wait timed out */
    int16_t  value : 15;  /* register value before the update */
} cvmx_fau_tagwait16_t;
/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t error : 1;  /* set when the tag-switch wait timed out */
    int8_t   value : 7;  /* register value before the update */
} cvmx_fau_tagwait8_t;
/**
 * Asynchronous tagwait return definition. If a timeout occurs,
 * the error bit will be set. Otherwise the value of the
 * register before the update will be returned.
 */
typedef union
{
    uint64_t u64;          /* raw 64-bit scratchpad word */
    struct
    {
        uint64_t invalid : 1;   /* set when the tag-switch wait timed out */
        uint64_t data    : 63;  /* unpredictable if invalid is set */
    } s;
} cvmx_fau_async_tagwait_result_t;
149 * Builds a store I/O address for writing to the FAU
151 * @param noadd 0 = Store value is atomically added to the current value
152 * 1 = Store value is atomically written over the current value
153 * @param reg FAU atomic register to access. 0 <= reg < 2048.
154 * - Step by 2 for 16 bit access.
155 * - Step by 4 for 32 bit access.
156 * - Step by 8 for 64 bit access.
157 * @return Address to store for atomic update
159 static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
161 return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
162 cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
163 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
169 * Builds a I/O address for accessing the FAU
171 * @param tagwait Should the atomic add wait for the current tag switch
172 * operation to complete.
174 * - 1 = Wait for tag switch to complete
175 * @param reg FAU atomic register to access. 0 <= reg < 2048.
176 * - Step by 2 for 16 bit access.
177 * - Step by 4 for 32 bit access.
178 * - Step by 8 for 64 bit access.
179 * @param value Signed value to add.
180 * Note: When performing 32 and 64 bit access, only the low
181 * 22 bits are available.
182 * @return Address to read from for atomic update
184 static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg, int64_t value)
186 return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
187 cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
188 cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
189 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
194 * Perform an atomic 64 bit add
196 * @param reg FAU atomic register to access. 0 <= reg < 2048.
197 * - Step by 8 for 64 bit access.
198 * @param value Signed value to add.
199 * Note: Only the low 22 bits are available.
200 * @return Value of the register before the update
202 static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
204 return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
209 * Perform an atomic 32 bit add
211 * @param reg FAU atomic register to access. 0 <= reg < 2048.
212 * - Step by 4 for 32 bit access.
213 * @param value Signed value to add.
214 * Note: Only the low 22 bits are available.
215 * @return Value of the register before the update
217 static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
219 return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
224 * Perform an atomic 16 bit add
226 * @param reg FAU atomic register to access. 0 <= reg < 2048.
227 * - Step by 2 for 16 bit access.
228 * @param value Signed value to add.
229 * @return Value of the register before the update
231 static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
233 return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
238 * Perform an atomic 8 bit add
240 * @param reg FAU atomic register to access. 0 <= reg < 2048.
241 * @param value Signed value to add.
242 * @return Value of the register before the update
244 static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
246 return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
251 * Perform an atomic 64 bit add after the current tag switch
254 * @param reg FAU atomic register to access. 0 <= reg < 2048.
255 * - Step by 8 for 64 bit access.
256 * @param value Signed value to add.
257 * Note: Only the low 22 bits are available.
258 * @return If a timeout occurs, the error bit will be set. Otherwise
259 * the value of the register before the update will be
262 static inline cvmx_fau_tagwait64_t cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
267 cvmx_fau_tagwait64_t t;
269 result.i64 = cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
275 * Perform an atomic 32 bit add after the current tag switch
278 * @param reg FAU atomic register to access. 0 <= reg < 2048.
279 * - Step by 4 for 32 bit access.
280 * @param value Signed value to add.
281 * Note: Only the low 22 bits are available.
282 * @return If a timeout occurs, the error bit will be set. Otherwise
283 * the value of the register before the update will be
286 static inline cvmx_fau_tagwait32_t cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
291 cvmx_fau_tagwait32_t t;
293 result.i32 = cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
299 * Perform an atomic 16 bit add after the current tag switch
302 * @param reg FAU atomic register to access. 0 <= reg < 2048.
303 * - Step by 2 for 16 bit access.
304 * @param value Signed value to add.
305 * @return If a timeout occurs, the error bit will be set. Otherwise
306 * the value of the register before the update will be
309 static inline cvmx_fau_tagwait16_t cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
314 cvmx_fau_tagwait16_t t;
316 result.i16 = cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
322 * Perform an atomic 8 bit add after the current tag switch
325 * @param reg FAU atomic register to access. 0 <= reg < 2048.
326 * @param value Signed value to add.
327 * @return If a timeout occurs, the error bit will be set. Otherwise
328 * the value of the register before the update will be
331 static inline cvmx_fau_tagwait8_t cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
336 cvmx_fau_tagwait8_t t;
338 result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
345 * Builds I/O data for async operations
347 * @param scraddr Scratch pad byte addres to write to. Must be 8 byte aligned
348 * @param value Signed value to add.
349 * Note: When performing 32 and 64 bit access, only the low
350 * 22 bits are available.
351 * @param tagwait Should the atomic add wait for the current tag switch
352 * operation to complete.
354 * - 1 = Wait for tag switch to complete
355 * @param size The size of the operation:
356 * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
357 * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
358 * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
359 * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
360 * @param reg FAU atomic register to access. 0 <= reg < 2048.
361 * - Step by 2 for 16 bit access.
362 * - Step by 4 for 32 bit access.
363 * - Step by 8 for 64 bit access.
364 * @return Data to write using cvmx_send_single
366 static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value, uint64_t tagwait,
367 cvmx_fau_op_size_t size, uint64_t reg)
369 return (CVMX_FAU_LOAD_IO_ADDRESS |
370 cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr>>3) |
371 cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
372 cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
373 cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
374 cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
375 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
380 * Perform an async atomic 64 bit add. The old value is
381 * placed in the scratch memory at byte address scraddr.
383 * @param scraddr Scratch memory byte address to put response in.
384 * Must be 8 byte aligned.
385 * @param reg FAU atomic register to access. 0 <= reg < 2048.
386 * - Step by 8 for 64 bit access.
387 * @param value Signed value to add.
388 * Note: Only the low 22 bits are available.
389 * @return Placed in the scratch pad register
391 static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
393 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
398 * Perform an async atomic 32 bit add. The old value is
399 * placed in the scratch memory at byte address scraddr.
401 * @param scraddr Scratch memory byte address to put response in.
402 * Must be 8 byte aligned.
403 * @param reg FAU atomic register to access. 0 <= reg < 2048.
404 * - Step by 4 for 32 bit access.
405 * @param value Signed value to add.
406 * Note: Only the low 22 bits are available.
407 * @return Placed in the scratch pad register
409 static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
411 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
416 * Perform an async atomic 16 bit add. The old value is
417 * placed in the scratch memory at byte address scraddr.
419 * @param scraddr Scratch memory byte address to put response in.
420 * Must be 8 byte aligned.
421 * @param reg FAU atomic register to access. 0 <= reg < 2048.
422 * - Step by 2 for 16 bit access.
423 * @param value Signed value to add.
424 * @return Placed in the scratch pad register
426 static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
428 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
433 * Perform an async atomic 8 bit add. The old value is
434 * placed in the scratch memory at byte address scraddr.
436 * @param scraddr Scratch memory byte address to put response in.
437 * Must be 8 byte aligned.
438 * @param reg FAU atomic register to access. 0 <= reg < 2048.
439 * @param value Signed value to add.
440 * @return Placed in the scratch pad register
442 static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
444 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
449 * Perform an async atomic 64 bit add after the current tag
452 * @param scraddr Scratch memory byte address to put response in.
453 * Must be 8 byte aligned.
454 * If a timeout occurs, the error bit (63) will be set. Otherwise
455 * the value of the register before the update will be
457 * @param reg FAU atomic register to access. 0 <= reg < 2048.
458 * - Step by 8 for 64 bit access.
459 * @param value Signed value to add.
460 * Note: Only the low 22 bits are available.
461 * @return Placed in the scratch pad register
463 static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
465 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
470 * Perform an async atomic 32 bit add after the current tag
473 * @param scraddr Scratch memory byte address to put response in.
474 * Must be 8 byte aligned.
475 * If a timeout occurs, the error bit (63) will be set. Otherwise
476 * the value of the register before the update will be
478 * @param reg FAU atomic register to access. 0 <= reg < 2048.
479 * - Step by 4 for 32 bit access.
480 * @param value Signed value to add.
481 * Note: Only the low 22 bits are available.
482 * @return Placed in the scratch pad register
484 static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
486 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
491 * Perform an async atomic 16 bit add after the current tag
494 * @param scraddr Scratch memory byte address to put response in.
495 * Must be 8 byte aligned.
496 * If a timeout occurs, the error bit (63) will be set. Otherwise
497 * the value of the register before the update will be
499 * @param reg FAU atomic register to access. 0 <= reg < 2048.
500 * - Step by 2 for 16 bit access.
501 * @param value Signed value to add.
502 * @return Placed in the scratch pad register
504 static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
506 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
511 * Perform an async atomic 8 bit add after the current tag
514 * @param scraddr Scratch memory byte address to put response in.
515 * Must be 8 byte aligned.
516 * If a timeout occurs, the error bit (63) will be set. Otherwise
517 * the value of the register before the update will be
519 * @param reg FAU atomic register to access. 0 <= reg < 2048.
520 * @param value Signed value to add.
521 * @return Placed in the scratch pad register
523 static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
525 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
532 * Perform an atomic 64 bit add
534 * @param reg FAU atomic register to access. 0 <= reg < 2048.
535 * - Step by 8 for 64 bit access.
536 * @param value Signed value to add.
538 static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
540 cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
545 * Perform an atomic 32 bit add
547 * @param reg FAU atomic register to access. 0 <= reg < 2048.
548 * - Step by 4 for 32 bit access.
549 * @param value Signed value to add.
551 static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
553 cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
558 * Perform an atomic 16 bit add
560 * @param reg FAU atomic register to access. 0 <= reg < 2048.
561 * - Step by 2 for 16 bit access.
562 * @param value Signed value to add.
564 static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
566 cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
571 * Perform an atomic 8 bit add
573 * @param reg FAU atomic register to access. 0 <= reg < 2048.
574 * @param value Signed value to add.
576 static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
578 cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
583 * Perform an atomic 64 bit write
585 * @param reg FAU atomic register to access. 0 <= reg < 2048.
586 * - Step by 8 for 64 bit access.
587 * @param value Signed value to write.
589 static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
591 cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
596 * Perform an atomic 32 bit write
598 * @param reg FAU atomic register to access. 0 <= reg < 2048.
599 * - Step by 4 for 32 bit access.
600 * @param value Signed value to write.
602 static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
604 cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
609 * Perform an atomic 16 bit write
611 * @param reg FAU atomic register to access. 0 <= reg < 2048.
612 * - Step by 2 for 16 bit access.
613 * @param value Signed value to write.
615 static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
617 cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
622 * Perform an atomic 8 bit write
624 * @param reg FAU atomic register to access. 0 <= reg < 2048.
625 * @param value Signed value to write.
627 static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
629 cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
636 #endif /* __CVMX_FAU_H__ */