/***********************license start***************
 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *
 *     * Neither the name of Cavium Networks nor the names of
 *       its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written
 *       permission.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 * For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/
/**
 * This file provides atomic operations.
 *
 * <hr>$Revision: 41586 $<hr>
 */

#ifndef __CVMX_ATOMIC_H__
#define __CVMX_ATOMIC_H__
/**
 * Atomically adds a signed value to a 32 bit (aligned) memory location.
 *
 * This version does not perform 'sync' operations to enforce memory
 * ordering. It should only be used when there are no memory-ordering
 * constraints. (It should NOT be used for reference counting -
 * use the standard version instead.)
 *
 * @param ptr    address in memory to add incr to
 * @param incr   amount to increment memory location by (signed)
 */
static inline void cvmx_atomic_add32_nosync(int32_t *ptr, int32_t incr)
    if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
            "1: ll   %[tmp], %[val] \n"
            "   addu %[tmp], %[inc] \n"
            "   sc   %[tmp], %[val] \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp)
            " saa %[inc], (%[base]) \n"
            : [inc] "r" (incr), [base] "r" (ptr)
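
/*
 * Usage sketch (illustrative only, not part of this header's API): bumping a
 * per-port drop counter where no ordering against other memory traffic is
 * needed, so the cheaper no-sync variant is appropriate. The counter and the
 * example_* name are hypothetical.
 */
static inline void example_count_drop(int32_t *drop_counter)
{
    /* Atomic add with no 'sync' issued around it. */
    cvmx_atomic_add32_nosync(drop_counter, 1);
}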
/**
 * Atomically adds a signed value to a 32 bit (aligned) memory location.
 *
 * Memory access ordering is enforced before/after the atomic operation,
 * so no additional 'sync' instructions are required.
 *
 * @param ptr    address in memory to add incr to
 * @param incr   amount to increment memory location by (signed)
 */
static inline void cvmx_atomic_add32(int32_t *ptr, int32_t incr)
    cvmx_atomic_add32_nosync(ptr, incr);
/**
 * Atomically sets a 32 bit (aligned) memory location to a value
 *
 * @param ptr    address of memory to set
 * @param value  value to set memory location to.
 */
static inline void cvmx_atomic_set32(int32_t *ptr, int32_t value)
/**
 * Returns the current value of a 32 bit (aligned) memory location.
 *
 * @param ptr    Address of memory to get
 * @return Value of the memory
 */
static inline int32_t cvmx_atomic_get32(int32_t *ptr)
{
    return *(volatile int32_t *)ptr;
}
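
/*
 * Usage sketch (illustrative only, not part of this header's API): publishing
 * a shared 32 bit configuration value with cvmx_atomic_set32() and sampling it
 * with cvmx_atomic_get32(). The "threshold" variable and the example_*
 * function names are hypothetical.
 */
static inline void example_publish_threshold32(int32_t *threshold, int32_t value)
{
    /* Aligned 32 bit stores are atomic, so readers on other cores observe
       either the old or the new value, never a torn mix of the two. */
    cvmx_atomic_set32(threshold, value);
}

static inline int32_t example_read_threshold32(int32_t *threshold)
{
    /* Volatile read of the current value; no ordering is implied. */
    return cvmx_atomic_get32(threshold);
}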
/**
 * Atomically adds a signed value to a 64 bit (aligned) memory location.
 *
 * This version does not perform 'sync' operations to enforce memory
 * ordering. It should only be used when there are no memory-ordering
 * constraints. (It should NOT be used for reference counting -
 * use the standard version instead.)
 *
 * @param ptr    address in memory to add incr to
 * @param incr   amount to increment memory location by (signed)
 */
static inline void cvmx_atomic_add64_nosync(int64_t *ptr, int64_t incr)
    if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        __asm__ __volatile__(
            "1: lld   %[tmp], %[val] \n"
            "   daddu %[tmp], %[inc] \n"
            "   scd   %[tmp], %[val] \n"
            "   beqz  %[tmp], 1b \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp)
        __asm__ __volatile__(
            " saad %[inc], (%[base]) \n"
            : [inc] "r" (incr), [base] "r" (ptr)
/**
 * Atomically adds a signed value to a 64 bit (aligned) memory location.
 *
 * Memory access ordering is enforced before/after the atomic operation,
 * so no additional 'sync' instructions are required.
 *
 * @param ptr    address in memory to add incr to
 * @param incr   amount to increment memory location by (signed)
 */
static inline void cvmx_atomic_add64(int64_t *ptr, int64_t incr)
    cvmx_atomic_add64_nosync(ptr, incr);
/**
 * Atomically sets a 64 bit (aligned) memory location to a value
 *
 * @param ptr    address of memory to set
 * @param value  value to set memory location to.
 */
static inline void cvmx_atomic_set64(int64_t *ptr, int64_t value)
/**
 * Returns the current value of a 64 bit (aligned) memory location.
 *
 * @param ptr    Address of memory to get
 * @return Value of the memory
 */
static inline int64_t cvmx_atomic_get64(int64_t *ptr)
{
    return *(volatile int64_t *)ptr;
}
/**
 * Atomically compares the old value with the value at ptr, and if they match,
 * stores new_val to ptr.
 * If *ptr and old_val do not match, the function returns failure immediately.
 * If they do match, the function retries until new_val is atomically stored
 * to *ptr, or until *ptr and old_val no longer match.
 *
 * Does no memory synchronization.
 *
 * @return 1 on success (match and store), 0 on failure
 */
static inline uint32_t cvmx_atomic_compare_and_store32_nosync(uint32_t *ptr, uint32_t old_val, uint32_t new_val)
        __asm__ __volatile__(
            "1: ll   %[tmp], %[val] \n"
            "   bne  %[tmp], %[old], 2f \n"
            "   move %[tmp], %[new_val] \n"
            "   sc   %[tmp], %[val] \n"
            "   beqz %[tmp], 1b \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
            : [old] "r" (old_val), [new_val] "r" (new_val)
/**
 * Atomically compares the old value with the value at ptr, and if they match,
 * stores new_val to ptr.
 * If *ptr and old_val do not match, the function returns failure immediately.
 * If they do match, the function retries until new_val is atomically stored
 * to *ptr, or until *ptr and old_val no longer match.
 *
 * Performs the memory synchronization required to use this as a locking primitive.
 *
 * @return 1 on success (match and store), 0 on failure
 */
static inline uint32_t cvmx_atomic_compare_and_store32(uint32_t *ptr, uint32_t old_val, uint32_t new_val)
    ret = cvmx_atomic_compare_and_store32_nosync(ptr, old_val, new_val);
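
/*
 * Usage sketch (illustrative only, not part of this header's API): a minimal
 * spinlock built on the synchronizing compare-and-store above, since its
 * ordering makes it usable as a locking primitive. The lock word convention
 * (0 = free, 1 = held) and the example_* names are hypothetical.
 */
static inline void example_spin_lock(uint32_t *lock_word)
{
    /* Retry until the word is atomically moved from 0 (free) to 1 (held). */
    while (!cvmx_atomic_compare_and_store32(lock_word, 0, 1))
    {
        /* spin */
    }
}

static inline void example_spin_unlock(uint32_t *lock_word)
{
    /* Release by moving the word back from 1 (held) to 0 (free); the same
       synchronizing primitive keeps the release ordered after the critical
       section. */
    cvmx_atomic_compare_and_store32(lock_word, 1, 0);
}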
/**
 * Atomically compares the old value with the value at ptr, and if they match,
 * stores new_val to ptr.
 * If *ptr and old_val do not match, the function returns failure immediately.
 * If they do match, the function retries until new_val is atomically stored
 * to *ptr, or until *ptr and old_val no longer match.
 *
 * Does no memory synchronization.
 *
 * @return 1 on success (match and store), 0 on failure
 */
static inline uint64_t cvmx_atomic_compare_and_store64_nosync(uint64_t *ptr, uint64_t old_val, uint64_t new_val)
        __asm__ __volatile__(
            "1: lld  %[tmp], %[val] \n"
            "   bne  %[tmp], %[old], 2f \n"
            "   move %[tmp], %[new_val] \n"
            "   scd  %[tmp], %[val] \n"
            "   beqz %[tmp], 1b \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
            : [old] "r" (old_val), [new_val] "r" (new_val)
/**
 * Atomically compares the old value with the value at ptr, and if they match,
 * stores new_val to ptr.
 * If *ptr and old_val do not match, the function returns failure immediately.
 * If they do match, the function retries until new_val is atomically stored
 * to *ptr, or until *ptr and old_val no longer match.
 *
 * Performs the memory synchronization required to use this as a locking primitive.
 *
 * @return 1 on success (match and store), 0 on failure
 */
static inline uint64_t cvmx_atomic_compare_and_store64(uint64_t *ptr, uint64_t old_val, uint64_t new_val)
    ret = cvmx_atomic_compare_and_store64_nosync(ptr, old_val, new_val);
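
/*
 * Usage sketch (illustrative only, not part of this header's API): a lock-free
 * "store the maximum" update built as a read / compute / compare-and-store
 * retry loop. The example_* name and the high-water-mark use case are
 * hypothetical.
 */
static inline void example_store_max64(uint64_t *high_water, uint64_t sample)
{
    uint64_t current;

    do
    {
        current = (uint64_t)cvmx_atomic_get64((int64_t *)high_water);
        if (sample <= current)
            return;  /* an equal or larger value is already stored */
        /* Retry if another core changed the location between the read and
           the store attempt. */
    } while (!cvmx_atomic_compare_and_store64_nosync(high_water, current, sample));
}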
/**
 * Atomically adds a signed value to a 64 bit (aligned) memory location,
 * and returns the previous value.
 *
 * This version does not perform 'sync' operations to enforce memory
 * ordering. It should only be used when there are no memory-ordering
 * constraints. (It should NOT be used for reference counting -
 * use the standard version instead.)
 *
 * @param ptr    address in memory to add incr to
 * @param incr   amount to increment memory location by (signed)
 *
 * @return Value of memory location before increment
 */
static inline int64_t cvmx_atomic_fetch_and_add64_nosync(int64_t *ptr, int64_t incr)
        __asm__ __volatile__(
            "1: lld   %[tmp], %[val] \n"
            "   move  %[ret], %[tmp] \n"
            "   daddu %[tmp], %[inc] \n"
            "   scd   %[tmp], %[val] \n"
            "   beqz  %[tmp], 1b \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
/**
 * Atomically adds a signed value to a 64 bit (aligned) memory location,
 * and returns the previous value.
 *
 * Memory access ordering is enforced before/after the atomic operation,
 * so no additional 'sync' instructions are required.
 *
 * @param ptr    address in memory to add incr to
 * @param incr   amount to increment memory location by (signed)
 *
 * @return Value of memory location before increment
 */
static inline int64_t cvmx_atomic_fetch_and_add64(int64_t *ptr, int64_t incr)
    ret = cvmx_atomic_fetch_and_add64_nosync(ptr, incr);
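
/*
 * Usage sketch (illustrative only, not part of this header's API): handing out
 * unique, monotonically increasing sequence numbers from a shared 64 bit
 * counter. The example_* name and the counter's purpose are hypothetical.
 */
static inline int64_t example_next_sequence_number(int64_t *seq_counter)
{
    /* The value returned is the counter as it was before the increment, so
       concurrent callers each receive a distinct number. */
    return cvmx_atomic_fetch_and_add64(seq_counter, 1);
}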
/**
 * Atomically adds a signed value to a 32 bit (aligned) memory location,
 * and returns the previous value.
 *
 * This version does not perform 'sync' operations to enforce memory
 * ordering. It should only be used when there are no memory-ordering
 * constraints. (It should NOT be used for reference counting -
 * use the standard version instead.)
 *
 * @param ptr    address in memory to add incr to
 * @param incr   amount to increment memory location by (signed)
 *
 * @return Value of memory location before increment
 */
static inline int32_t cvmx_atomic_fetch_and_add32_nosync(int32_t *ptr, int32_t incr)
        __asm__ __volatile__(
            "1: ll   %[tmp], %[val] \n"
            "   move %[ret], %[tmp] \n"
            "   addu %[tmp], %[inc] \n"
            "   sc   %[tmp], %[val] \n"
            "   beqz %[tmp], 1b \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
/**
 * Atomically adds a signed value to a 32 bit (aligned) memory location,
 * and returns the previous value.
 *
 * Memory access ordering is enforced before/after the atomic operation,
 * so no additional 'sync' instructions are required.
 *
 * @param ptr    address in memory to add incr to
 * @param incr   amount to increment memory location by (signed)
 *
 * @return Value of memory location before increment
 */
static inline int32_t cvmx_atomic_fetch_and_add32(int32_t *ptr, int32_t incr)
    ret = cvmx_atomic_fetch_and_add32_nosync(ptr, incr);
/**
 * Atomically sets bits in a 64 bit (aligned) memory location,
 * and returns the previous value.
 *
 * This version does not perform 'sync' operations to enforce memory
 * ordering. It should only be used when there are no memory-ordering
 * constraints.
 *
 * @param ptr    address in memory
 * @param mask   mask of bits to set
 *
 * @return Value of memory location before setting bits
 */
static inline uint64_t cvmx_atomic_fetch_and_bset64_nosync(uint64_t *ptr, uint64_t mask)
        __asm__ __volatile__(
            "1: lld  %[tmp], %[val] \n"
            "   move %[ret], %[tmp] \n"
            "   or   %[tmp], %[msk] \n"
            "   scd  %[tmp], %[val] \n"
            "   beqz %[tmp], 1b \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
/**
 * Atomically sets bits in a 32 bit (aligned) memory location,
 * and returns the previous value.
 *
 * This version does not perform 'sync' operations to enforce memory
 * ordering. It should only be used when there are no memory-ordering
 * constraints.
 *
 * @param ptr    address in memory
 * @param mask   mask of bits to set
 *
 * @return Value of memory location before setting bits
 */
static inline uint32_t cvmx_atomic_fetch_and_bset32_nosync(uint32_t *ptr, uint32_t mask)
        __asm__ __volatile__(
            "1: ll   %[tmp], %[val] \n"
            "   move %[ret], %[tmp] \n"
            "   or   %[tmp], %[msk] \n"
            "   sc   %[tmp], %[val] \n"
            "   beqz %[tmp], 1b \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
/**
 * Atomically clears bits in a 64 bit (aligned) memory location,
 * and returns the previous value.
 *
 * This version does not perform 'sync' operations to enforce memory
 * ordering. It should only be used when there are no memory-ordering
 * constraints.
 *
 * @param ptr    address in memory
 * @param mask   mask of bits to clear
 *
 * @return Value of memory location before clearing bits
 */
static inline uint64_t cvmx_atomic_fetch_and_bclr64_nosync(uint64_t *ptr, uint64_t mask)
        __asm__ __volatile__(
            "1: lld  %[tmp], %[val] \n"
            "   move %[ret], %[tmp] \n"
            "   and  %[tmp], %[msk] \n"
            "   scd  %[tmp], %[val] \n"
            "   beqz %[tmp], 1b \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
/**
 * Atomically clears bits in a 32 bit (aligned) memory location,
 * and returns the previous value.
 *
 * This version does not perform 'sync' operations to enforce memory
 * ordering. It should only be used when there are no memory-ordering
 * constraints.
 *
 * @param ptr    address in memory
 * @param mask   mask of bits to clear
 *
 * @return Value of memory location before clearing bits
 */
static inline uint32_t cvmx_atomic_fetch_and_bclr32_nosync(uint32_t *ptr, uint32_t mask)
        __asm__ __volatile__(
            "1: ll   %[tmp], %[val] \n"
            "   move %[ret], %[tmp] \n"
            "   and  %[tmp], %[msk] \n"
            "   sc   %[tmp], %[val] \n"
            "   beqz %[tmp], 1b \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
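
/*
 * Usage sketch (illustrative only, not part of this header's API): treating a
 * shared 64 bit word as a set of event flags. The bit assignment and the
 * example_* names are hypothetical; only the cvmx_atomic_* calls come from
 * this header.
 */
static inline int example_post_event(uint64_t *flags)
{
    const uint64_t event_bit = 0x1;  /* hypothetical "event pending" flag */
    /* Set the bit and report whether it was already pending. */
    uint64_t old = cvmx_atomic_fetch_and_bset64_nosync(flags, event_bit);
    return (old & event_bit) != 0;
}

static inline int example_consume_event(uint64_t *flags)
{
    const uint64_t event_bit = 0x1;  /* must match the bit used when posting */
    /* Clear the bit and report whether an event had been pending. */
    uint64_t old = cvmx_atomic_fetch_and_bclr64_nosync(flags, event_bit);
    return (old & event_bit) != 0;
}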
/**
 * Atomically swaps the value in a 64 bit (aligned) memory location,
 * and returns the previous value.
 *
 * This version does not perform 'sync' operations to enforce memory
 * ordering. It should only be used when there are no memory-ordering
 * constraints.
 *
 * @param ptr      address in memory
 * @param new_val  new value to write
 *
 * @return Value of memory location before swap operation
 */
static inline uint64_t cvmx_atomic_swap64_nosync(uint64_t *ptr, uint64_t new_val)
        __asm__ __volatile__(
            "1: lld  %[ret], %[val] \n"
            "   move %[tmp], %[new_val] \n"
            "   scd  %[tmp], %[val] \n"
            "   beqz %[tmp], 1b \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
            : [new_val] "r" (new_val)
/**
 * Atomically swaps the value in a 32 bit (aligned) memory location,
 * and returns the previous value.
 *
 * This version does not perform 'sync' operations to enforce memory
 * ordering. It should only be used when there are no memory-ordering
 * constraints.
 *
 * @param ptr      address in memory
 * @param new_val  new value to write
 *
 * @return Value of memory location before swap operation
 */
static inline uint32_t cvmx_atomic_swap32_nosync(uint32_t *ptr, uint32_t new_val)
        __asm__ __volatile__(
            "1: ll   %[ret], %[val] \n"
            "   move %[tmp], %[new_val] \n"
            "   sc   %[tmp], %[val] \n"
            "   beqz %[tmp], 1b \n"
            : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
            : [new_val] "r" (new_val)
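
/*
 * Usage sketch (illustrative only, not part of this header's API): draining a
 * "last error code" slot with an atomic exchange. The slot convention
 * (0 = no error) and the example_* name are hypothetical.
 */
static inline uint32_t example_consume_error_code(uint32_t *error_slot)
{
    /* Returns whatever was last published and leaves 0 behind in one atomic
       step, so, unlike a separate read followed by a write, no concurrent
       update can be overwritten unseen. */
    return cvmx_atomic_swap32_nosync(error_slot, 0);
}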
/**
 * This atomic operation is now named cvmx_atomic_compare_and_store32_nosync
 * and the (deprecated) macro is provided for backward compatibility.
 */
#define cvmx_atomic_compare_and_store_nosync32 cvmx_atomic_compare_and_store32_nosync

/**
 * This atomic operation is now named cvmx_atomic_compare_and_store64_nosync
 * and the (deprecated) macro is provided for backward compatibility.
 */
#define cvmx_atomic_compare_and_store_nosync64 cvmx_atomic_compare_and_store64_nosync
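
/*
 * Usage sketch (illustrative only, not part of this header's API): code written
 * against the old spelling keeps compiling because the macros above forward to
 * the new names. The example_* name is hypothetical.
 */
static inline uint32_t example_legacy_try_claim(uint32_t *word)
{
    /* Expands to cvmx_atomic_compare_and_store32_nosync(word, 0, 1). */
    return cvmx_atomic_compare_and_store_nosync32(word, 0, 1);
}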
#endif /* __CVMX_ATOMIC_H__ */