//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
9 #ifndef SCUDO_ATOMIC_H_
10 #define SCUDO_ATOMIC_H_
12 #include "internal_defs.h"
17 memory_order_relaxed = 0,
18 memory_order_consume = 1,
19 memory_order_acquire = 2,
20 memory_order_release = 3,
21 memory_order_acq_rel = 4,
22 memory_order_seq_cst = 5
24 COMPILER_CHECK(memory_order_relaxed == __ATOMIC_RELAXED);
25 COMPILER_CHECK(memory_order_consume == __ATOMIC_CONSUME);
26 COMPILER_CHECK(memory_order_acquire == __ATOMIC_ACQUIRE);
27 COMPILER_CHECK(memory_order_release == __ATOMIC_RELEASE);
28 COMPILER_CHECK(memory_order_acq_rel == __ATOMIC_ACQ_REL);
29 COMPILER_CHECK(memory_order_seq_cst == __ATOMIC_SEQ_CST);
33 volatile Type ValDoNotUse;
38 volatile Type ValDoNotUse;
43 volatile Type ValDoNotUse;
48 volatile Type ValDoNotUse;
53 // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
54 ALIGNED(8) volatile Type ValDoNotUse;
59 volatile Type ValDoNotUse;
63 INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
64 DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
66 __atomic_load(&A->ValDoNotUse, &V, MO);
71 INLINE void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
72 DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
73 __atomic_store(&A->ValDoNotUse, &V, MO);
76 INLINE void atomic_thread_fence(memory_order) { __sync_synchronize(); }
79 INLINE typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
81 DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
82 return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
86 INLINE typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
88 DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
89 return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
93 INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
95 DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
97 __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
101 template <typename T>
102 INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
103 typename T::Type Xchg,
105 return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
109 template <typename T>
110 INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
111 typename T::Type Xchg,
113 return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
// Clutter-reducing helpers.
119 template <typename T>
120 INLINE typename T::Type atomic_load_relaxed(const volatile T *A) {
121 return atomic_load(A, memory_order_relaxed);
124 template <typename T>
125 INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
126 atomic_store(A, V, memory_order_relaxed);
129 template <typename T>
130 INLINE typename T::Type atomic_compare_exchange(volatile T *A,
131 typename T::Type Cmp,
132 typename T::Type Xchg) {
133 atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
139 #endif // SCUDO_ATOMIC_H_