1 //===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of ThreadSanitizer/AddressSanitizer runtime.
12 //===----------------------------------------------------------------------===//
14 #ifndef SANITIZER_MUTEX_H
15 #define SANITIZER_MUTEX_H
17 #include "sanitizer_atomic.h"
18 #include "sanitizer_internal_defs.h"
19 #include "sanitizer_libc.h"
21 namespace __sanitizer {
// Statically-initializable spin lock built on one atomic byte:
// 0 = unlocked, 1 = locked.
// NOTE(review): this view is elided — the method signatures (presumably
// Init/TryLock/Unlock/CheckLocked/Lock) are not visible; comments below
// describe only the statement fragments that are shown.
23 class StaticSpinMutex {
// Reset to unlocked. Relaxed order suffices: initialization has no prior
// critical section to synchronize with.
26 atomic_store(&state_, 0, memory_order_relaxed);
// Attempt to take the lock: exchange state_ to 1 with acquire ordering;
// returns true iff the previous value was 0 (i.e. we acquired it).
36 return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
// Release the lock; the release store publishes writes made while holding it.
40 atomic_store(&state_, 0, memory_order_release);
// Debug check that the mutex is currently held (state_ == 1). Relaxed load
// is fine for an assertion-only read.
44 CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
48 atomic_uint8_t state_;
// Contended path: spin with backoff until the lock is acquired.
// NOINLINE keeps this cold loop out of the fast-path caller.
50 void NOINLINE LockSlow() {
51 for (int i = 0;; i++) {
// Yield the CPU between attempts; the condition/backoff threshold on the
// loop counter appears elided from this view — TODO confirm.
55 internal_sched_yield();
// Test-and-test-and-set: first peek with a cheap relaxed load, and only
// attempt the acquire exchange when the lock looks free (reduces
// cache-line ping-pong under contention).
56 if (atomic_load(&state_, memory_order_relaxed) == 0
57 && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
// SpinMutex: a StaticSpinMutex variant intended for non-static lifetime.
// NOTE(review): the constructor (presumably calling Init()) is elided from
// this view — confirm against the full header.
63 class SpinMutex : public StaticSpinMutex {
// Non-copyable: copy operations declared but not defined (pre-C++11 idiom
// for disabling copies).
70 SpinMutex(const SpinMutex&);
71 void operator=(const SpinMutex&);
// BlockingMutex: OS-assisted mutex that blocks (rather than spins) when
// contended; the OS primitive lives in opaque_storage_.
// NOTE(review): the two same-signature constructors below are clearly
// alternative branches of an elided preprocessor conditional (per the
// Windows comment) — the #if/#else lines are not visible in this view.
77 // Windows does not currently support LinkerInitialized
78 explicit BlockingMutex(LinkerInitialized);
// Non-Windows branch: constexpr constructor allows true static (linker)
// initialization with zeroed storage and no owner.
80 explicit constexpr BlockingMutex(LinkerInitialized)
81 : opaque_storage_ {0, }, owner_(0) {}
// Rationale for the (elided) CheckLocked-style assertion: it deliberately
// checks only that the mutex is held by *someone*, not by the caller.
87 // This function does not guarantee an explicit check that the calling thread
88 // is the thread which owns the mutex. This behavior, while more strictly
89 // correct, causes problems in cases like StopTheWorld, where a parent thread
90 // owns the mutex but a child checks that it is locked. Rather than
91 // maintaining complex state to work around those situations, the check only
92 // checks that the mutex is owned, and assumes callers to be generally
// Raw storage for the platform mutex; 8-byte alignment required because
97 // Solaris mutex_t has a member that requires 64-bit alignment.
98 ALIGNED(8) uptr opaque_storage_[10];
99 uptr owner_; // for debugging
// Reader-writer spin lock encoded in a single u32: the kWriteLock bit marks
// an exclusive writer, and each reader adds kReadLock to the count.
// NOTE(review): method signatures, the kUnlocked/kWriteLock/kReadLock
// constants, and several closing braces are elided from this view; comments
// describe only the visible fragments.
102 // Reader-writer spin mutex.
// Initialize to the unlocked state (relaxed: nothing to synchronize with yet).
106 atomic_store(&state_, kUnlocked, memory_order_relaxed);
// Destructor/teardown check: the mutex must not be held by anyone.
110 CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
// Fast-path write lock: single CAS from kUnlocked to kWriteLock with acquire
// ordering; on failure the (elided) slow path presumably takes over.
115 if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
116 memory_order_acquire))
// Write unlock: subtract the write bit with release ordering; assert the
// write bit was actually set before the subtraction.
122 u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
123 DCHECK_NE(prev & kWriteLock, 0);
// Fast-path read lock: optimistically bump the reader count; if a writer
// held the lock (write bit set in prev) the elided slow path must back off.
128 u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
129 if ((prev & kWriteLock) == 0)
// Read unlock: drop one reader. Assertions: no writer may be active, and
// there must have been at least one reader to release.
135 u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
136 DCHECK_EQ(prev & kWriteLock, 0);
137 DCHECK_GT(prev & ~kWriteLock, 0);
// Debug check that the mutex is held in some mode (read or write).
142 CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
146 atomic_uint32_t state_;
// Contended write-lock path: yield, then re-attempt the CAS only when the
// lock looks free (relaxed peek before the acquire CAS).
154 void NOINLINE LockSlow() {
155 for (int i = 0;; i++) {
// Backoff threshold on the loop counter appears elided — TODO confirm.
159 internal_sched_yield();
160 u32 cmp = atomic_load(&state_, memory_order_relaxed);
161 if (cmp == kUnlocked &&
162 atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
163 memory_order_acquire))
// Contended read-lock path: spin until the writer bit clears. The reader
// count was already added optimistically on the fast path, so this only
// needs to observe the write bit going away (acquire load).
168 void NOINLINE ReadLockSlow() {
169 for (int i = 0;; i++) {
173 internal_sched_yield();
174 u32 prev = atomic_load(&state_, memory_order_acquire);
175 if ((prev & kWriteLock) == 0)
// Non-copyable: declared-but-undefined copy operations (pre-C++11 idiom).
180 RWMutex(const RWMutex&);
181 void operator = (const RWMutex&);
// RAII scoped lock over any mutex type exposing Lock()/Unlock().
// NOTE(review): the constructor/destructor bodies (presumably mu->Lock()
// and mu_->Unlock()) and the mu_ member are elided from this view.
184 template<typename MutexType>
185 class GenericScopedLock {
// Acquires the mutex for the lifetime of this object.
187 explicit GenericScopedLock(MutexType *mu)
// Releases the mutex on scope exit.
192 ~GenericScopedLock() {
// Non-copyable: declared-but-undefined copy operations.
199 GenericScopedLock(const GenericScopedLock&);
200 void operator=(const GenericScopedLock&);
// RAII scoped *read* lock over any mutex type exposing a reader interface
// (presumably ReadLock()/ReadUnlock() — bodies elided from this view).
203 template<typename MutexType>
204 class GenericScopedReadLock {
// Acquires the mutex in shared (read) mode for this object's lifetime.
206 explicit GenericScopedReadLock(MutexType *mu)
// Releases the read lock on scope exit.
211 ~GenericScopedReadLock() {
// Non-copyable: declared-but-undefined copy operations.
218 GenericScopedReadLock(const GenericScopedReadLock&);
219 void operator=(const GenericScopedReadLock&);
222 typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
223 typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
224 typedef GenericScopedLock<RWMutex> RWMutexLock;
225 typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
227 } // namespace __sanitizer
229 #endif // SANITIZER_MUTEX_H