//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {

class MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void LockSlow();
};

class MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};
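
// Usage sketch (illustrative, not part of this header): a global
// StaticSpinMutex is zero-initialized and usable without a constructor;
// Init() resets it explicitly. `g_init_mu` and `InitOnce` are hypothetical
// names, and SpinMutexLock is the scoped guard typedef'ed at the end of
// this file.
//
//   static StaticSpinMutex g_init_mu;  // zero-initialized, ready to use
//   void InitOnce() {
//     SpinMutexLock l(&g_init_mu);
//     // ... short, non-blocking critical section ...
//   }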

// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
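
// Usage sketch (illustrative, not part of this header): Post(n) lets up to n
// Wait() calls return; a Wait() with no pending posts parks the thread.
//
//   Semaphore sem;
//   // Waiting side:
//   sem.Wait();    // blocks until a matching Post
//   // Waking side:
//   sem.Post(2);   // releases up to two waiters
//
// Destruction-safety means the last thread returned from Wait may free the
// semaphore, i.e. Post must not touch the object after releasing the final
// waiter.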

typedef int MutexType;

enum {
  // Used as sentinel and to catch unassigned types
  // (should not be used as real Mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutexes must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in MutexMeta::can_lock table.
  // The leaf mutexes can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};

// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
#define SANITIZER_CHECK_DEADLOCKS (SANITIZER_DEBUG && !SANITIZER_GO)

#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
  MutexType type;
  const char *name;
  // The table fixes what mutexes can be locked under what mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then Bar mutex can be locked while under Foo mutex.
  // Can also contain the special MutexLeaf/MutexMulti marks.
  MutexType can_lock[10];
};
#endif
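
// Table sketch (illustrative; MutexTypeFoo/MutexTypeBar are the hypothetical
// types from the comment above):
//
//   MutexMeta mutex_meta[] = {
//       {MutexTypeFoo, "Foo", {MutexTypeBar}},  // Bar can be locked under Foo
//       {MutexTypeBar, "Bar", {MutexLeaf}},     // Bar is a leaf mutex
//   };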

class CheckedMutex {
 public:
  constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
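
// Usage sketch (illustrative, not part of this header): runtime entry points
// can assert that no internal mutexes leak out to user code; with deadlock
// checking disabled the call compiles to nothing.
//
//   void OnRuntimeExit() {  // hypothetical function
//     CheckedMutex::CheckNoLocks();
//   }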

// Reader-writer mutex.
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class MUTEX Mutex : CheckedMutex {
 public:
  constexpr Mutex(MutexType type = MutexUnchecked) : CheckedMutex(type) {}

  void Lock() ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    const uptr kMaxSpinIters = 1500;
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
        state = atomic_load(&state_, memory_order_relaxed);
        DCHECK_NE(state & kWriterSpinWait, 0);
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
    }
  }

  void Unlock() RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer =
          (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) +
                    (wake_readers << kReaderLockShift);
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  void ReadLock() ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    bool locked;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      locked =
          (state & kReaderLockMask) == 0 &&
          (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
      if (LIKELY(!locked))
        new_state = state + kReaderLockInc;
      else
        new_state = state + kWaitingReaderInc;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_acquire)));
    if (UNLIKELY(locked))
      readers_.Wait();
    DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
    DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
  }

  void ReadUnlock() RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
      new_state = state - kReaderLockInc;
      wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. This behavior, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned.
  void CheckWriteLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  // - number of readers holding the lock,
  //   if non-zero, the mutex is read-locked
  // - number of waiting readers,
  //   if non-zero, the mutex is write-locked
  // - number of waiting writers,
  //   if non-zero, the mutex is read- or write-locked
  // Additionally it has 2 bits:
  // - writer lock
  //   if set, the mutex is write-locked
  // - a writer is awake and spin-waiting
  //   the flag is used to prevent the thundering herd problem
  //   (new writers are not woken if this flag is set)
  //
  // Writers support active spinning, readers do not.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // Writers hand off the mutex to readers: after wake-up, readers
  // already assume ownership of the mutex (they don't need to do any
  // state updates). But the mutex is not handed off to writers:
  // after wake-up, writers compete to lock the mutex again.
  // This is needed to allow repeated write locks even in the presence
  // of other blocked writers.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
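
  // Illustrative sanity checks (a sketch derived from the constants above):
  // the three 20-bit counters and the two flag bits must occupy disjoint bits
  // of the 64-bit state word.
  static_assert((kReaderLockMask & kWaitingReaderMask) == 0, "overlap");
  static_assert((kWaitingReaderMask & kWaitingWriterMask) == 0, "overlap");
  static_assert(((kReaderLockMask | kWaitingReaderMask | kWaitingWriterMask) &
                 (kWriterLock | kWriterSpinWait)) == 0,
                "counters overlap flag bits");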

  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};
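
// Usage sketch (illustrative, not part of this header): writers take the
// mutex exclusively, readers share it. The scoped Lock/ReadLock guards
// typedef'ed at the end of this file are the usual way to pair these calls.
//
//   Mutex mu;          // MutexUnchecked by default: no deadlock checking
//   mu.Lock();         // exclusive
//   // ... mutate shared state ...
//   mu.Unlock();
//   mu.ReadLock();     // shared
//   // ... read shared state ...
//   mu.ReadUnlock();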

void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);

class MUTEX BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_ {0} {}
  BlockingMutex();
  void Lock() ACQUIRE();
  void Unlock() RELEASE();

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. This behavior, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned, and assumes callers to be generally
  // well-behaved.
  void CheckLocked() const CHECK_LOCKED();

 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};

// Reader-writer spin mutex.
class MUTEX RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() ACQUIRE() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() RELEASE() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() ACQUIRE_SHARED() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() RELEASE_SHARED() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex &) = delete;
  void operator=(const RWMutex &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
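
// Usage sketch (illustrative, not part of this header): the RAII guards
// unlock on every path out of the scope. `g_mu`, `Update` and `Query` are
// hypothetical names.
//
//   Mutex g_mu;
//   void Update() {
//     Lock l(&g_mu);      // GenericScopedLock<Mutex>: Lock()/Unlock()
//     // ... exclusive access ...
//   }
//   void Query() {
//     ReadLock r(&g_mu);  // GenericScopedReadLock<Mutex>: ReadLock()/ReadUnlock()
//     // ... shared access ...
//   }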

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H