//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0
          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};
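
// Informal memory-ordering sketch (illustrative only; mu, data, and use() are
// hypothetical, not part of this header): the acquire exchange in
// TryLock/LockSlow pairs with the release store in Unlock, so writes made
// while holding the mutex are visible to the next thread that acquires it.
//
//   // Thread 1:                  // Thread 2:
//   mu.Lock();                    mu.Lock();
//   data = 42;                    use(data);  // sees 42 if T2 locks after T1
//   mu.Unlock();                  mu.Unlock();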

class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};
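
// Usage sketch (illustrative only; the names below are hypothetical). Unlike
// StaticSpinMutex, SpinMutex runs a constructor, so it suits dynamically
// created objects rather than globals:
//
//   class ExampleRegistry {
//    public:
//     void Add(u32 id) {
//       SpinMutexLock l(&mu_);  // typedef defined at the end of this file
//       // ... mutate internal state under the lock ...
//     }
//    private:
//     SpinMutex mu_;
//   };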

class BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_ {0} {}
  BlockingMutex();
  void Lock();
  void Unlock();

  // This function does not check that the calling thread actually owns the
  // mutex. Such a check, while more strictly correct, causes problems in
  // cases like StopTheWorld, where a parent thread owns the mutex but a child
  // thread checks that it is locked. Rather than maintaining complex state to
  // work around those situations, the check only verifies that the mutex is
  // owned by someone, and assumes callers to be generally well-behaved.
  void CheckLocked();

 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
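
// Usage sketch (illustrative only; names are hypothetical). BlockingMutex
// sleeps in the kernel instead of spinning, so it fits locks that may be held
// for a long time. LINKER_INITIALIZED lets a global instance avoid a static
// constructor:
//
//   static BlockingMutex g_report_mu(LINKER_INITIALIZED);
//
//   void PrintReport() {
//     BlockingMutexLock l(&g_report_mu);  // typedef defined at end of file
//     // ... write the report while holding the lock ...
//   }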

// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator=(const RWMutex&);
};
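
// Usage sketch (illustrative only; names are hypothetical). Readers can hold
// the mutex concurrently; a writer excludes both readers and other writers:
//
//   static RWMutex g_table_mu;
//
//   uptr Lookup(uptr key) {
//     RWMutexReadLock l(&g_table_mu);  // shared access
//     // ... read the table ...
//   }
//
//   void Update(uptr key, uptr val) {
//     RWMutexLock l(&g_table_mu);  // exclusive access
//     // ... modify the table ...
//   }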
178 template<typename MutexType>
179 class GenericScopedLock {
181 explicit GenericScopedLock(MutexType *mu)
186 ~GenericScopedLock() {
193 GenericScopedLock(const GenericScopedLock&);
194 void operator=(const GenericScopedLock&);
197 template<typename MutexType>
198 class GenericScopedReadLock {
200 explicit GenericScopedReadLock(MutexType *mu)
205 ~GenericScopedReadLock() {
212 GenericScopedReadLock(const GenericScopedReadLock&);
213 void operator=(const GenericScopedReadLock&);

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
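
// Usage sketch (illustrative only; names are hypothetical). The scoped-lock
// typedefs above give RAII locking, keeping Lock/Unlock balanced on every
// return path. StaticSpinMutex in particular has no constructor, so a
// zero-initialized global starts out unlocked and needs no Init() call:
//
//   static StaticSpinMutex g_mu;
//
//   bool Register(uptr value) {
//     SpinMutexLock l(&g_mu);
//     if (!Validate(value))  // Validate() is hypothetical
//       return false;        // g_mu is released here as well
//     // ... record the value ...
//     return true;
//   }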

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H