//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}

void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

void SyncVar::Reset(ThreadState *thr) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  if (thr == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&thr->clock_cache);
    read_clock.Reset(&thr->clock_cache);
  }
}

MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

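// The meta shadow: for every kMetaShadowCell-aligned cell of application
// memory, MemToMeta() yields a u32 slot that holds either an MBlock index
// (heap block descriptor) or the head of a list of SyncVar objects,
// distinguished by kFlagBlock/kFlagSync.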
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}

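// Releases all metadata (block descriptors and sync objects) attached to
// the meta shadow cells covering [p, p + sz).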
void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    *meta = 0;
    while (idx != 0) {
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(thr);
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
}

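// Returns the block descriptor for the heap block containing p, walking
// past any sync objects chained on the same meta cell, or null if p does
// not belong to a heap block.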
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}

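// Looks up (and optionally creates) the SyncVar for addr and returns it with
// its mutex held. New objects are published by CAS-ing the head of the
// per-cell list; if the race for the same addr is lost, the speculative
// allocation is freed, otherwise it is reused on the next retry.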
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    // Walk the sync objects chained on this meta cell.
    for (u32 idx = idx0; idx != 0 && !(idx & kFlagBlock);) {
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          // Another thread published a SyncVar for addr first.
          mys->Reset(thr);
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
    // CAS failure updated idx0 with the current list head; retry.
  }
}

void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; the caller must guarantee that there are no
  // concurrent accesses to either region (e.g. by stopping the world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  sptr inc = 1;
  if (dst > src) {
    // Copy backwards so that an overlapping destination is not clobbered.
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

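// Returns the thread's local allocator caches to the global allocators so
// that an idle thread does not hold on to freed metadata.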
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan