1 //===-- tsan_rtl_mutex.cc -------------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
12 //===----------------------------------------------------------------------===//
14 #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
15 #include <sanitizer_common/sanitizer_stackdepot.h>
18 #include "tsan_flags.h"
19 #include "tsan_sync.h"
20 #include "tsan_report.h"
21 #include "tsan_symbolize.h"
22 #include "tsan_platform.h"
// Forward declaration; the definition appears at the end of this file.
// Emits a report for a lock-order cycle found by the deadlock detector.
26 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
// Adapter handed to the common deadlock detector (DD): wires the detector's
// per-physical-thread (pt) and per-logical-thread (lt) state to this thread,
// and lets the detector capture stacks / ids via TSan primitives.
// NOTE(review): member declarations, the constructor's init-list/body braces
// and the closing "};" appear to have been dropped from this extract.
28 struct Callback : DDCallback {
32 Callback(ThreadState *thr, uptr pc)
35 DDCallback::pt = thr->proc()->dd_pt;
36 DDCallback::lt = thr->dd_lt;
// Capture the current stack into the stack depot for DD reports.
39 u32 Unwind() override { return CurrentStackId(thr, pc); }
40 int UniqueTid() override { return thr->unique_id; }
// Registers a mutex with the deadlock detector and tags the detector-side
// state with the SyncVar's id so reports can refer back to this mutex.
// NOTE(review): the "Callback cb(thr, pc);" local and the closing brace
// appear to have been dropped from this extract.
43 void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
45 ctx->dd->MutexInit(&cb, &s->dd);
46 s->dd.ctx = s->GetId();
// Produces a mutex-misuse report (double lock, bad unlock, invalid access,
// ...) of the given type, attaching the current stack and the location of
// the mutex address.
// NOTE(review): the parameter-continuation line (addr/mid), the Go
// early-return guard the comment below refers to, and the closing brace
// appear to have been dropped from this extract.
49 static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
51 // In Go, these misuses are either impossible, or detected by std lib,
52 // or false positives (e.g. unlock in a different thread).
// Reports require the thread registry lock so thread info is stable.
55 ThreadRegistryLock l(ctx->thread_registry);
56 ScopedReport rep(typ);
58 VarSizeStackTrace trace;
59 ObtainCurrentStack(thr, pc, &trace);
60 rep.AddStack(trace, true);
61 rep.AddLocation(addr, 1);
62 OutputReport(thr, rep);
// Interceptor hook: a mutex was initialized at `addr` with creation flags
// `flagz`. Emits a shadow write (to catch races with the mutex memory
// itself), creates/looks up the SyncVar, and records creation flags/stack.
65 void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
66 DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
67 StatInc(thr, StatMutexCreate);
// Linker-initialized mutexes get no shadow write: their init may run
// before shadow memory is usable. is_freeing marks this access as a
// destruction-like write for report classification.
68 if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
69 CHECK(!thr->is_freeing);
70 thr->is_freeing = true;
71 MemoryWrite(thr, pc, addr, kSizeLog1);
72 thr->is_freeing = false;
// GetOrCreateAndLock(..., true) returns the SyncVar write-locked.
// NOTE(review): the unlock of `s` / closing braces appear to have been
// dropped from this extract.
74 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
75 s->SetFlags(flagz & MutexCreationFlagMask);
// Creation stacks are only kept in the C++ runtime (not Go), and only
// the first creation stack is recorded.
76 if (!SANITIZER_GO && s->creation_stack_id == 0)
77 s->creation_stack_id = CurrentStackId(thr, pc);
// Interceptor hook: a mutex at `addr` is being destroyed. Optionally
// reports destroy-of-a-locked-mutex, resets detector and SyncVar state,
// and imitates a memory write to catch unlock-vs-destroy races.
// NOTE(review): multiple lines (early returns, unlock calls, closing
// braces, and presumably an `if (unlock_locked)` guard around the report
// section) appear to have been dropped from this extract.
81 void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
82 DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
83 StatInc(thr, StatMutexDestroy);
// Only act if a SyncVar already exists for this address.
84 SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
87 if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit)) {
88 // Destroy is no-op for linker-initialized mutexes.
// Forget the mutex in the deadlock detector; re-init so the address can
// be reused as a fresh mutex.
92 if (common_flags()->detect_deadlocks) {
94 ctx->dd->MutexDestroy(&cb, &s->dd);
95 ctx->dd->MutexInit(&cb, &s->dd);
// Destroying a mutex that still has an owner is a (reportable) misuse;
// MutexFlagBroken suppresses duplicate reports for the same mutex.
97 bool unlock_locked = false;
98 if (flags()->report_destroy_locked
99 && s->owner_tid != SyncVar::kInvalidTid
100 && !s->IsFlagSet(MutexFlagBroken)) {
101 s->SetFlags(MutexFlagBroken);
102 unlock_locked = true;
// Snapshot id and last-lock epoch before resetting the SyncVar.
104 u64 mid = s->GetId();
105 u32 last_lock = s->last_lock;
107 s->Reset(thr->proc());  // must not reset it before the report is printed
110 ThreadRegistryLock l(ctx->thread_registry);
111 ScopedReport rep(ReportTypeMutexDestroyLocked);
113 VarSizeStackTrace trace;
114 ObtainCurrentStack(thr, pc, &trace);
// Reconstruct the stack of the last lock from the trace via the
// (tid, epoch) packed in last_lock.
116 FastState last(last_lock);
117 RestoreStack(last.tid(), last.epoch(), &trace, 0);
118 rep.AddStack(trace, true);
119 rep.AddLocation(addr, 1);
120 OutputReport(thr, rep);
// Re-lookup: the SyncVar may have been recreated while the report was
// being produced; reset it again if so.
122 SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
124 s->Reset(thr->proc());
128 thr->mset.Remove(mid);
129 // Imitate a memory write to catch unlock-destroy races.
130 // Do this outside of sync mutex, because it can report a race which locks
132 if (IsAppMem(addr)) {
133 CHECK(!thr->is_freeing);
134 thr->is_freeing = true;
135 MemoryWrite(thr, pc, addr, kSizeLog1);
136 thr->is_freeing = false;
138 // s will be destroyed and freed in MetaMap::FreeBlock.
// Called before a blocking (non-try) lock attempt: feeds the attempt to the
// deadlock detector so lock-order cycles are caught even if the lock then
// blocks forever. Try-locks skip this (they cannot deadlock).
141 void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
142 DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
143 if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
// Read-lock the SyncVar (last arg false) — we only update DD state here.
144 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
145 s->UpdateFlags(flagz);
// Recursive re-lock by the owner cannot introduce a new DD edge.
146 if (s->owner_tid != thr->tid) {
147 Callback cb(thr, pc);
148 ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
// NOTE(review): the unlock of `s` and the condition guarding this report
// call appear to have been dropped from this extract.
150 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Called after a write-lock succeeded. Updates ownership/recursion on the
// SyncVar, acquires the mutex's clocks into this thread, records the lock
// event in the trace, and informs the deadlock detector. `rec` is the
// recursion count being added (relevant for MutexFlagRecursiveLock).
// NOTE(review): several lines (the non-recursive rec=1 path, else branches,
// closing braces, and the unlock of `s`) appear to have been dropped from
// this extract.
157 void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
158 DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
159 thr->tid, addr, flagz, rec);
160 if (flagz & MutexFlagRecursiveLock)
// Shadow read on the mutex memory itself catches use-after-destroy.
165 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
166 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
167 s->UpdateFlags(flagz);
// Every trace event needs its own epoch.
168 thr->fast_state.IncrementEpoch();
169 TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
170 bool report_double_lock = false;
171 if (s->owner_tid == SyncVar::kInvalidTid) {
// Fresh acquisition: take ownership and remember the lock site epoch.
172 CHECK_EQ(s->recursion, 0);
173 s->owner_tid = thr->tid;
174 s->last_lock = thr->fast_state.raw();
175 } else if (s->owner_tid == thr->tid) {
// Recursive re-lock by the current owner.
176 CHECK_GT(s->recursion, 0);
177 } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
// Locked while owned by another thread — double lock misuse.
178 s->SetFlags(MutexFlagBroken);
179 report_double_lock = true;
// Only the first (outermost) acquisition synchronizes clocks.
181 const bool first = s->recursion == 0;
184 StatInc(thr, StatMutexLock);
// A write lock acquires both the writers' and readers' release clocks.
185 AcquireImpl(thr, pc, &s->clock);
186 AcquireImpl(thr, pc, &s->read_clock);
187 } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
188 StatInc(thr, StatMutexRecLock);
// Record in the thread's held-mutex set (write=true).
190 thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
191 bool pre_lock = false;
192 if (first && common_flags()->detect_deadlocks) {
// If the caller skipped MutexPreLock, do the before-lock edge here too.
193 pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
194 !(flagz & MutexFlagTryLock);
195 Callback cb(thr, pc);
197 ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
198 ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
200 u64 mid = s->GetId();
202 // Can't touch s after this point.
204 if (report_double_lock)
205 ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
// Deadlock reports are produced outside the SyncVar lock.
206 if (first && pre_lock && common_flags()->detect_deadlocks) {
207 Callback cb(thr, pc);
208 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Called on write-unlock. Decrements recursion (or drops all of it for
// MutexFlagRecursiveUnlock), releases the thread's clock into the mutex on
// full unlock, and returns the number of recursion levels released.
// NOTE(review): lines for the recursion decrement, `else` keyword, closing
// braces, `int rec` declaration, unlock of `s`, and `return rec;` appear to
// have been dropped from this extract.
212 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
213 DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
215 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
216 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
// New epoch + trace event for the unlock.
217 thr->fast_state.IncrementEpoch();
218 TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
// Unlocking a mutex we don't own (or that isn't locked) is a misuse;
// suppressed for Go (see comment in ReportMutexMisuse).
220 bool report_bad_unlock = false;
221 if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
222 if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
223 s->SetFlags(MutexFlagBroken);
224 report_bad_unlock = true;
// Recursive-unlock drops the whole recursion count at once.
227 rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
229 if (s->recursion == 0) {
// Full unlock: release happens-before edge via the mutex clock.
230 StatInc(thr, StatMutexUnlock);
231 s->owner_tid = SyncVar::kInvalidTid;
232 ReleaseStoreImpl(thr, pc, &s->clock);
234 StatInc(thr, StatMutexRecUnlock);
237 thr->mset.Del(s->GetId(), true);
238 if (common_flags()->detect_deadlocks && s->recursion == 0 &&
239 !report_bad_unlock) {
240 Callback cb(thr, pc);
241 ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
243 u64 mid = s->GetId();
245 // Can't touch s after this point.
246 if (report_bad_unlock)
247 ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
248 if (common_flags()->detect_deadlocks && !report_bad_unlock) {
249 Callback cb(thr, pc);
250 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Read-lock counterpart of MutexPreLock: informs the deadlock detector of a
// blocking read-lock attempt (wr=false) before it may block.
255 void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
256 DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
257 if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
258 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
259 s->UpdateFlags(flagz);
260 Callback cb(thr, pc);
261 ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
// NOTE(review): the unlock of `s` and the condition guarding this report
// call appear to have been dropped from this extract.
263 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Called after a read-lock succeeded. Read-locks the SyncVar (readers can
// proceed concurrently), acquires the writers' release clock, and informs
// the deadlock detector. Reports if a write owner currently holds the lock.
// NOTE(review): closing braces, the unlock of `s`, and the
// `if (report_bad_lock)` guard before the misuse report appear to have been
// dropped from this extract.
267 void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
268 DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
269 StatInc(thr, StatMutexReadLock);
271 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
272 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
273 s->UpdateFlags(flagz);
274 thr->fast_state.IncrementEpoch();
275 TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
// A read-lock while some thread write-owns the mutex is a misuse.
276 bool report_bad_lock = false;
277 if (s->owner_tid != SyncVar::kInvalidTid) {
278 if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
279 s->SetFlags(MutexFlagBroken);
280 report_bad_lock = true;
// Readers acquire only the write-release clock (s->clock), not
// read_clock — readers don't synchronize with each other.
283 AcquireImpl(thr, pc, &s->clock);
284 s->last_lock = thr->fast_state.raw();
285 thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
286 bool pre_lock = false;
287 if (common_flags()->detect_deadlocks) {
288 pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
289 !(flagz & MutexFlagTryLock);
290 Callback cb(thr, pc);
292 ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
293 ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
295 u64 mid = s->GetId();
297 // Can't touch s after this point.
300 ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
301 if (pre_lock && common_flags()->detect_deadlocks) {
302 Callback cb(thr, pc);
303 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Called on read-unlock. Releases into the readers' clock (read_clock) so a
// later write-lock acquires it, and reports if the mutex is currently
// write-owned (read-unlock of a write-locked mutex is a misuse).
// NOTE(review): closing braces and the unlock of `s` appear to have been
// dropped from this extract.
307 void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
308 DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
309 StatInc(thr, StatMutexReadUnlock);
311 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
312 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
313 thr->fast_state.IncrementEpoch();
314 TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
315 bool report_bad_unlock = false;
316 if (s->owner_tid != SyncVar::kInvalidTid) {
317 if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
318 s->SetFlags(MutexFlagBroken);
319 report_bad_unlock = true;
// Readers release into read_clock; writers will acquire it on lock.
322 ReleaseImpl(thr, pc, &s->read_clock);
323 if (common_flags()->detect_deadlocks && s->recursion == 0) {
324 Callback cb(thr, pc);
325 ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
327 u64 mid = s->GetId();
329 // Can't touch s after this point.
330 thr->mset.Del(mid, false);
331 if (report_bad_unlock)
332 ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
333 if (common_flags()->detect_deadlocks) {
334 Callback cb(thr, pc);
335 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Unlock when the caller doesn't know whether it holds a read or a write
// lock (e.g. pthread_rwlock_unlock): infers the mode from owner_tid and
// dispatches to the corresponding release logic.
// NOTE(review): the `bool write = true;` declaration, the recursion
// decrement, else/closing-brace lines, and the unlock of `s` appear to have
// been dropped from this extract.
339 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
340 DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
342 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
343 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
345 bool report_bad_unlock = false;
346 if (s->owner_tid == SyncVar::kInvalidTid) {
347 // Seems to be read unlock.
349 StatInc(thr, StatMutexReadUnlock);
350 thr->fast_state.IncrementEpoch();
351 TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
352 ReleaseImpl(thr, pc, &s->read_clock);
353 } else if (s->owner_tid == thr->tid) {
354 // Seems to be write unlock.
355 thr->fast_state.IncrementEpoch();
356 TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
357 CHECK_GT(s->recursion, 0);
359 if (s->recursion == 0) {
360 StatInc(thr, StatMutexUnlock);
361 s->owner_tid = SyncVar::kInvalidTid;
362 ReleaseImpl(thr, pc, &s->clock);
364 StatInc(thr, StatMutexRecUnlock);
// Owned by another thread — cannot legally unlock it.
366 } else if (!s->IsFlagSet(MutexFlagBroken)) {
367 s->SetFlags(MutexFlagBroken);
368 report_bad_unlock = true;
370 thr->mset.Del(s->GetId(), write);
371 if (common_flags()->detect_deadlocks && s->recursion == 0) {
372 Callback cb(thr, pc);
373 ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
375 u64 mid = s->GetId();
377 // Can't touch s after this point.
378 if (report_bad_unlock)
379 ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
380 if (common_flags()->detect_deadlocks) {
381 Callback cb(thr, pc);
382 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Forcibly resets ownership of a mutex (used e.g. to "repair" state after
// operations like pthread_mutex_consistent). Clears the owner tid.
// NOTE(review): the recursion reset and unlock/closing lines appear to have
// been dropped from this extract.
386 void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
387 DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
388 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
389 s->owner_tid = SyncVar::kInvalidTid;
// Reports an invalid access to a mutex object (e.g. use of a destroyed or
// uninitialized mutex as signaled by the interceptors).
// NOTE(review): the unlock of `s` and the closing brace appear to have been
// dropped from this extract.
394 void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
395 DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
396 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
397 u64 mid = s->GetId();
399 ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
// Acquire semantics on an arbitrary address: if a SyncVar exists for it,
// pull its release clock into this thread's vector clock. No-op when the
// thread has sync tracking disabled or no SyncVar exists.
// NOTE(review): the `return;` after the ignore_sync check, the null check
// on `s`, and the unlock/closing lines appear to have been dropped from
// this extract.
402 void Acquire(ThreadState *thr, uptr pc, uptr addr) {
403 DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
404 if (thr->ignore_sync)
406 SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
409 AcquireImpl(thr, pc, &s->clock);
// Per-thread callback for AcquireGlobal: merges each thread's current
// position into the acquiring thread's clock — live threads contribute
// their current epoch, finished threads their final epoch (epoch1).
// NOTE(review): the `else` line and closing brace appear to have been
// dropped from this extract.
413 static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
414 ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
415 ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
416 if (tctx->status == ThreadStatusRunning)
417 thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
419 thr->clock.set(tctx->tid, tctx->epoch1);
// Acquire from ALL threads at once: synchronizes the current thread with
// every other thread's current epoch (used e.g. for global barriers).
// Holds the thread registry lock while iterating.
// NOTE(review): the `return;` after the ignore_sync check and the closing
// brace appear to have been dropped from this extract.
422 void AcquireGlobal(ThreadState *thr, uptr pc) {
423 DPrintf("#%d: AcquireGlobal\n", thr->tid);
424 if (thr->ignore_sync)
426 ThreadRegistryLock l(ctx->thread_registry);
427 ctx->thread_registry->RunCallbackForEachThreadLocked(
428 UpdateClockCallback, thr);
// Release semantics on an arbitrary address: merge this thread's clock into
// the address's SyncVar clock so a later Acquire observes it.
// NOTE(review): the `return;` after the ignore_sync check and the
// unlock/closing lines appear to have been dropped from this extract.
431 void Release(ThreadState *thr, uptr pc, uptr addr) {
432 DPrintf("#%d: Release %zx\n", thr->tid, addr);
433 if (thr->ignore_sync)
435 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
436 thr->fast_state.IncrementEpoch();
437 // Can't increment epoch w/o writing to the trace as well.
438 TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
439 ReleaseImpl(thr, pc, &s->clock);
// Release-store semantics: overwrite (rather than merge into) the SyncVar
// clock with this thread's clock — used when the releasing store fully
// replaces prior synchronization on the address.
// NOTE(review): the `return;` after the ignore_sync check and the
// unlock/closing lines appear to have been dropped from this extract.
443 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
444 DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
445 if (thr->ignore_sync)
447 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
448 thr->fast_state.IncrementEpoch();
449 // Can't increment epoch w/o writing to the trace as well.
450 TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
451 ReleaseStoreImpl(thr, pc, &s->clock);
// Per-thread callback for AfterSleep: records each thread's position into
// last_sleep_clock (live threads: current epoch; finished: epoch1). Used to
// suppress false positives for memory touched only before a sleep point.
// NOTE(review): the `else` line and closing brace appear to have been
// dropped from this extract.
456 static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
457 ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
458 ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
459 if (tctx->status == ThreadStatusRunning)
460 thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
462 thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
// Called after a sleep-like function returns: snapshots the sleep stack and
// all threads' clocks into last_sleep_clock/last_sleep_stack_id for later
// race-report annotation ("as if synchronized via sleep").
// NOTE(review): the `return;` after the ignore_sync check and the closing
// brace appear to have been dropped from this extract.
465 void AfterSleep(ThreadState *thr, uptr pc) {
466 DPrintf("#%d: AfterSleep %zx\n", thr->tid);
467 if (thr->ignore_sync)
469 thr->last_sleep_stack_id = CurrentStackId(thr, pc);
470 ThreadRegistryLock l(ctx->thread_registry);
471 ctx->thread_registry->RunCallbackForEachThreadLocked(
472 UpdateSleepClockCallback, thr);
// Core acquire: pulls sync clock `c` into the calling thread's vector
// clock (after pinning the thread's own epoch). Caller holds the SyncVar.
// NOTE(review): the `return;` after the ignore_sync check and the closing
// brace appear to have been dropped from this extract.
476 void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
477 if (thr->ignore_sync)
479 thr->clock.set(thr->fast_state.epoch());
480 thr->clock.acquire(&thr->proc()->clock_cache, c);
481 StatInc(thr, StatSyncAcquire);
// Core release: merges the thread's clock into sync clock `c` and caches
// the epoch in fast_synch_epoch. Caller holds the SyncVar.
// NOTE(review): the `return;` after the ignore_sync check and the closing
// brace appear to have been dropped from this extract.
484 void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
485 if (thr->ignore_sync)
487 thr->clock.set(thr->fast_state.epoch());
488 thr->fast_synch_epoch = thr->fast_state.epoch();
489 thr->clock.release(&thr->proc()->clock_cache, c);
490 StatInc(thr, StatSyncRelease);
// Core release-store: overwrites sync clock `c` with the thread's clock
// (ReleaseStore vs. release merge). Caller holds the SyncVar.
// NOTE(review): the `return;` after the ignore_sync check and the closing
// brace appear to have been dropped from this extract.
493 void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
494 if (thr->ignore_sync)
496 thr->clock.set(thr->fast_state.epoch());
497 thr->fast_synch_epoch = thr->fast_state.epoch();
498 thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
499 StatInc(thr, StatSyncRelease);
// Combined acquire+release in one pass over sync clock `c` (acq_rel).
// Caller holds the SyncVar.
// NOTE(review): the `return;` after the ignore_sync check and the closing
// brace appear to have been dropped from this extract.
502 void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
503 if (thr->ignore_sync)
505 thr->clock.set(thr->fast_state.epoch());
506 thr->fast_synch_epoch = thr->fast_state.epoch();
507 thr->clock.acq_rel(&thr->proc()->clock_cache, c);
508 StatInc(thr, StatSyncAcquire);
509 StatInc(thr, StatSyncRelease);
// Emits a deadlock report from a DDReport lock cycle: one mutex + thread
// per cycle edge, plus the lock-site stacks (up to two per edge when
// second_deadlock_stack is enabled).
// NOTE(review): the null check on `r`, else/closing-brace lines, and the
// function's closing brace appear to have been dropped from this extract.
512 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
515 ThreadRegistryLock l(ctx->thread_registry);
516 ScopedReport rep(ReportTypeDeadlock);
// First pass: register every mutex and thread participating in the cycle.
517 for (int i = 0; i < r->n; i++) {
518 rep.AddMutex(r->loop[i].mtx_ctx0);
519 rep.AddUniqueTid((int)r->loop[i].thr_ctx);
520 rep.AddThread((int)r->loop[i].thr_ctx);
// Second pass: attach lock-acquisition stacks; a dummy one-frame stack is
// used when the depot id is missing/invalid so the report is never empty.
522 uptr dummy_pc = 0x42;
523 for (int i = 0; i < r->n; i++) {
524 for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
525 u32 stk = r->loop[i].stk[j];
526 if (stk && stk != 0xffffffff) {
527 rep.AddStack(StackDepotGet(stk), true);
529 // Sometimes we fail to extract the stack trace (FIXME: investigate),
530 // but we should still produce some stack trace in the report.
531 rep.AddStack(StackTrace(&dummy_pc, 1), true);
535 OutputReport(thr, rep);
538 } // namespace __tsan