1 //===-- tsan_rtl_mutex.cc -------------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
12 //===----------------------------------------------------------------------===//
14 #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
15 #include <sanitizer_common/sanitizer_stackdepot.h>
18 #include "tsan_flags.h"
19 #include "tsan_sync.h"
20 #include "tsan_report.h"
21 #include "tsan_symbolize.h"
22 #include "tsan_platform.h"
// Forward declaration; defined at the bottom of this file. Emits a
// ReportTypeDeadlock report from a DDReport produced by the detector.
26 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

// Adapter handed to the deadlock detector (DD) interface. It binds the
// current thread/pc so the detector can unwind stacks and identify threads.
// NOTE(review): the constructor's initializer list / opening brace are on
// elided lines in this view — presumably it stores thr and pc; confirm
// against the full file.
28 struct Callback : DDCallback {
32   Callback(ThreadState *thr, uptr pc)
    // Wire the detector's per-Processor (pt) and per-thread (lt) state slots.
35     DDCallback::pt = thr->proc()->dd_pt;
36     DDCallback::lt = thr->dd_lt;
  // Capture the current call stack as a stack-depot id for deadlock reports.
39   u32 Unwind() override { return CurrentStackId(thr, pc); }
40   int UniqueTid() override { return thr->unique_id; }
// Registers a mutex with the deadlock detector and tags the detector-side
// state with the SyncVar's id so deadlock reports can name this mutex.
// NOTE(review): `cb` is constructed on a line elided from this view —
// presumably `Callback cb(thr, pc);` — confirm against the full file.
43 void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
45   ctx->dd->MutexInit(&cb, &s->dd);
  // Store the sync-object id so the detector can map dd state back to addr.
46   s->dd.ctx = s->GetId();
// Emits a mutex-misuse report of the given type (double lock, bad unlock,
// invalid access, ...) with the current stack and the mutex location.
// Signature continues on an elided line (addr/mid parameters are used below).
49 static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
51   // In Go, these misuses are either impossible, or detected by std lib,
52   // or false positives (e.g. unlock in a different thread).
  // Reports require the thread registry to be locked for the duration.
55   ThreadRegistryLock l(ctx->thread_registry);
56   ScopedReport rep(typ);
58   VarSizeStackTrace trace;
59   ObtainCurrentStack(thr, pc, &trace);
60   rep.AddStack(trace, true);
61   rep.AddLocation(addr, 1);
62   OutputReport(thr, rep);
// Handles mutex creation/initialization: records a shadow write at the
// mutex address (to catch init/use races), creates the SyncVar, and stores
// the creation flags and creation stack.
65 void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
66   DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
67   StatInc(thr, StatMutexCreate);
  // Linker-initialized mutexes may live in .data before shadow is usable,
  // so only touch shadow memory for app-memory, non-linker-init mutexes.
68   if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
  // is_freeing suppresses nested free/race machinery during this synthetic
  // write; it must not already be set.
69     CHECK(!thr->is_freeing);
70     thr->is_freeing = true;
71     MemoryWrite(thr, pc, addr, kSizeLog1);
72     thr->is_freeing = false;
  // Create (or find) the sync object for this address; returned write-locked.
74   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
75   s->SetFlags(flagz & MutexCreationFlagMask);
  // Record where the mutex was first created (not used in Go mode).
76   if (!SANITIZER_GO && s->creation_stack_id == 0)
77     s->creation_stack_id = CurrentStackId(thr, pc);
// Handles mutex destruction: resets deadlock-detector state, optionally
// reports destruction of a locked mutex, resets the SyncVar, and imitates
// a memory write at the address to catch unlock-vs-destroy races.
// NOTE(review): several guard lines (e.g. the null check after the lookup
// and early returns) are elided from this view.
81 void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
82   DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
83   StatInc(thr, StatMutexDestroy);
84   SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Destroy is a no-op for linker-initialized (static) mutexes: they may be
  // legitimately "destroyed" while still in use by other static objects.
87   if ((flagz & MutexFlagLinkerInit)
88       || s->IsFlagSet(MutexFlagLinkerInit)
89       || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
90     // Destroy is no-op for linker-initialized mutexes.
94   if (common_flags()->detect_deadlocks) {
  // Destroy + re-Init leaves the detector slot clean for address reuse.
96     ctx->dd->MutexDestroy(&cb, &s->dd);
97     ctx->dd->MutexInit(&cb, &s->dd);
99   bool unlock_locked = false;
  // Destroying a mutex that still has an owner is reportable (once per
  // mutex — MutexFlagBroken suppresses repeat reports).
100   if (flags()->report_destroy_locked
101       && s->owner_tid != SyncVar::kInvalidTid
102       && !s->IsFlagSet(MutexFlagBroken)) {
103     s->SetFlags(MutexFlagBroken);
104     unlock_locked = true;
  // Snapshot id and last-lock epoch before the SyncVar is reset below.
106   u64 mid = s->GetId();
107   u32 last_lock = s->last_lock;
108   s->Reset(thr->proc());  // must not reset it before the report is printed
  // Report path for destroy-while-locked: restore the stack of the last
  // lock from the trace and emit the report.
112     ThreadRegistryLock l(ctx->thread_registry);
113     ScopedReport rep(ReportTypeMutexDestroyLocked);
115     VarSizeStackTrace trace;
116     ObtainCurrentStack(thr, pc, &trace);
118     FastState last(last_lock);
119     RestoreStack(last.tid(), last.epoch(), &trace, 0);
120     rep.AddStack(trace, true);
121     rep.AddLocation(addr, 1);
122     OutputReport(thr, rep);
  // Re-lookup: the SyncVar may have been recreated while we reported.
124     SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
126       s->Reset(thr->proc());
130   thr->mset.Remove(mid);
131   // Imitate a memory write to catch unlock-destroy races.
132   // Do this outside of sync mutex, because it can report a race which locks
134   if (IsAppMem(addr)) {
135     CHECK(!thr->is_freeing);
136     thr->is_freeing = true;
137     MemoryWrite(thr, pc, addr, kSizeLog1);
138     thr->is_freeing = false;
140   // s will be destroyed and freed in MetaMap::FreeBlock.
// Called before a blocking write-lock attempt. Feeds the deadlock detector
// (lock-order graph) before the thread potentially blocks; try-locks skip
// this because they cannot deadlock.
143 void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
144   DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
145   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
  // Read-lock the metamap entry: we only consult/record DD state here.
146     SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
147     s->UpdateFlags(flagz);
  // Recursive re-lock by the owner cannot introduce a new lock-order edge.
148     if (s->owner_tid != thr->tid) {
149       Callback cb(thr, pc);
150       ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
  // Report outside of the SyncVar lock (a report may need other locks).
152     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Called after a write-lock succeeds. Records the lock event in the trace,
// updates ownership/recursion, acquires the mutex's clocks on first
// acquisition, feeds the deadlock detector, and reports double-lock misuse.
// `rec` is the recursion count being added (used for recursive-lock mode,
// whose branch body is on elided lines in this view).
159 void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
160   DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
161       thr->tid, addr, flagz, rec);
162   if (flagz & MutexFlagRecursiveLock)
  // Synthetic atomic read at the mutex address: catches lock/destroy races.
167     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
168   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
169   s->UpdateFlags(flagz);
  // Every traced event needs its own epoch.
170   thr->fast_state.IncrementEpoch();
171   TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
172   bool report_double_lock = false;
173   if (s->owner_tid == SyncVar::kInvalidTid) {
  // Unowned -> take ownership; remember the lock epoch for destroy reports.
174     CHECK_EQ(s->recursion, 0);
175     s->owner_tid = thr->tid;
176     s->last_lock = thr->fast_state.raw();
177   } else if (s->owner_tid == thr->tid) {
  // Re-lock by the owner: recursion bump happens on elided lines.
178     CHECK_GT(s->recursion, 0);
179   } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
  // Locked by another thread: write-lock while write-locked is a bug.
180     s->SetFlags(MutexFlagBroken);
181     report_double_lock = true;
183   const bool first = s->recursion == 0;
  // First acquisition synchronizes with prior unlockers (both writer and
  // reader release clocks).
186     StatInc(thr, StatMutexLock);
187     AcquireImpl(thr, pc, &s->clock);
188     AcquireImpl(thr, pc, &s->read_clock);
189   } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
190     StatInc(thr, StatMutexRecLock);
192   thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
193   bool pre_lock = false;
194   if (first && common_flags()->detect_deadlocks) {
  // If MutexPreLock was skipped (e.g. annotation-driven locks), do the
  // BeforeLock edge here as well.
195     pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
196         !(flagz & MutexFlagTryLock);
197     Callback cb(thr, pc);
199       ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
200     ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
202   u64 mid = s->GetId();
204   // Can't touch s after this point.
  // Reports are emitted only after the SyncVar lock is released.
206   if (report_double_lock)
207     ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
208   if (first && pre_lock && common_flags()->detect_deadlocks) {
209     Callback cb(thr, pc);
210     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Handles a write-unlock. Records the unlock event, releases the mutex
// clock when recursion drops to zero, updates the deadlock detector, and
// reports unlock-not-owned misuse. Returns the number of recursion levels
// released (`rec`; its declaration/return are on elided lines in this view).
214 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
215   DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  // Synthetic atomic read: catches unlock/destroy races.
217     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
218   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
219   thr->fast_state.IncrementEpoch();
220   TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
222   bool report_bad_unlock = false;
  // Unlocking an unlocked mutex, or one owned by another thread, is a bug
  // (in Go this is handled by the standard library, hence the guard).
223   if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
224     if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
225       s->SetFlags(MutexFlagBroken);
226       report_bad_unlock = true;
  // Recursive-unlock mode drops all recursion levels at once.
229     rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
231     if (s->recursion == 0) {
  // Fully released: clear ownership and publish our clock to later lockers.
232       StatInc(thr, StatMutexUnlock);
233       s->owner_tid = SyncVar::kInvalidTid;
234       ReleaseStoreImpl(thr, pc, &s->clock);
236       StatInc(thr, StatMutexRecUnlock);
239   thr->mset.Del(s->GetId(), true);
240   if (common_flags()->detect_deadlocks && s->recursion == 0 &&
241       !report_bad_unlock) {
242     Callback cb(thr, pc);
243     ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
245   u64 mid = s->GetId();
247   // Can't touch s after this point.
248   if (report_bad_unlock)
249     ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
250   if (common_flags()->detect_deadlocks && !report_bad_unlock) {
251     Callback cb(thr, pc);
252     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Called before a blocking read-lock attempt; mirror of MutexPreLock but
// with wlock=false on the deadlock-detector edge. Try-locks skip this.
257 void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
258   DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
259   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
260     SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
261     s->UpdateFlags(flagz);
262     Callback cb(thr, pc);
263     ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
  // Report outside of the SyncVar lock.
265     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Called after a read-lock succeeds. Records the RLock event, acquires the
// writers' clock, feeds the deadlock detector, and reports read-lock of a
// write-locked mutex.
269 void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
270   DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
271   StatInc(thr, StatMutexReadLock);
  // Synthetic atomic read: catches lock/destroy races.
273     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  // Read-lock on the SyncVar: multiple readers may update concurrently.
274   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
275   s->UpdateFlags(flagz);
276   thr->fast_state.IncrementEpoch();
277   TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
278   bool report_bad_lock = false;
  // A read-lock while some thread write-owns the mutex is a bug.
279   if (s->owner_tid != SyncVar::kInvalidTid) {
280     if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
281       s->SetFlags(MutexFlagBroken);
282       report_bad_lock = true;
  // Synchronize with the last write-unlock; readers only acquire s->clock.
285   AcquireImpl(thr, pc, &s->clock);
286   s->last_lock = thr->fast_state.raw();
287   thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
288   bool pre_lock = false;
289   if (common_flags()->detect_deadlocks) {
  // If MutexPreReadLock was skipped, add the BeforeLock edge here too.
290     pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
291         !(flagz & MutexFlagTryLock);
292     Callback cb(thr, pc);
294       ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
295     ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
297   u64 mid = s->GetId();
299   // Can't touch s after this point.
302     ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
303   if (pre_lock  && common_flags()->detect_deadlocks) {
304     Callback cb(thr, pc);
305     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Handles a read-unlock. Records the RUnlock event, releases into the
// readers' clock (read_clock), updates the deadlock detector, and reports
// read-unlock of a write-locked mutex.
309 void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
310   DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
311   StatInc(thr, StatMutexReadUnlock);
  // Synthetic atomic read: catches unlock/destroy races.
313     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
314   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
315   thr->fast_state.IncrementEpoch();
316   TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
317   bool report_bad_unlock = false;
  // Read-unlock while the mutex is write-owned is a bug.
318   if (s->owner_tid != SyncVar::kInvalidTid) {
319     if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
320       s->SetFlags(MutexFlagBroken);
321       report_bad_unlock = true;
  // Readers publish into read_clock; a later write-locker acquires it.
324   ReleaseImpl(thr, pc, &s->read_clock);
325   if (common_flags()->detect_deadlocks && s->recursion == 0) {
326     Callback cb(thr, pc);
327     ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
329   u64 mid = s->GetId();
331   // Can't touch s after this point.
332   thr->mset.Del(mid, false);
333   if (report_bad_unlock)
334     ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
335   if (common_flags()->detect_deadlocks) {
336     Callback cb(thr, pc);
337     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Unlock when the caller does not know whether it holds a read or a write
// lock (e.g. pthread_rwlock_unlock). Infers the mode from ownership:
// no owner => read unlock; owner == this thread => write unlock; otherwise
// it is a bad unlock. `write` is set on elided lines in this view.
341 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
342   DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  // Synthetic atomic read: catches unlock/destroy races.
344     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
345   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
347   bool report_bad_unlock = false;
348   if (s->owner_tid == SyncVar::kInvalidTid) {
349     // Seems to be read unlock.
351     StatInc(thr, StatMutexReadUnlock);
352     thr->fast_state.IncrementEpoch();
353     TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
354     ReleaseImpl(thr, pc, &s->read_clock);
355   } else if (s->owner_tid == thr->tid) {
356     // Seems to be write unlock.
357     thr->fast_state.IncrementEpoch();
358     TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
359     CHECK_GT(s->recursion, 0);
361     if (s->recursion == 0) {
  // Fully released: clear ownership, publish our clock to later lockers.
362       StatInc(thr, StatMutexUnlock);
363       s->owner_tid = SyncVar::kInvalidTid;
364       ReleaseImpl(thr, pc, &s->clock);
366       StatInc(thr, StatMutexRecUnlock);
368   } else if (!s->IsFlagSet(MutexFlagBroken)) {
  // Owned by a different thread: bad unlock.
369     s->SetFlags(MutexFlagBroken);
370     report_bad_unlock = true;
372   thr->mset.Del(s->GetId(), write);
373   if (common_flags()->detect_deadlocks && s->recursion == 0) {
374     Callback cb(thr, pc);
375     ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
377   u64 mid = s->GetId();
379   // Can't touch s after this point.
380   if (report_bad_unlock)
381     ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
382   if (common_flags()->detect_deadlocks) {
383     Callback cb(thr, pc);
384     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
// Annotation hook: forcibly resets the mutex to an unowned state (used by
// clients to "repair" state TSan got wrong, e.g. after fork).
388 void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
389   DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
390   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
391   s->owner_tid = SyncVar::kInvalidTid;
// Reports an invalid access to a mutex object (e.g. use of a destroyed or
// uninitialized mutex), as signalled by interceptors/annotations.
396 void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
397   DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
398   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
399   u64 mid = s->GetId();
401   ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
// Generic acquire on an address: synchronizes with any prior Release at the
// same address. No-op if the sync object does not exist yet (nothing to
// acquire) or if sync is ignored for this thread.
404 void Acquire(ThreadState *thr, uptr pc, uptr addr) {
405   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
406   if (thr->ignore_sync)
  // Read-lock only: acquiring does not mutate the SyncVar's clock.
408   SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
411   AcquireImpl(thr, pc, &s->clock);
// Per-thread callback for AcquireGlobal: folds each thread's current epoch
// into the calling thread's vector clock. For a running thread the live
// fast_state epoch is used; otherwise the epoch at thread end (epoch1).
415 static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
416   ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
417   ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
418   u64 epoch = tctx->epoch1;
419   if (tctx->status == ThreadStatusRunning)
420     epoch = tctx->thr->fast_state.epoch();
421   thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
// Acquires "everything": synchronizes the current thread with all other
// threads' current epochs (used e.g. by annotations that act as a global
// barrier). No-op when sync is ignored.
424 void AcquireGlobal(ThreadState *thr, uptr pc) {
425   DPrintf("#%d: AcquireGlobal\n", thr->tid);
426   if (thr->ignore_sync)
  // Registry lock keeps the thread list stable while we iterate.
428   ThreadRegistryLock l(ctx->thread_registry);
429   ctx->thread_registry->RunCallbackForEachThreadLocked(
430       UpdateClockCallback, thr);
// Generic release on an address: publishes this thread's clock so a later
// Acquire at the same address synchronizes with us. No-op when sync is
// ignored.
433 void Release(ThreadState *thr, uptr pc, uptr addr) {
434   DPrintf("#%d: Release %zx\n", thr->tid, addr);
435   if (thr->ignore_sync)
437   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
438   thr->fast_state.IncrementEpoch();
439   // Can't increment epoch w/o writing to the trace as well.
440   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
441   ReleaseImpl(thr, pc, &s->clock);
// Release-store on an address: like Release, but overwrites the sync
// clock with this thread's clock instead of merging into it (memory_order
// "store" semantics). No-op when sync is ignored.
445 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
446   DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
447   if (thr->ignore_sync)
449   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
450   thr->fast_state.IncrementEpoch();
451   // Can't increment epoch w/o writing to the trace as well.
452   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
453   ReleaseStoreImpl(thr, pc, &s->clock);
// Per-thread callback for AfterSleep: records each thread's current epoch
// into last_sleep_clock, used to suppress races "caused" by sleeps.
// Mirrors UpdateClockCallback but targets thr->last_sleep_clock.
458 static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
459   ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
460   ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
461   u64 epoch = tctx->epoch1;
462   if (tctx->status == ThreadStatusRunning)
463     epoch = tctx->thr->fast_state.epoch();
464   thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
// Called after sleep-like functions return: snapshots the sleep stack and
// all threads' epochs (via UpdateSleepClockCallback) so reports can note
// "as if synchronized via sleep". No-op when sync is ignored.
467 void AfterSleep(ThreadState *thr, uptr pc) {
468   DPrintf("#%d: AfterSleep %zx\n", thr->tid);
469   if (thr->ignore_sync)
471   thr->last_sleep_stack_id = CurrentStackId(thr, pc);
472   ThreadRegistryLock l(ctx->thread_registry);
473   ctx->thread_registry->RunCallbackForEachThreadLocked(
474       UpdateSleepClockCallback, thr);
// Core acquire: merge the sync clock `c` into the current thread's vector
// clock (happens-before edge from the releaser to us). No-op when sync is
// ignored. The early `return;` after the guard is on an elided line.
478 void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
479   if (thr->ignore_sync)
  // Keep our own slot up to date before merging.
481   thr->clock.set(thr->fast_state.epoch());
482   thr->clock.acquire(&thr->proc()->clock_cache, c);
483   StatInc(thr, StatSyncAcquire);
// Core release: merge the current thread's vector clock into sync clock `c`
// so a later acquirer synchronizes with us. Also advances fast_synch_epoch.
// No-op when sync is ignored (early `return;` is on an elided line).
486 void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
487   if (thr->ignore_sync)
489   thr->clock.set(thr->fast_state.epoch());
490   thr->fast_synch_epoch = thr->fast_state.epoch();
491   thr->clock.release(&thr->proc()->clock_cache, c);
492   StatInc(thr, StatSyncRelease);
// Core release-store: overwrite sync clock `c` with this thread's clock
// (rather than merging, as ReleaseImpl does). No-op when sync is ignored
// (early `return;` is on an elided line).
495 void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
496   if (thr->ignore_sync)
498   thr->clock.set(thr->fast_state.epoch());
499   thr->fast_synch_epoch = thr->fast_state.epoch();
500   thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
501   StatInc(thr, StatSyncRelease);
// Combined acquire+release on sync clock `c` in one pass (acq_rel),
// counting both stats. No-op when sync is ignored (early `return;` is on
// an elided line).
504 void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
505   if (thr->ignore_sync)
507   thr->clock.set(thr->fast_state.epoch());
508   thr->fast_synch_epoch = thr->fast_state.epoch();
509   thr->clock.acq_rel(&thr->proc()->clock_cache, c);
510   StatInc(thr, StatSyncAcquire);
511   StatInc(thr, StatSyncRelease);
// Builds and emits a deadlock report from the detector's DDReport `r`:
// one mutex + thread per edge of the lock cycle, plus up to two stacks per
// edge (acquisition sites). A null-check on `r` is presumably on an elided
// line — confirm against the full file.
514 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
517   ThreadRegistryLock l(ctx->thread_registry);
518   ScopedReport rep(ReportTypeDeadlock);
519   for (int i = 0; i < r->n; i++) {
520     rep.AddMutex(r->loop[i].mtx_ctx0);
521     rep.AddUniqueTid((int)r->loop[i].thr_ctx);
522     rep.AddThread((int)r->loop[i].thr_ctx);
  // Placeholder pc used when a real stack could not be extracted below.
524   uptr dummy_pc = 0x42;
525   for (int i = 0; i < r->n; i++) {
  // Second stack per edge only if second_deadlock_stack is enabled.
526     for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
527       u32 stk = r->loop[i].stk[j];
  // 0xffffffff marks an invalid/unavailable stack id in the detector.
528       if (stk && stk != 0xffffffff) {
529         rep.AddStack(StackDepotGet(stk), true);
531         // Sometimes we fail to extract the stack trace (FIXME: investigate),
532         // but we should still produce some stack trace in the report.
533         rep.AddStack(StackTrace(&dummy_pc, 1), true);
537   OutputReport(thr, rep);
540 }  // namespace __tsan