//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {
26 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
28 struct Callback : DDCallback {
32 Callback(ThreadState *thr, uptr pc)
35 DDCallback::pt = thr->dd_pt;
36 DDCallback::lt = thr->dd_lt;
39 virtual u32 Unwind() {
40 return CurrentStackId(thr, pc);
42 virtual int UniqueTid() {
43 return thr->unique_id;
47 void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
49 ctx->dd->MutexInit(&cb, &s->dd);
50 s->dd.ctx = s->GetId();
53 static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
55 // In Go, these misuses are either impossible, or detected by std lib,
56 // or false positives (e.g. unlock in a different thread).
59 ThreadRegistryLock l(ctx->thread_registry);
60 ScopedReport rep(typ);
62 VarSizeStackTrace trace;
63 ObtainCurrentStack(thr, pc, &trace);
64 rep.AddStack(trace, true);
65 rep.AddLocation(addr, 1);
66 OutputReport(thr, rep);
69 void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
70 bool rw, bool recursive, bool linker_init) {
71 DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
72 StatInc(thr, StatMutexCreate);
73 if (!linker_init && IsAppMem(addr)) {
74 CHECK(!thr->is_freeing);
75 thr->is_freeing = true;
76 MemoryWrite(thr, pc, addr, kSizeLog1);
77 thr->is_freeing = false;
79 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
81 s->is_recursive = recursive;
82 s->is_linker_init = linker_init;
83 if (kCppMode && s->creation_stack_id == 0)
84 s->creation_stack_id = CurrentStackId(thr, pc);
88 void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
89 DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
90 StatInc(thr, StatMutexDestroy);
92 // Global mutexes not marked as LINKER_INITIALIZED
93 // cause tons of not interesting reports, so just ignore it.
94 if (IsGlobalVar(addr))
98 CHECK(!thr->is_freeing);
99 thr->is_freeing = true;
100 MemoryWrite(thr, pc, addr, kSizeLog1);
101 thr->is_freeing = false;
103 SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
106 if (common_flags()->detect_deadlocks) {
107 Callback cb(thr, pc);
108 ctx->dd->MutexDestroy(&cb, &s->dd);
109 ctx->dd->MutexInit(&cb, &s->dd);
111 bool unlock_locked = false;
112 if (flags()->report_destroy_locked
113 && s->owner_tid != SyncVar::kInvalidTid
116 unlock_locked = true;
118 u64 mid = s->GetId();
119 u32 last_lock = s->last_lock;
121 s->Reset(thr); // must not reset it before the report is printed
124 ThreadRegistryLock l(ctx->thread_registry);
125 ScopedReport rep(ReportTypeMutexDestroyLocked);
127 VarSizeStackTrace trace;
128 ObtainCurrentStack(thr, pc, &trace);
130 FastState last(last_lock);
131 RestoreStack(last.tid(), last.epoch(), &trace, 0);
132 rep.AddStack(trace, true);
133 rep.AddLocation(addr, 1);
134 OutputReport(thr, rep);
137 SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
143 thr->mset.Remove(mid);
144 // s will be destroyed and freed in MetaMap::FreeBlock.
147 void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
148 DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
151 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
152 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
153 thr->fast_state.IncrementEpoch();
154 TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
155 bool report_double_lock = false;
156 if (s->owner_tid == SyncVar::kInvalidTid) {
157 CHECK_EQ(s->recursion, 0);
158 s->owner_tid = thr->tid;
159 s->last_lock = thr->fast_state.raw();
160 } else if (s->owner_tid == thr->tid) {
161 CHECK_GT(s->recursion, 0);
162 } else if (flags()->report_mutex_bugs && !s->is_broken) {
164 report_double_lock = true;
166 if (s->recursion == 0) {
167 StatInc(thr, StatMutexLock);
168 AcquireImpl(thr, pc, &s->clock);
169 AcquireImpl(thr, pc, &s->read_clock);
170 } else if (!s->is_recursive) {
171 StatInc(thr, StatMutexRecLock);
174 thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
175 if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
176 Callback cb(thr, pc);
178 ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
179 ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
181 u64 mid = s->GetId();
183 // Can't touch s after this point.
184 if (report_double_lock)
185 ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
186 if (common_flags()->detect_deadlocks) {
187 Callback cb(thr, pc);
188 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
192 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
193 DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
195 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
196 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
197 thr->fast_state.IncrementEpoch();
198 TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
200 bool report_bad_unlock = false;
201 if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
202 if (flags()->report_mutex_bugs && !s->is_broken) {
204 report_bad_unlock = true;
207 rec = all ? s->recursion : 1;
209 if (s->recursion == 0) {
210 StatInc(thr, StatMutexUnlock);
211 s->owner_tid = SyncVar::kInvalidTid;
212 ReleaseStoreImpl(thr, pc, &s->clock);
214 StatInc(thr, StatMutexRecUnlock);
217 thr->mset.Del(s->GetId(), true);
218 if (common_flags()->detect_deadlocks && s->recursion == 0 &&
219 !report_bad_unlock) {
220 Callback cb(thr, pc);
221 ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
223 u64 mid = s->GetId();
225 // Can't touch s after this point.
226 if (report_bad_unlock)
227 ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
228 if (common_flags()->detect_deadlocks && !report_bad_unlock) {
229 Callback cb(thr, pc);
230 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
235 void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
236 DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
237 StatInc(thr, StatMutexReadLock);
239 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
240 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
241 thr->fast_state.IncrementEpoch();
242 TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
243 bool report_bad_lock = false;
244 if (s->owner_tid != SyncVar::kInvalidTid) {
245 if (flags()->report_mutex_bugs && !s->is_broken) {
247 report_bad_lock = true;
250 AcquireImpl(thr, pc, &s->clock);
251 s->last_lock = thr->fast_state.raw();
252 thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
253 if (common_flags()->detect_deadlocks && s->recursion == 0) {
254 Callback cb(thr, pc);
256 ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
257 ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
259 u64 mid = s->GetId();
261 // Can't touch s after this point.
263 ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
264 if (common_flags()->detect_deadlocks) {
265 Callback cb(thr, pc);
266 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
270 void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
271 DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
272 StatInc(thr, StatMutexReadUnlock);
274 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
275 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
276 thr->fast_state.IncrementEpoch();
277 TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
278 bool report_bad_unlock = false;
279 if (s->owner_tid != SyncVar::kInvalidTid) {
280 if (flags()->report_mutex_bugs && !s->is_broken) {
282 report_bad_unlock = true;
285 ReleaseImpl(thr, pc, &s->read_clock);
286 if (common_flags()->detect_deadlocks && s->recursion == 0) {
287 Callback cb(thr, pc);
288 ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
290 u64 mid = s->GetId();
292 // Can't touch s after this point.
293 thr->mset.Del(mid, false);
294 if (report_bad_unlock)
295 ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
296 if (common_flags()->detect_deadlocks) {
297 Callback cb(thr, pc);
298 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
302 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
303 DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
305 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
306 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
308 bool report_bad_unlock = false;
309 if (s->owner_tid == SyncVar::kInvalidTid) {
310 // Seems to be read unlock.
312 StatInc(thr, StatMutexReadUnlock);
313 thr->fast_state.IncrementEpoch();
314 TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
315 ReleaseImpl(thr, pc, &s->read_clock);
316 } else if (s->owner_tid == thr->tid) {
317 // Seems to be write unlock.
318 thr->fast_state.IncrementEpoch();
319 TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
320 CHECK_GT(s->recursion, 0);
322 if (s->recursion == 0) {
323 StatInc(thr, StatMutexUnlock);
324 s->owner_tid = SyncVar::kInvalidTid;
325 ReleaseImpl(thr, pc, &s->clock);
327 StatInc(thr, StatMutexRecUnlock);
329 } else if (!s->is_broken) {
331 report_bad_unlock = true;
333 thr->mset.Del(s->GetId(), write);
334 if (common_flags()->detect_deadlocks && s->recursion == 0) {
335 Callback cb(thr, pc);
336 ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
338 u64 mid = s->GetId();
340 // Can't touch s after this point.
341 if (report_bad_unlock)
342 ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
343 if (common_flags()->detect_deadlocks) {
344 Callback cb(thr, pc);
345 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
349 void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
350 DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
351 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
352 s->owner_tid = SyncVar::kInvalidTid;
357 void Acquire(ThreadState *thr, uptr pc, uptr addr) {
358 DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
359 if (thr->ignore_sync)
361 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
362 AcquireImpl(thr, pc, &s->clock);
366 static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
367 ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
368 ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
369 if (tctx->status == ThreadStatusRunning)
370 thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
372 thr->clock.set(tctx->tid, tctx->epoch1);
375 void AcquireGlobal(ThreadState *thr, uptr pc) {
376 DPrintf("#%d: AcquireGlobal\n", thr->tid);
377 if (thr->ignore_sync)
379 ThreadRegistryLock l(ctx->thread_registry);
380 ctx->thread_registry->RunCallbackForEachThreadLocked(
381 UpdateClockCallback, thr);
384 void Release(ThreadState *thr, uptr pc, uptr addr) {
385 DPrintf("#%d: Release %zx\n", thr->tid, addr);
386 if (thr->ignore_sync)
388 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
389 thr->fast_state.IncrementEpoch();
390 // Can't increment epoch w/o writing to the trace as well.
391 TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
392 ReleaseImpl(thr, pc, &s->clock);
396 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
397 DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
398 if (thr->ignore_sync)
400 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
401 thr->fast_state.IncrementEpoch();
402 // Can't increment epoch w/o writing to the trace as well.
403 TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
404 ReleaseStoreImpl(thr, pc, &s->clock);
409 static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
410 ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
411 ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
412 if (tctx->status == ThreadStatusRunning)
413 thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
415 thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
418 void AfterSleep(ThreadState *thr, uptr pc) {
419 DPrintf("#%d: AfterSleep %zx\n", thr->tid);
420 if (thr->ignore_sync)
422 thr->last_sleep_stack_id = CurrentStackId(thr, pc);
423 ThreadRegistryLock l(ctx->thread_registry);
424 ctx->thread_registry->RunCallbackForEachThreadLocked(
425 UpdateSleepClockCallback, thr);
429 void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
430 if (thr->ignore_sync)
432 thr->clock.set(thr->fast_state.epoch());
433 thr->clock.acquire(&thr->clock_cache, c);
434 StatInc(thr, StatSyncAcquire);
437 void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
438 if (thr->ignore_sync)
440 thr->clock.set(thr->fast_state.epoch());
441 thr->fast_synch_epoch = thr->fast_state.epoch();
442 thr->clock.release(&thr->clock_cache, c);
443 StatInc(thr, StatSyncRelease);
446 void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
447 if (thr->ignore_sync)
449 thr->clock.set(thr->fast_state.epoch());
450 thr->fast_synch_epoch = thr->fast_state.epoch();
451 thr->clock.ReleaseStore(&thr->clock_cache, c);
452 StatInc(thr, StatSyncRelease);
455 void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
456 if (thr->ignore_sync)
458 thr->clock.set(thr->fast_state.epoch());
459 thr->fast_synch_epoch = thr->fast_state.epoch();
460 thr->clock.acq_rel(&thr->clock_cache, c);
461 StatInc(thr, StatSyncAcquire);
462 StatInc(thr, StatSyncRelease);
465 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
468 ThreadRegistryLock l(ctx->thread_registry);
469 ScopedReport rep(ReportTypeDeadlock);
470 for (int i = 0; i < r->n; i++) {
471 rep.AddMutex(r->loop[i].mtx_ctx0);
472 rep.AddUniqueTid((int)r->loop[i].thr_ctx);
473 rep.AddThread((int)r->loop[i].thr_ctx);
475 uptr dummy_pc = 0x42;
476 for (int i = 0; i < r->n; i++) {
477 for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
478 u32 stk = r->loop[i].stk[j];
480 rep.AddStack(StackDepotGet(stk), true);
482 // Sometimes we fail to extract the stack trace (FIXME: investigate),
483 // but we should still produce some stack trace in the report.
484 rep.AddStack(StackTrace(&dummy_pc, 1), true);
488 OutputReport(thr, rep);
491 } // namespace __tsan