//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"
26 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
28 struct Callback : DDCallback {
32 Callback(ThreadState *thr, uptr pc)
35 DDCallback::pt = thr->dd_pt;
36 DDCallback::lt = thr->dd_lt;
39 u32 Unwind() override { return CurrentStackId(thr, pc); }
40 int UniqueTid() override { return thr->unique_id; }
43 void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
45 ctx->dd->MutexInit(&cb, &s->dd);
46 s->dd.ctx = s->GetId();
49 static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
51 // In Go, these misuses are either impossible, or detected by std lib,
52 // or false positives (e.g. unlock in a different thread).
55 ThreadRegistryLock l(ctx->thread_registry);
56 ScopedReport rep(typ);
58 VarSizeStackTrace trace;
59 ObtainCurrentStack(thr, pc, &trace);
60 rep.AddStack(trace, true);
61 rep.AddLocation(addr, 1);
62 OutputReport(thr, rep);
65 void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
66 bool rw, bool recursive, bool linker_init) {
67 DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
68 StatInc(thr, StatMutexCreate);
69 if (!linker_init && IsAppMem(addr)) {
70 CHECK(!thr->is_freeing);
71 thr->is_freeing = true;
72 MemoryWrite(thr, pc, addr, kSizeLog1);
73 thr->is_freeing = false;
75 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
77 s->is_recursive = recursive;
78 s->is_linker_init = linker_init;
79 if (kCppMode && s->creation_stack_id == 0)
80 s->creation_stack_id = CurrentStackId(thr, pc);
84 void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
85 DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
86 StatInc(thr, StatMutexDestroy);
88 // Global mutexes not marked as LINKER_INITIALIZED
89 // cause tons of not interesting reports, so just ignore it.
90 if (IsGlobalVar(addr))
94 CHECK(!thr->is_freeing);
95 thr->is_freeing = true;
96 MemoryWrite(thr, pc, addr, kSizeLog1);
97 thr->is_freeing = false;
99 SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
102 if (common_flags()->detect_deadlocks) {
103 Callback cb(thr, pc);
104 ctx->dd->MutexDestroy(&cb, &s->dd);
105 ctx->dd->MutexInit(&cb, &s->dd);
107 bool unlock_locked = false;
108 if (flags()->report_destroy_locked
109 && s->owner_tid != SyncVar::kInvalidTid
112 unlock_locked = true;
114 u64 mid = s->GetId();
115 u32 last_lock = s->last_lock;
117 s->Reset(thr); // must not reset it before the report is printed
120 ThreadRegistryLock l(ctx->thread_registry);
121 ScopedReport rep(ReportTypeMutexDestroyLocked);
123 VarSizeStackTrace trace;
124 ObtainCurrentStack(thr, pc, &trace);
126 FastState last(last_lock);
127 RestoreStack(last.tid(), last.epoch(), &trace, 0);
128 rep.AddStack(trace, true);
129 rep.AddLocation(addr, 1);
130 OutputReport(thr, rep);
133 SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
139 thr->mset.Remove(mid);
140 // s will be destroyed and freed in MetaMap::FreeBlock.
143 void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
144 DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
147 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
148 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
149 thr->fast_state.IncrementEpoch();
150 TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
151 bool report_double_lock = false;
152 if (s->owner_tid == SyncVar::kInvalidTid) {
153 CHECK_EQ(s->recursion, 0);
154 s->owner_tid = thr->tid;
155 s->last_lock = thr->fast_state.raw();
156 } else if (s->owner_tid == thr->tid) {
157 CHECK_GT(s->recursion, 0);
158 } else if (flags()->report_mutex_bugs && !s->is_broken) {
160 report_double_lock = true;
162 if (s->recursion == 0) {
163 StatInc(thr, StatMutexLock);
164 AcquireImpl(thr, pc, &s->clock);
165 AcquireImpl(thr, pc, &s->read_clock);
166 } else if (!s->is_recursive) {
167 StatInc(thr, StatMutexRecLock);
170 thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
171 if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
172 Callback cb(thr, pc);
174 ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
175 ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
177 u64 mid = s->GetId();
179 // Can't touch s after this point.
180 if (report_double_lock)
181 ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
182 if (common_flags()->detect_deadlocks) {
183 Callback cb(thr, pc);
184 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
188 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
189 DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
191 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
192 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
193 thr->fast_state.IncrementEpoch();
194 TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
196 bool report_bad_unlock = false;
197 if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
198 if (flags()->report_mutex_bugs && !s->is_broken) {
200 report_bad_unlock = true;
203 rec = all ? s->recursion : 1;
205 if (s->recursion == 0) {
206 StatInc(thr, StatMutexUnlock);
207 s->owner_tid = SyncVar::kInvalidTid;
208 ReleaseStoreImpl(thr, pc, &s->clock);
210 StatInc(thr, StatMutexRecUnlock);
213 thr->mset.Del(s->GetId(), true);
214 if (common_flags()->detect_deadlocks && s->recursion == 0 &&
215 !report_bad_unlock) {
216 Callback cb(thr, pc);
217 ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
219 u64 mid = s->GetId();
221 // Can't touch s after this point.
222 if (report_bad_unlock)
223 ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
224 if (common_flags()->detect_deadlocks && !report_bad_unlock) {
225 Callback cb(thr, pc);
226 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
231 void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
232 DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
233 StatInc(thr, StatMutexReadLock);
235 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
236 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
237 thr->fast_state.IncrementEpoch();
238 TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
239 bool report_bad_lock = false;
240 if (s->owner_tid != SyncVar::kInvalidTid) {
241 if (flags()->report_mutex_bugs && !s->is_broken) {
243 report_bad_lock = true;
246 AcquireImpl(thr, pc, &s->clock);
247 s->last_lock = thr->fast_state.raw();
248 thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
249 if (common_flags()->detect_deadlocks && s->recursion == 0) {
250 Callback cb(thr, pc);
252 ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
253 ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
255 u64 mid = s->GetId();
257 // Can't touch s after this point.
259 ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
260 if (common_flags()->detect_deadlocks) {
261 Callback cb(thr, pc);
262 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
266 void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
267 DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
268 StatInc(thr, StatMutexReadUnlock);
270 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
271 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
272 thr->fast_state.IncrementEpoch();
273 TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
274 bool report_bad_unlock = false;
275 if (s->owner_tid != SyncVar::kInvalidTid) {
276 if (flags()->report_mutex_bugs && !s->is_broken) {
278 report_bad_unlock = true;
281 ReleaseImpl(thr, pc, &s->read_clock);
282 if (common_flags()->detect_deadlocks && s->recursion == 0) {
283 Callback cb(thr, pc);
284 ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
286 u64 mid = s->GetId();
288 // Can't touch s after this point.
289 thr->mset.Del(mid, false);
290 if (report_bad_unlock)
291 ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
292 if (common_flags()->detect_deadlocks) {
293 Callback cb(thr, pc);
294 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
298 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
299 DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
301 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
302 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
304 bool report_bad_unlock = false;
305 if (s->owner_tid == SyncVar::kInvalidTid) {
306 // Seems to be read unlock.
308 StatInc(thr, StatMutexReadUnlock);
309 thr->fast_state.IncrementEpoch();
310 TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
311 ReleaseImpl(thr, pc, &s->read_clock);
312 } else if (s->owner_tid == thr->tid) {
313 // Seems to be write unlock.
314 thr->fast_state.IncrementEpoch();
315 TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
316 CHECK_GT(s->recursion, 0);
318 if (s->recursion == 0) {
319 StatInc(thr, StatMutexUnlock);
320 s->owner_tid = SyncVar::kInvalidTid;
321 ReleaseImpl(thr, pc, &s->clock);
323 StatInc(thr, StatMutexRecUnlock);
325 } else if (!s->is_broken) {
327 report_bad_unlock = true;
329 thr->mset.Del(s->GetId(), write);
330 if (common_flags()->detect_deadlocks && s->recursion == 0) {
331 Callback cb(thr, pc);
332 ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
334 u64 mid = s->GetId();
336 // Can't touch s after this point.
337 if (report_bad_unlock)
338 ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
339 if (common_flags()->detect_deadlocks) {
340 Callback cb(thr, pc);
341 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
345 void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
346 DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
347 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
348 s->owner_tid = SyncVar::kInvalidTid;
353 void Acquire(ThreadState *thr, uptr pc, uptr addr) {
354 DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
355 if (thr->ignore_sync)
357 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
358 AcquireImpl(thr, pc, &s->clock);
362 static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
363 ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
364 ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
365 if (tctx->status == ThreadStatusRunning)
366 thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
368 thr->clock.set(tctx->tid, tctx->epoch1);
371 void AcquireGlobal(ThreadState *thr, uptr pc) {
372 DPrintf("#%d: AcquireGlobal\n", thr->tid);
373 if (thr->ignore_sync)
375 ThreadRegistryLock l(ctx->thread_registry);
376 ctx->thread_registry->RunCallbackForEachThreadLocked(
377 UpdateClockCallback, thr);
380 void Release(ThreadState *thr, uptr pc, uptr addr) {
381 DPrintf("#%d: Release %zx\n", thr->tid, addr);
382 if (thr->ignore_sync)
384 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
385 thr->fast_state.IncrementEpoch();
386 // Can't increment epoch w/o writing to the trace as well.
387 TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
388 ReleaseImpl(thr, pc, &s->clock);
392 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
393 DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
394 if (thr->ignore_sync)
396 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
397 thr->fast_state.IncrementEpoch();
398 // Can't increment epoch w/o writing to the trace as well.
399 TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
400 ReleaseStoreImpl(thr, pc, &s->clock);
405 static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
406 ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
407 ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
408 if (tctx->status == ThreadStatusRunning)
409 thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
411 thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
414 void AfterSleep(ThreadState *thr, uptr pc) {
415 DPrintf("#%d: AfterSleep %zx\n", thr->tid);
416 if (thr->ignore_sync)
418 thr->last_sleep_stack_id = CurrentStackId(thr, pc);
419 ThreadRegistryLock l(ctx->thread_registry);
420 ctx->thread_registry->RunCallbackForEachThreadLocked(
421 UpdateSleepClockCallback, thr);
425 void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
426 if (thr->ignore_sync)
428 thr->clock.set(thr->fast_state.epoch());
429 thr->clock.acquire(&thr->clock_cache, c);
430 StatInc(thr, StatSyncAcquire);
433 void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
434 if (thr->ignore_sync)
436 thr->clock.set(thr->fast_state.epoch());
437 thr->fast_synch_epoch = thr->fast_state.epoch();
438 thr->clock.release(&thr->clock_cache, c);
439 StatInc(thr, StatSyncRelease);
442 void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
443 if (thr->ignore_sync)
445 thr->clock.set(thr->fast_state.epoch());
446 thr->fast_synch_epoch = thr->fast_state.epoch();
447 thr->clock.ReleaseStore(&thr->clock_cache, c);
448 StatInc(thr, StatSyncRelease);
451 void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
452 if (thr->ignore_sync)
454 thr->clock.set(thr->fast_state.epoch());
455 thr->fast_synch_epoch = thr->fast_state.epoch();
456 thr->clock.acq_rel(&thr->clock_cache, c);
457 StatInc(thr, StatSyncAcquire);
458 StatInc(thr, StatSyncRelease);
461 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
464 ThreadRegistryLock l(ctx->thread_registry);
465 ScopedReport rep(ReportTypeDeadlock);
466 for (int i = 0; i < r->n; i++) {
467 rep.AddMutex(r->loop[i].mtx_ctx0);
468 rep.AddUniqueTid((int)r->loop[i].thr_ctx);
469 rep.AddThread((int)r->loop[i].thr_ctx);
471 uptr dummy_pc = 0x42;
472 for (int i = 0; i < r->n; i++) {
473 for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
474 u32 stk = r->loop[i].stk[j];
476 rep.AddStack(StackDepotGet(stk), true);
478 // Sometimes we fail to extract the stack trace (FIXME: investigate),
479 // but we should still produce some stack trace in the report.
480 rep.AddStack(StackTrace(&dummy_pc, 1), true);
484 OutputReport(thr, rep);
487 } // namespace __tsan