//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

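// A minimal sketch of how a test or embedding application could intercept
// reports by linking in a strong definition that shadows the weak one above
// (hypothetical override, not part of this file):
//
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     // Returning true suppresses the report; the weak default simply
//     // echoes back its `suppressed` argument.
//     return suppressed || rep->typ == ReportTypeThreadLeak;
//   }
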
static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == nullptr)
    return;
#ifndef SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    // due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return nullptr;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

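// Symbolizes each pc of the trace and links the resulting frames into a
// single list with the innermost frame at the head. The input trace arrives
// in shadow-stack order (caller before callee), so entries are prepended as
// they are symbolized.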
static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return nullptr;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
#ifndef SANITIZER_GO
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
#else
    // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
    if (si != trace.size - 1)
      pc1 = pc - 1;
#endif
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

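// ScopedReport serializes report production: the constructor takes both the
// tsan report mutex and the common sanitizer report mutex, and the
// destructor releases them in reverse order and then frees the report.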
ScopedReport::ScopedReport(ReportType typ) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                                   const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#ifndef SANITIZER_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(i));
    if (tctx && tctx->unique_id == (u32)unique_id) {
      return tctx;
    }
  }
  return 0;
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

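// Callback for FindThreadContextLocked below: matches the running thread
// whose stack or TLS range contains the address passed in `arg`.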
static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#ifndef SANITIZER_GO
  AddThread(FindThreadByUidLocked(unique_tid), suppressable);
#endif
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

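// Classifies the racy address and attaches the best available location to
// the report: a file descriptor, a heap chunk (via the allocator and the
// metamap), a thread's stack or TLS range, or, failing those, a symbolized
// global/data location.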
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef SANITIZER_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}

#ifndef SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
  if (tctx == 0)
    return;
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
    return;
  Trace* trace = ThreadTrace(tctx->tid);
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
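  // A worked example of the arithmetic above, assuming (hypothetically)
  // TraceSize() == 4 * kTracePartSize: for epoch == 2 * kTracePartSize + 100,
  // epoch0 == 0 (first epoch of the current trace window),
  // eend == 2 * kTracePartSize + 100 (the event's slot in the window), and
  // ebegin == 2 * kTracePartSize (first slot of the containing part); the
  // replay loop below then walks only events [ebegin, eend] of that part.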
  InternalScopedBuffer<uptr> stack(kShadowStackSize);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
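    // Each event is a single u64: the top 3 bits hold the EventType and the
    // low 61 bits hold the payload (a pc or an address), which is what the
    // shift/mask below decodes.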
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}

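// Deduplicates reports: with suppress_equal_stacks, an md5 hash of both racy
// stacks is matched against previously reported pairs; with
// suppress_equal_addresses, the racy address range is checked for overlap
// (maxbeg < minend) with previously reported ranges.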
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}

static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

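// OutputReport matches every collected stack against user suppressions; the
// first frame that matches wins and is recorded as a FiredSuppression so
// that identical reports can later be rejected cheaply. For reference (as
// documented for TSan, not defined in this file), a suppressions file is
// passed via TSAN_OPTIONS=suppressions=<path> and contains lines such as:
//
//   race:SomeFunctionOrFileName
//   thread:SomeThreadName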
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = 0;
  uptr suppress_pc = 0;
  for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (suppress_pc != 0) {
    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
    ctx->fired_suppressions.push_back(s);
    return false;
  }
  bool old_is_freeing = thr->is_freeing;
  thr->is_freeing = false;
  bool suppressed = OnReport(rep, suppress_pc != 0);
  thr->is_freeing = old_is_freeing;
  if (suppressed)
    return false;
  PrintReport(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    internal__exit(flags()->exitcode);
  return true;
}

bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
                        StackTrace trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc) {
        if (s->supp)
          s->supp->hit_count++;
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx,
                               const ScopedReport &srep,
                               uptr addr) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc) {
      if (s->supp)
        s->supp->hit_count++;
      return true;
    }
  }
  return false;
}

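// Decides whether this pair of accesses is still reportable when
// report_atomic_races is disabled: returns true for plain-vs-plain races and
// for atomic accesses racing with a free, so only pure atomic-vs-plain races
// are filtered out.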
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

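// Entry point for race reporting: restores the second stack from the trace,
// applies suppression and deduplication checks, collects memory accesses,
// threads, location, and (for non-Go builds) sleep information into a
// ScopedReport, and finally prints it via OutputReport.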
void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ThreadRegistryLock l0(ctx->thread_registry);

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  if (IsFiredSuppression(ctx, rep, addr))
    return;
  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  ObtainCurrentStack(thr, toppc, &traces[0]);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
  if (IsFiredSuppression(ctx, rep, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef SANITIZER_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

void PrintCurrentStackSlow(uptr pc) {
#ifndef SANITIZER_GO
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
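  // The unwinder produces the trace innermost-frame-first, while
  // SymbolizeStack expects shadow-stack order (innermost frame last), so the
  // buffer is reversed in place before symbolization.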
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"