//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

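// Removes the bottom-most frames from a symbolized stack so that reports end
// at user code: the frame above main(), the internal thread start routine,
// or the global ctors initializer (see the string comparisons below).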
static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == nullptr)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

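// ScopedReport collects the pieces of a report (stacks, memory accesses,
// threads, mutexes, locations) and holds the report mutexes for as long as
// the report object is alive.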
ScopedReport::ScopedReport(ReportType typ) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReport::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                   StackTrace stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->workerthread = tctx->workerthread;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->GetId();
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

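// Trace events replayed by RestoreStack() are packed into a single 64-bit
// value: the top 3 bits hold the EventType and the low 61 bits hold the
// event payload -- a PC for memory-access and function events, and a mutex
// id for lock/unlock events -- matching the decoding "ev >> 61" and
// "ev & ((1ull << 61) - 1)" below.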
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  Trace* trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack(MBlockReportStack);
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
}

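// Report deduplication: under the suppress_equal_stacks and
// suppress_equal_addresses flags, a race is dropped if the md5 hash of its
// pair of stacks, or an overlapping [addr_min, addr_max) range, was already
// reported. HandleRacyStacks checks for (and records) duplicates before a
// report is built; AddRacyStacks records a report that was actually printed.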
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (flags()->suppress_equal_stacks) {
      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
        if (hash == ctx->racy_stacks[i]) {
          VPrintf(2,
              "ThreadSanitizer: suppressing report as doubled (stack)\n");
          equal_stack = true;
          break;
        }
      }
    }
    if (flags()->suppress_equal_addresses) {
      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
        RacyAddress ra2 = ctx->racy_addresses[i];
        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
        uptr minend = min(ra0.addr_max, ra2.addr_max);
        if (maxbeg < minend) {
          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
          equal_address = true;
          break;
        }
      }
    }
  }
  if (!equal_stack && !equal_address)
    return false;
  if (!equal_stack) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_stacks.PushBack(hash);
  }
  if (!equal_address) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_addresses.PushBack(ra0);
  }
  return true;
}

static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  Lock lock(&ctx->racy_mtx);
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

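// Final gate for a prepared report: applies suppressions, lets the
// application veto the report via OnReport(), prints it, notifies
// __tsan_on_report(), and honors halt_on_error.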
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

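// Builds and prints a data race report from thr->racy_state: the stack for
// racy_state[0] is unwound from the current thread state, while the stack
// and mutex set for racy_state[1] are reconstructed from the thread trace
// via RestoreStack().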
void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  else if (thr->external_tag > 0)
    typ = ReportTypeExternalRace;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  ObtainCurrentStack(thr, toppc, &traces[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on stack.
  Vector<u64> mset_buffer(MBlockScopedBuf);
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();

  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, thr->external_tag, s, traces[i],
                        i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  {
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but
// a tail-call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail-call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
  // The unwinder returns the innermost frame first; reverse the buffer so
  // that SymbolizeStack (which reverses again) prints innermost-first.
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"