//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only in platform-specific files (*).
//
//   (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"

struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
static const uptr kAllocatorSpace = 0;
static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kAllocatorRegionSizeLog = 20;
static const uptr kAllocatorNumRegions =
    kAllocatorSize >> kAllocatorRegionSizeLog;
typedef TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12,
    MapUnmapCallback> ByteMap;
typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
    CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
    MapUnmapCallback> PrimaryAllocator;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
typedef SizeClassAllocator64<AP64> PrimaryAllocator;

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();

void TsanCheckFailed(const char *file, int line, const char *cond,

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker
// FastState (from most significant bit):
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);

  explicit FastState(u64 x)

    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;

    u64 res = x_ & ((1ull << kClkBits) - 1);

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    DCHECK_EQ(old_epoch + 1, epoch());

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);

  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);

  void ClearHistorySize() {

  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;

  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
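
// Illustrative sketch (not part of the runtime): how a FastState is built and
// queried. The concrete values are made up for the example.
//
//   FastState fs(/*tid=*/5, /*epoch=*/100);
//   fs.IncrementEpoch();         // epoch() == 101, tid() is still 5
//   fs.SetIgnoreBit();           // GetIgnoreBit() == true, tid() unaffected
//   u64 pos = fs.GetTracePos();  // position inside the thread's trace ring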
// Shadow (from most significant bit):
class Shadow : public FastState {
  explicit Shadow(u64 x)

  explicit Shadow(const FastState &s)

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    DCHECK_EQ(kAccessIsWrite, IsWrite());

  void SetAtomic(bool kIsAtomic) {
    DCHECK_EQ(IsAtomic(), kIsAtomic);

  bool IsAtomic() const {
    return x_ & kAtomicBit;

  bool IsZero() const {

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;

  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;

  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1 > s2.addr0()) return true;
      if (s1.size() > -diff)
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
  // The idea behind the freed bit is as follows.
  // When memory is freed (or otherwise becomes inaccessible), we write shadow
  // values with the tid/epoch related to the free and with the freed bit set.
  // During memory access processing the freed bit is considered as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it is as if it races with a write from a thread with which we have
  // never synchronized). This allows us to detect accesses to freed memory
  // without additional overhead in memory access processing, and at the same
  // time to restore the tid/epoch of the free.
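  //
  // Illustrative sketch (not part of the runtime, simplified):
  //   // on free(p): the shadow of [p, p+sz) is stamped with the freeing
  //   // thread's tid/epoch and with kFreedBit set (MemoryRangeFreed,
  //   // declared below, handles this for user memory);
  //   // on a later access to p: the stamped value never happens-before the
  //   // access, so a race is reported, and the report recovers the tid/epoch
  //   // of the free from that same shadow value (IsFreed()).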
  bool IsFreed() const {
    return x_ & kFreedBit;

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;

  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));

  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())

struct ThreadSignalContext;

  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like allocator caches, and does not
// participate in race-detection logic (it is invisible to the end user).
// In C++ it is tied to an OS thread just like ThreadState, however ideally
// it should be tied to a CPU (this way we would have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processors than
// ThreadStates (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
  ThreadState *thr;  // currently wired thread, or nullptr

  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;

  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;
  DDPhysicalThread *dd_pt;
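
// Illustrative sketch (not part of the runtime): the intended Processor
// lifecycle, using the ProcCreate/ProcWire/ProcUnwire/ProcDestroy functions
// declared later in this file.
//
//   Processor *proc = ProcCreate();
//   ProcWire(proc, thr);    // thr can now allocate and handle events
//   // ... run the thread ...
//   ProcUnwire(proc, thr);
//   ProcDestroy(proc);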
// ScopedGlobalProcessor temporarily sets up a global Processor for the current
// thread, if it does not have one. Intended for interceptors that can run at
// the very end of a thread, when its own Processor has already been destroyed.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
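
// Illustrative sketch (not part of the runtime): typical RAII use in a late
// interceptor (the interceptor name is hypothetical).
//
//   void some_late_interceptor() {
//     ScopedGlobalProcessor sgp;  // wires a global Processor if needed
//     // ... code that may allocate or touch sync objects ...
//   }                             // released again on scope exit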
// This struct is stored in TLS.
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, then when we process a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same 'synch
  // epoch'. That is, if another memory access does not race with the former
  // write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
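  //
  // Illustrative sketch (not part of the runtime, simplified): the fast-path
  // check that implements the optimization described above, where 'old' is
  // the shadow value left by this thread's earlier write and 'cur' is the
  // current access:
  //   if (Shadow::TidsAreEqual(old, cur) &&
  //       old.epoch() >= fast_synch_epoch)  // no sync since the old write
  //     return;  // the old shadow value already covers this access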
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit()
  // is read. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;

  // Go does not support ignores.
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;

  // C/C++ uses a fixed-size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;

  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#if TSAN_COLLECT_STATS

#if SANITIZER_DEBUG && !SANITIZER_GO
  InternalDeadlockDetector internal_deadlock_detector;

  DDLogicalThread *dd_lt;

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc() { return proc1; }

  atomic_uintptr_t in_signal_handler;
  ThreadSignalContext *signal_ctx;

  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.

  const ReportDesc *current_report;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);

#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void cur_thread_finalize();

__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
INLINE void cur_thread_finalize() { }
#endif  // SANITIZER_MAC || SANITIZER_ANDROID
#endif  // SANITIZER_GO

class ThreadContext : public ThreadContextBase {
  explicit ThreadContext(int tid);

  u32 creation_stack_id;

  // Epoch at which the thread started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;

  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])

struct FiredSuppression {

  bool after_multithreaded_fork;

  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;

  ClockAlloc clock_alloc;

  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
extern Context *ctx;  // The one and only global runtime context.
struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
    cur_thread()->ignore_interceptors++;

  ~ScopedIgnoreInterceptors() {
    cur_thread()->ignore_interceptors--;
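
// Illustrative sketch (not part of the runtime): suppress interceptor
// processing around a call into external code (ScopedReport below uses the
// same pattern around symbolizer calls); the callee name is hypothetical.
//
//   {
//     ScopedIgnoreInterceptors ignore;
//     CallIntoExternalLibrary();  // its intercepted calls are not processed
//   }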
  explicit ScopedReport(ReportType typ);

  void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
}
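
// Illustrative sketch (not part of the runtime): capturing the current stack
// into a VarSizeStackTrace (the type RestoreStack above also uses).
//
//   VarSizeStackTrace stack;
//   ObtainCurrentStack(thr, pc, &stack);  // pc becomes the topmost frame
//   // the stack can then be passed to e.g. ScopedReport::AddStack().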
#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);

void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
#if TSAN_COLLECT_STATS

void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
#if TSAN_COLLECT_STATS

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
# define DPrintf(...)

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
# define DPrintf2(...)

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;
void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
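
// Illustrative example (not part of the runtime): instrumentation of a 4-byte
// load at address addr corresponds to
//
//   MemoryRead(thr, pc, addr, kSizeLog4);  // access size log2(4) == 2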
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
744 // AcquireGlobal synchronizes the current thread with all other threads.
745 // In terms of happens-before relation, it draws a HB edge from all threads
746 // (where they happen to execute right now) to the current thread. We use it to
747 // handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
748 // right before executing finalizers. This provides a coarse, but simple
749 // approximation of the actual required synchronization.
750 void AcquireGlobal(ThreadState *thr, uptr pc);
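
// Illustrative sketch (not part of the runtime): how the Go runtime is
// expected to use AcquireGlobal around finalizers (names are hypothetical).
//
//   // in the finalizer goroutine, before running queued finalizers:
//   AcquireGlobal(thr, pc);  // HB edge from every goroutine's current point
//   RunQueuedFinalizers();   // reads of finalized objects do not race now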
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \

#define HACKY_CALL(f) f()
void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);

Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  if (!kCollectHistory)

  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
    HACKY_CALL(__tsan_trace_switch);

  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);

uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();

}  // namespace __tsan