//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
// This file is a part of ThreadSanitizer (TSan), a race detector.
// Main internal TSan header file.
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only in platform-specific files (*).
//   (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker
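// The shadow of the .rodata section is pre-filled with this marker so that
// accesses hitting it can be skipped early: read-only data cannot participate
// in a data race.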
// FastState (from most significant bit):
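//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
// (Field layout as implied by the shift/mask constants below: the ignore bit
// is the msb, the tid sits at kTidShift, the 3-bit history size at
// kHistoryShift, and the epoch occupies the low kClkBits bits.)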
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  explicit FastState(u64 x)
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    u64 res = x_ & ((1ull << kClkBits) - 1);
  void IncrementEpoch() {
    u64 old_epoch = epoch();
    DCHECK_EQ(old_epoch + 1, epoch());
  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }
  void SetHistorySize(int hs) {
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  void ClearHistorySize() {
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
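    // E.g. with history_size hs == 0 the position wraps after 2 trace parts
    // (2 * kTracePartSize events); each increment of history_size doubles the
    // number of parts kept, up to 256 parts for the maximum history_size of 7.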
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;

// Shadow (from most significant bit):
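//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
// (Field layout as implied by the constants below: kFreedBit is the msb,
// kAtomicShift/kReadShift sit just above the 2-bit size_log and 3-bit addr0,
// and the epoch occupies the low kClkBits bits.)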
class Shadow : public FastState {
  explicit Shadow(u64 x)
  explicit Shadow(const FastState &s)
  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  void SetAtomic(bool kIsAtomic) {
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  bool IsAtomic() const {
    return x_ & kAtomicBit;
  bool IsZero() const {
  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
                                               unsigned kS2AccessSize) {
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1 > s2.addr0()) return true;
      if (s1.size() > -diff)
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
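  // addr0() is the access offset within the 8-byte shadow cell (0..7) and
  // size() is the access size in bytes (1, 2, 4 or 8), so TwoRangesIntersect
  // above checks overlap of [s1.addr0(), s1.addr0()+s1.size()) with
  // [s2.addr0(), s2.addr0()+kS2AccessSize) inside one cell. E.g. addr0=2,
  // size=4 overlaps an access at addr0=4 of size 2, but not one at addr0=6.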
  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write to
  // the shadow values with the tid/epoch of the free and the freed bit set.
  // During memory access processing the freed bit is considered
  // as the msb of the tid. So any access races with a shadow value that has
  // the freed bit set (it is as if it were a write from a thread with which we
  // have never synchronized before). This allows us to detect accesses to
  // freed memory without additional overhead in memory access processing, and
  // at the same time restore the tid/epoch of the free.
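  // (Concretely: TidsAreEqual() compares the bits from kTidShift upwards,
  // which include kFreedBit, so a freed-marked shadow value never compares as
  // "same thread" with a live access, even one from the thread that did the
  // free.)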
  bool IsFreed() const {
    return x_ & kFreedBit;
  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
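  // (The two bits compared above encode the access kind as a small integer:
  // 0 = plain write, 1 = plain read, 2 = atomic write, 3 = atomic read; a
  // larger value means a "weaker" access, which is what the <=/>= checks
  // rely on.)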
  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;
  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
struct ThreadSignalContext;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like the allocator cache, and does
// not participate in race-detection logic (it is invisible to the end user).
// In C++ it is tied to an OS thread just like ThreadState, however ideally
// it should be tied to a CPU (this way we will have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processors than
// ThreadStates (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
  ThreadState *thr;  // currently wired thread, or nullptr
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;
  DDPhysicalThread *dd_pt;
// ScopedGlobalProcessor temporarily sets up a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run
// at the very end of a thread, when we have already destroyed the thread's
// processor.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
// This struct is stored in TLS.
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows reducing the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, and we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same 'synch
  // epoch'. That is, if another memory access does not race with the former
  // write, it does not race with the latter either.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from TLS.
  u64 fast_synch_epoch;
  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share a cache line with previous fields.
  ThreadState* current;
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit() is
  // read. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int suppress_reports;
  // Go does not support ignores.
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
  // C/C++ uses a fixed-size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#if TSAN_COLLECT_STATS
#if SANITIZER_DEBUG && !SANITIZER_GO
  InternalDeadlockDetector internal_deadlock_detector;
  DDLogicalThread *dd_lt;
  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc() { return proc1; }
  atomic_uintptr_t in_signal_handler;
  ThreadSignalContext *signal_ctx;
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  const ReportDesc *current_report;
  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
INLINE void cur_thread_init() { }
#else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
INLINE void cur_thread_init() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(!thr->current))
    thr->current = thr;
}
INLINE void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
INLINE void cur_thread_finalize() { }
#endif  // SANITIZER_MAC || SANITIZER_ANDROID
#endif  // SANITIZER_GO
class ThreadContext : public ThreadContextBase {
  explicit ThreadContext(int tid);
  u32 creation_stack_id;
  // Epoch at which the thread had started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.
  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
struct FiredSuppression {
  bool after_multithreaded_fork;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;
  void *background_thread;
  atomic_uint32_t stop_background_thread;
  ThreadRegistry *thread_registry;
  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  ClockAlloc clock_alloc;
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
extern Context *ctx;  // The one and only global runtime context.
ALWAYS_INLINE Flags *flags() {
struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
    cur_thread()->ignore_interceptors++;
  ~ScopedIgnoreInterceptors() {
    cur_thread()->ignore_interceptors--;
const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);
class ScopedReportBase {
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);
  const ReportDesc *GetReport() const;
  ScopedReportBase(ReportType typ, uptr tag);
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;
  void AddDeadMutex(u64 id);
  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
class ScopedReport : public ScopedReportBase {
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ScopedErrorReportLock lock_;
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag = nullptr);

// The stack could look like:
//   <start> | <main> | <foo> | tag | <bar>
// This will extract the tag and keep:
//   <start> | <main> | <foo> | <bar>
template<typename StackTraceTy>
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
  if (stack->size < 2) return;
  uptr possible_tag_pc = stack->trace[stack->size - 2];
  uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
  if (possible_tag == kExternalTagNone) return;
  stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
  stack->size -= 1;
  if (tag) *tag = possible_tag;
}
template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
                        uptr *tag = nullptr) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
  ExtractTagFromStack(stack, tag);
}
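// If the shadow stack is deeper than kStackTraceMax, the computation above
// drops the oldest frames and keeps the most recent kStackTraceMax entries
// (including the optional toppc slot).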
#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack; \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();

#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);

void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
#if TSAN_COLLECT_STATS

void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
#if TSAN_COLLECT_STATS

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif
u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;
void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
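// These wrappers are what the public instrumentation entry points boil down
// to. As a rough sketch (the actual definitions live in tsan_interface_inl.h),
// a compiler-emitted call such as __tsan_read4(addr) ends up doing:
//   MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
// i.e. a plain 4-byte read attributed to the caller's PC.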
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
                 ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);
void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid);

Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);
// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
    int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of the happens-before relation, it draws an HB edge from all
// threads (where they happen to be executing right now) to the current
// thread. We use it to handle Go finalizers. Namely, the finalizer goroutine
// executes AcquireGlobal right before executing finalizers. This provides a
// coarse, but simple approximation of the actually required synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
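// (Roughly, in vector-clock terms: the Acquire* functions join the sync
// object's clock into the current thread's clock, while the Release* functions
// publish the current thread's clock into the sync object, so that a later
// Acquire on the same object establishes the happens-before edge.)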
// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
    HACKY_CALL(__tsan_trace_switch);
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << kEventPCBits);
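  // An event packs the event type into the bits above kEventPCBits and the
  // PC/address into the low kEventPCBits bits, which is why TraceAddEvent
  // asserts above that addr fits into kEventPCBits.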
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);

// These need to match __tsan_switch_to_fiber_* flags defined in
// tsan_interface.h. See documentation there as well.
enum FiberSwitchFlags {
  FiberSwitchFlagNoSync = 1 << 0,  // __tsan_switch_to_fiber_no_sync
};

}  // namespace __tsan