//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define __MM_MALLOC_H
#include <emmintrin.h>
typedef __m128i m128;
#endif

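// The trick works because <emmintrin.h> pulls in <stdlib.h> only through
// <mm_malloc.h>; predefining both known include guards makes that nested
// include a no-op. Illustrative sketch of what the preprocessor then sees:
//   #define _MM_MALLOC_H_INCLUDED  // GCC's mm_malloc.h guard
//   #define __MM_MALLOC_H          // Clang's mm_malloc.h guard
//   #include <emmintrin.h>         // mm_malloc.h body is now skipped
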
volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef SANITIZER_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnFinalize(bool failed) {
  return failed;
}
SANITIZER_INTERFACE_ATTRIBUTE
void WEAK OnInitialize() {}
#endif

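// Example (illustrative): since the defaults are WEAK symbols, a front-end
// that links the runtime can override them, e.g. to observe the verdict:
//   bool OnFinalize(bool failed) {
//     Printf("tsan finalized, failed=%d\n", failed);
//     return failed;  // keep (or downgrade) the exit status
//   }
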
static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(8) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , ignore_interceptors()
  , clock(tid, reuse_count)
#ifndef SANITIZER_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size)
#ifndef SANITIZER_GO
  , last_sleep_clock(tid)
#endif
{
}

static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
}

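// Example (illustrative): with
//   TSAN_OPTIONS=profile_memory=/tmp/tsan ./a.out
// BackgroundThread below opens /tmp/tsan.<pid> (see the filename.append
// call) and appends a profile snapshot roughly every 100ms;
// profile_memory=stdout or stderr redirects to those streams instead.
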
static void BackgroundThread(void *arg) {
#ifndef SANITIZER_GO
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread()->ignore_interceptors++;
#endif
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      uptr openrv = OpenFile(filename.data(), true);
      if (internal_iserror(openrv)) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
            filename.data());
      } else {
        mprof_fd = openrv;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

#ifndef SANITIZER_GO
    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
#endif
  }
}

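// Example (illustrative): the maintenance loop above is driven entirely by
// flags, e.g.
//   TSAN_OPTIONS=flush_memory_ms=1000:memory_limit_mb=4096:flush_symbolizer_ms=5000
// flushes shadow memory every second, additionally flushes when RSS grows
// past the 4GB limit, and drops symbolizer caches 5s after the last report.
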
static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef SANITIZER_GO
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}

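// Sizing example (illustrative, assuming the default TSAN_SHADOW_COUNT=4
// with 8-byte shadow cells): kShadowMultiplier = kShadowCnt * kShadowSize /
// kShadowCell = 4*8/8 = 4, so mapping 1MB of application memory reserves
// 4MB of shadow, and the 2:1 meta shadow adds another 512KB.
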
void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBeg);
  CHECK_LE(addr + size, kTraceMemEnd);
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
  if (addr1 != addr) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
        addr, size, addr1);
    Die();
  }
}

static void CheckShadowMapping() {
  for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
    const uptr beg = UserRegions[i];
    const uptr end = UserRegions[i + 1];
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -1; x <= 1; x++) {
        const uptr p = p0 + x;
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        VPrintf(3, "  checking pointer %p -> %p\n", p, s);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
        const uptr m = (uptr)MemToMeta(p);
        CHECK(IsMetaMem(m));
      }
    }
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *options = GetEnv(kTsanOptionsEnv);
  InitializeFlags(&ctx->flags, options);
#ifndef SANITIZER_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
#ifndef SANITIZER_GO
  InitializeShadowMemory();
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#ifndef SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
  StartBackgroundThread();
  SetSandboxingCallback(StopBackgroundThread);
#endif

  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }
}

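// Example (illustrative): to attach a debugger before any user code runs:
//   TSAN_OPTIONS=stop_on_start=1 ./a.out
// then, from the debugger, call __tsan_resume() (or set __tsan_resumed = 1)
// to let Initialize() fall out of the busy-wait above.
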
int Finalize(ThreadState *thr) {
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef SANITIZER_GO
  if (common_flags()->verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

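// Example (illustrative): the distinct exit code lets a CI wrapper tell
// "tests failed" apart from "tsan found races":
//   TSAN_OPTIONS=exitcode=99 ./a.out; test $? -eq 99 && echo "races found"
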
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    internal_start_thread(&BackgroundThread, 0);
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably
    // function after that (some mutexes may be locked before fork). So just
    // enable ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}

#ifdef SANITIZER_GO
// Go's shadow stack is heap-allocated and grown on demand; C/C++ uses a
// fixed-size shadow stack, where overflow is a DCHECK instead (see FuncEntry).
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
      newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#ifndef SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

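// Sizing example (illustrative, assuming kTracePartSizeBits == 14): with
// history_size=2, TraceSize() = 2^(14+2+1) = 128K events per thread; at
// 8 bytes per Event that is 1MB of trace memory, split into TraceParts()
// parts that are recycled round-robin by TraceSwitch() above.
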
#ifndef SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

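// Example (illustrative): if the old shadow value was written by thread T2
// at its epoch 42, and the current thread's vector-clock entry for T2 is
// >= 42 (i.e. it synchronized with T2 after that access), the two accesses
// are ordered and cannot race.
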
ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially larger and smaller
  // were considered as well; that allowed replacing some 'candidates'
  // with 'same' or 'replace', but it's just not worth it
  // (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and had already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a (pseudo-)random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

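// Worked example (illustrative): T1 stores an 8-byte write into a slot.
// If T1 repeats the same write, the scan classifies it as 'same' (nothing
// to update). If T2 later reads bytes [4..7] without having synchronized
// with T1, the ranges intersect, the tids differ and HappensBefore() is
// false, so the scan classifies it as 'race' and jumps to RACE: above.
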
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

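// Worked example (illustrative): a 6-byte access at addr=0x1001 is split
// into a 4-byte access at 0x1001 (bytes 0x1001..0x1004 stay within one
// 8-byte shadow cell) followed by a 2-byte access at 0x1005; each piece is
// fed to MemoryAccess with the matching kSizeLog.
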
ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

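// The predicate accepts an old slot only if it fully covers the new access:
// same address/size, same thread, recorded after the thread's last
// synchronization (so the stored epoch is still current), same atomicity,
// and at least as strong (old.IsRead() <= cur.IsRead(): a stored write
// covers a later read, but not vice versa).
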
#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
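// SHUF picks 32-bit lanes: the two low result lanes come from v0 and the
// two high ones from v1, e.g. SHUF(a, b, 1, 3, 1, 3) = {a[1], a[3], b[1],
// b[3]} (standard _mm_shuffle_ps semantics).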
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]   = access[32:63]
  // addr0[32:63]  = access[32:63]
  // addr0[64:95]  = access[32:63]
  // addr0[96:127] = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]   = shadow0[32:63]
  // addr_vect[32:63]  = shadow0[96:127]
  // addr_vect[64:95]  = shadow1[32:63]
  // addr_vect[96:127] = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63] = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]   = sync_epoch[0:31]
  // epoch[32:63]  = sync_epoch[0:31]
  // epoch[64:95]  = sync_epoch[0:31]
  // epoch[96:127] = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect > sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads.
  DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (kCppMode && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit()) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

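// Example (illustrative): this is the function behind the compiler-emitted
// instrumentation; e.g. a 4-byte load becomes a call to __tsan_read4(addr)
// (see tsan_interface_inl.h), which forwards here roughly as
//   MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4,
//                false /*kAccessIsWrite*/, false /*kIsAtomic*/);
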
// Called by MemoryAccessRange in tsan_rtl_thread.cc
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

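// Example (illustrative): after free(p) the allocator calls this to stamp
// the range with a 'freed' write; a later unsynchronized access to *p then
// collides with that shadow value and is reported as a heap-use-after-free.
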
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

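// Example (illustrative): the compiler brackets every instrumented function
// with these callbacks, roughly:
//   void foo() {
//     __tsan_func_entry(__builtin_return_address(0));  // -> FuncEntry
//     ... instrumented body ...
//     __tsan_func_exit();                              // -> FuncExit
//   }
// which keeps the shadow call stack above in sync with the real one.
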
void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#ifndef SANITIZER_GO
  if (!ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#ifndef SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#ifndef SANITIZER_GO
  if (!ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  thr->ignore_sync--;
  CHECK_GE(thr->ignore_sync, 0);
#ifndef SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

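// Example (illustrative): these counters back the dynamic annotations, e.g.
//   ANNOTATE_IGNORE_READS_BEGIN();
//   benign_racy_read(&x);
//   ANNOTATE_IGNORE_READS_END();
// Begin/End pairs may nest, which is why the state is a counter that is
// incremented and decremented rather than a flag that is set and cleared.
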
bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif