//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//
14 #include "sanitizer_common/sanitizer_atomic.h"
15 #include "sanitizer_common/sanitizer_common.h"
16 #include "sanitizer_common/sanitizer_file.h"
17 #include "sanitizer_common/sanitizer_libc.h"
18 #include "sanitizer_common/sanitizer_stackdepot.h"
19 #include "sanitizer_common/sanitizer_placement_new.h"
20 #include "sanitizer_common/sanitizer_symbolizer.h"
21 #include "tsan_defs.h"
22 #include "tsan_platform.h"
24 #include "tsan_mman.h"
25 #include "tsan_suppressions.h"
26 #include "tsan_symbolize.h"
27 #include "ubsan/ubsan_init.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED
#include <emmintrin.h>
typedef __m128i m128;
#endif

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {}
#endif

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Unmap the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace))
    UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
    : initialized()
    , report_mtx(MutexTypeReport, StatMtxReport)
    , nreported()
    , nmissed_expected()
    , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
        CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
    , racy_mtx(MutexTypeRacy, StatMtxRacy)
    , racy_stacks()
    , racy_addresses()
    , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
    , clock_alloc("clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
    : fast_state(tid, epoch)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // , ignore_reads_and_writes()
    // , ignore_interceptors()
    , clock(tid, reuse_count)
#if !SANITIZER_GO
    , jmp_bufs()
#endif
    , tid(tid)
    , unique_id(unique_id)
    , stk_addr(stk_addr)
    , stk_size(stk_size)
    , tls_addr(tls_addr)
    , tls_size(tls_size)
#if !SANITIZER_GO
    , last_sleep_clock(tid)
#endif
{
}

#if !SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}

static void BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init();
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
               filename.data());
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}

#if !SANITIZER_GO
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
        addr, size);
    Die();
  }
}

static void CheckShadowMapping() {
  uptr beg, end;
  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
    // Skip cases for empty regions (heap definition for architectures that
    // do not use 64-bit allocator).
    if (beg == end)
      continue;
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    uptr prev = 0;
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p, ShadowToMem(s));
        CHECK(IsMetaMem(m));
        if (prev) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
          const uptr prev_s = MemToShadow(prev);
          const uptr prev_m = (uptr)MemToMeta(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ((m - prev_m) / kMetaShadowSize,
                   (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Set up the correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1) PrintModuleMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
           ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#if !SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
#endif

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
                                         newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}
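
// Worked example (ours, with made-up values): assuming
// kTracePartSize == 1 << kTracePartSizeBits, then with
// kTracePartSizeBits == 13 and history_size == 2, TraceSize() returns
// 1 << (13 + 2 + 1) == 64K events, which TraceParts() splits into
// 64K / 8K == 8 parts, each with its own TraceHeader restored by
// TraceSwitch above.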
596 extern "C" void __tsan_trace_switch() {
597 TraceSwitch(cur_thread());
600 extern "C" void __tsan_report_race() {
601 ReportRace(cur_thread());

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

ALWAYS_INLINE
void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#if !SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
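
// Illustrative reading of the check above (example numbers are ours):
// thr->clock is the current thread's vector clock, indexed by thread id.
// If the old access was made by tid 3 at epoch 7 and thr->clock.get(3) >= 7,
// the current thread has synchronized with tid 3 past that access, so the
// two accesses are ordered and cannot race.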

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();
  bool stored = false;

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well; it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 2;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 3;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
#endif

  // We did not find any races and had already stored
  // the current access info, so we are done.
  if (LIKELY(stored))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}
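
// Note on the boundary tests above (example values are ours):
// (addr & ~7) == ((addr + k - 1) & ~7) holds iff a k-byte access fits in a
// single 8-byte shadow cell. E.g. a 16-byte access at addr == 0x1006 fails
// the 8- and 4-byte tests on the first iteration, so the range is emitted
// as 2 + 8 + 4 + 2 bytes, each chunk landing in one cell.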

ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
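
// How SHUF builds its selector (explanatory note, ours): _mm_shuffle_ps
// encodes one 2-bit source lane index per destination lane, packed from the
// low bits up, hence i0 + i1*4 + i2*16 + i3*64. The two low destination
// lanes come from v0 and the two high ones from v1, so e.g.
// SHUF(a, b, 1, 3, 1, 3) yields {a[1], a[3], b[1], b[3]} -- the high 32-bit
// halves of the four 64-bit shadow words when a and b hold two cells each.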

ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]        = access[32:63]
  // addr0[32:63]       = access[32:63]
  // addr0[64:95]       = access[32:63]
  // addr0[96:127]      = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]    = shadow0[32:63]
  // addr_vect[32:63]   = shadow0[96:127]
  // addr_vect[64:95]   = shadow1[32:63]
  // addr_vect[96:127]  = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63]       = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]        = sync_epoch[0:31]
  // epoch[32:63]       = sync_epoch[0:31]
  // epoch[64:95]       = sync_epoch[0:31]
  // epoch[96:127]      = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect >= sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}
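
// Usage note (ours): this predicate is the fast path of MemoryAccess and
// MemoryAccessImpl below. If the current access duplicates a shadow word
// recorded after the last synchronization epoch (sync_epoch), the shadow
// state cannot change, so the full MemoryAccessImpl1 state machine is
// skipped.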

ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (UNLIKELY(fast_state.GetIgnoreBit())) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);
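
  // Encoding example (values ours): for a 4-byte non-atomic write at
  // addr == 0x1234, addr & 7 == 4, so cur records offset 4 within the
  // 8-byte shadow cell starting at 0x1230, together with the access size
  // and the write/atomic bits set above.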

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cpp
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
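  // E.g. with kShadowCell == 8 (which the DCHECK above assumes), a 10-byte
  // range is widened to 16 bytes here, so shadow for up to 7 trailing bytes
  // outside the original range may also be reset; the FIXME below notes the
  // same issue. (Comment ours.)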
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
  if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
      Die();
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size) {
  if (thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, addr, size);
  else
    MemoryResetRange(thr, pc, addr, size);
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}
1069 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
1070 uptr __tsan_testonly_shadow_stack_current_size() {
1071 ThreadState *thr = cur_thread();
1072 return thr->shadow_stack_pos - thr->shadow_stack;

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

}  // namespace __tsan

#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif