//===-- tsan_rtl.cc -------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"
#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED
#include <emmintrin.h>
typedef __m128i m128;
#endif

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !defined(SANITIZER_GO) && !SANITIZER_MAC
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {}
#endif

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Unmap the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace))
    UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
  , racy_mtx(MutexTypeRacy, StatMtxRacy)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
  , fired_suppressions(8) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , ignore_interceptors()
  , clock(tid, reuse_count)
  , jmp_bufs(MBlockJmpBuf)
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size)
  , last_sleep_clock(tid)
{
}
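
// Writes a single snapshot of the current memory usage (thread counts plus
// sanitizer memory stats) to the memory profile file descriptor. Invoked
// periodically from BackgroundThread when the profile_memory flag is set.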
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}

static void BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // destruction).
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
            filename.data());
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
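
// Tells the OS that the shadow for the range [addr, addr+size) is not needed
// anymore, so the backing pages can be reclaimed; the shadow reads back as
// zero if it is touched again.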
void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier, "shadow");

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);

  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name);
  if (addr1 != addr) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
        addr, size, addr1);
    Die();
  }
}

static void CheckShadowMapping() {
  uptr beg, end;
  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -1; x <= 1; x++) {
        const uptr p = p0 + x;
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
      }
    }
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *options = GetEnv(kTsanOptionsEnv);
  InitializeFlags(&ctx->flags, options);
  AvoidCVE_2016_2143();
  InitializePlatformEarly();
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeDynamicAnnotations();
  InitializeShadowMemory();
  InitializeAllocatorLate();
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads yet.
#ifndef __mips__
  StartBackgroundThread();
  SetSandboxingCallback(StopBackgroundThread);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

  Symbolizer::LateInitialize();

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

int Finalize(ThreadState *thr) {
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

  if (Verbosity()) AllocatorPrintStats();

  if (ctx->nreported) {
    failed = true;
#ifndef SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (flags()->print_benign)
    PrintMatchedBenignRaces();

  failed = OnFinalize(failed);

#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
#endif

  return failed ? common_flags()->exitcode : 0;
}

void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
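
// Doubles the capacity of the per-thread shadow call stack and copies the
// existing frames into the new buffer; called when a push would overflow the
// current buffer.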
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
                                         newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
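
// Returns a stack depot id for the current call stack. The caller's pc is
// temporarily pushed onto the shadow stack so that it is included in the
// stored trace, and popped again before returning.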
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#ifndef SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}
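
// Starts a new trace part: remembers the current epoch, call stack and mutex
// set in the part header so that reports can reconstruct events that fall
// into this part.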
void TraceSwitch(ThreadState *thr) {
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

536 extern "C" void __tsan_trace_switch() {
537 TraceSwitch(cur_thread());
540 extern "C" void __tsan_report_race() {
541 ReportRace(cur_thread());
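
// Shadow cells are read and written with relaxed atomics: concurrent updates
// of the same shadow word by different threads are expected and tolerated by
// the algorithm.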
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
  HACKY_CALL(__tsan_report_race);
}
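
// The old access happens-before the current point iff the current thread has
// already acquired (via synchronization) an epoch of the old access's thread
// that is not older than the old access.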
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed to replace some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
#include "tsan_update_shadow_word_inl.h"
  idx = 2;
#include "tsan_update_shadow_word_inl.h"
  idx = 3;
#include "tsan_update_shadow_word_inl.h"
#endif

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}
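
// Splits an arbitrary-size (possibly unaligned) access into 1/2/4/8-byte
// pieces that do not cross 8-byte shadow cells and reports each piece as a
// separate access.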
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}
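
// Returns true if one of the shadow slots already holds an access to the same
// address range by the same thread, with the same atomicity, at least as
// strong (write covers read) and not older than the last synchronization
// epoch. Such an access adds no new information and can be skipped.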
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}
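
// SHUF(v0, v1, i0, i1, i2, i3) composes a vector from 32-bit lanes: lanes 0-1
// are taken from v0 (at indices i0, i1) and lanes 2-3 from v1 (at indices
// i2, i3).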
#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]   = access[32:63]
  // addr0[32:63]  = access[32:63]
  // addr0[64:95]  = access[32:63]
  // addr0[96:127] = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]   = shadow0[32:63]
  // addr_vect[32:63]  = shadow0[96:127]
  // addr_vect[64:95]  = shadow1[32:63]
  // addr_vect[96:127] = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63] = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]   = sync_epoch[0:31]
  // epoch[32:63]  = sync_epoch[0:31]
  // epoch[64:95]  = sync_epoch[0:31]
  // epoch[96:127] = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect >= sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[i] = res[i*8+7], i.e. the high bit of each byte of res
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (kCppMode && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit()) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cc
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}
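
// Writes the shadow value 'val' into the first slot of every shadow cell in
// the range and clears the remaining slots. For very large ranges the middle
// of the shadow is unmapped and remapped instead of being written explicitly.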
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
  if (!ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
    thr->mop_ignore_set.Reset();
  }
}

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
  if (!ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  thr->ignore_sync--;
  CHECK_GE(thr->ignore_sync, 0);
#ifndef SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

}  // namespace __tsan

#ifndef SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif