1 //===-- tsan_interface_ann.cc ---------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
12 //===----------------------------------------------------------------------===//
13 #include "sanitizer_common/sanitizer_libc.h"
14 #include "sanitizer_common/sanitizer_internal_defs.h"
15 #include "sanitizer_common/sanitizer_placement_new.h"
16 #include "sanitizer_common/sanitizer_stacktrace.h"
17 #include "tsan_interface_ann.h"
18 #include "tsan_mutex.h"
19 #include "tsan_report.h"
21 #include "tsan_mman.h"
22 #include "tsan_flags.h"
23 #include "tsan_platform.h"
24 #include "tsan_vector.h"
// PC of the annotation's caller; used to attribute events to user code
// rather than to this interface shim.
26 #define CALLERPC ((uptr)__builtin_return_address(0))
28 using namespace __tsan; // NOLINT
// RAII helper instantiated at the top of every annotation entry point.
// Logs the annotation name for the current thread (debug output only).
// NOTE(review): the constructor initializer list, destructor, and closing
// brace are elided from this chunk — full contract not visible here.
32 class ScopedAnnotation {
34 ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
37 DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
45 ThreadState *const thr_;
// Common prologue for every annotation entry point: bails out early (with
// 'ret') when annotations are disabled by flags, bumps statistics, and sets
// up 'thr' and 'pc' locals that the function bodies below rely on.
48 #define SCOPED_ANNOTATION_RET(typ, ret) \
49 if (!flags()->enable_annotations) \
51 ThreadState *thr = cur_thread(); \
52 const uptr caller_pc = (uptr)__builtin_return_address(0); \
53 StatInc(thr, StatAnnotation); \
54 StatInc(thr, Stat##typ); \
55 ScopedAnnotation sa(thr, __func__, caller_pc); \
56 const uptr pc = StackTrace::GetCurrentPc(); \
// Variant for void functions: the early-return returns nothing.
60 #define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
// Maximum stored length (including NUL) for a race description string.
62 static const int kMaxDescLen = 128;
// Fields of the ExpectRace list node (struct header elided in this chunk):
// hitcount = times a matching report was suppressed; addcount = times this
// (addr, size) was annotated.
67 atomic_uintptr_t hitcount;
68 atomic_uintptr_t addcount;
73 char desc[kMaxDescLen];
// Holds the mutex-protected circular lists of expected and benign races.
76 struct DynamicAnnContext {
82 : mtx(MutexTypeAnnotations, StatMtxAnnotations) {
// Constructed via placement-new into the aligned placeholder below so no
// heap allocation happens during early initialization.
86 static DynamicAnnContext *dyn_ann_ctx;
87 static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64);
// Registers an expected/benign race at [addr, addr+size). If an entry with
// the exact same addr and size already exists, only its addcount is bumped;
// otherwise a new node is allocated and linked into the circular list.
// Caller must hold dyn_ann_ctx->mtx for writing.
89 static void AddExpectRace(ExpectRace *list,
90 char *f, int l, uptr addr, uptr size, char *desc) {
91 ExpectRace *race = list->next;
92 for (; race != list; race = race->next) {
93 if (race->addr == addr && race->size == size) {
// Relaxed load+store is fine: the annotation mutex serializes writers.
94 atomic_store_relaxed(&race->addcount,
95 atomic_load_relaxed(&race->addcount) + 1);
99 race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
105 atomic_store_relaxed(&race->hitcount, 0);
106 atomic_store_relaxed(&race->addcount, 1);
// Copy the description, truncating to kMaxDescLen-1 chars (NUL-terminated
// elsewhere; termination line not visible in this chunk).
109 for (; i < kMaxDescLen - 1 && desc[i]; i++)
110 race->desc[i] = desc[i];
// Insert at the head of the circular doubly-linked list.
114 race->next = list->next;
115 race->next->prev = race;
// Returns the first registered race whose [addr, addr+size) range overlaps
// the query range, or null if none (return statements elided in this chunk).
119 static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
120 for (ExpectRace *race = list->next; race != list; race = race->next) {
// Standard interval-overlap test: ranges intersect iff
// max(begin1, begin2) < min(end1, end2).
121 uptr maxbegin = max(race->addr, addr);
122 uptr minend = min(race->addr + race->size, addr + size);
123 if (maxbegin < minend)
// Returns whether the queried range matches a registered race; on a match,
// records the hit by incrementing the race's hitcount.
129 static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
130 ExpectRace *race = FindRace(list, addr, size);
133 DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
134 race->desc, race->addr, (int)race->size, race->file, race->line);
135 atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
// Makes 'list' an empty circular doubly-linked list (self-linked sentinel).
139 static void InitList(ExpectRace *list) {
// One-time setup: placement-new the context into static storage and
// initialize both race lists.
144 void InitializeDynamicAnnotations() {
145 dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
146 InitList(&dyn_ann_ctx->expect);
147 InitList(&dyn_ann_ctx->benign);
// Called by the report path: true if a race at [addr, addr+size) was
// annotated as expected or benign and should therefore be suppressed.
150 bool IsExpectedReport(uptr addr, uptr size) {
151 ReadLock lock(&dyn_ann_ctx->mtx);
152 if (CheckContains(&dyn_ann_ctx->expect, addr, size))
154 if (CheckContains(&dyn_ann_ctx->benign, addr, size))
// Collects benign-race entries whose given counter (hitcount or addcount,
// selected via pointer-to-member) is non-zero, deduplicating by
// (line, file, desc) and summing counters into the first matching entry.
// Caller must hold dyn_ann_ctx->mtx.
159 static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
160 int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
161 ExpectRace *list = &dyn_ann_ctx->benign;
162 for (ExpectRace *race = list->next; race != list; race = race->next) {
164 const uptr cnt = atomic_load_relaxed(&(race->*counter));
// Linear scan for an already-collected entry from the same source location.
169 for (; i < matched->Size(); i++) {
170 ExpectRace *race0 = &(*matched)[i];
171 if (race->line == race0->line
172 && internal_strcmp(race->file, race0->file) == 0
173 && internal_strcmp(race->desc, race0->desc) == 0) {
174 atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
// Not seen before: record as a new unique entry.
178 if (i == matched->Size())
179 matched->PushBack(*race);
// Prints two summaries at exit: benign races that were actually hit, and all
// annotated benign races, each deduplicated by source location.
183 void PrintMatchedBenignRaces() {
184 Lock lock(&dyn_ann_ctx->mtx);
185 int unique_count = 0;
188 Vector<ExpectRace> hit_matched(MBlockScopedBuf);
189 CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
190 &ExpectRace::hitcount);
191 Vector<ExpectRace> add_matched(MBlockScopedBuf);
192 CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
193 &ExpectRace::addcount);
194 if (hit_matched.Size()) {
195 Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
196 hit_count, (int)internal_getpid());
197 for (uptr i = 0; i < hit_matched.Size(); i++) {
198 Printf("%d %s:%d %s\n",
199 atomic_load_relaxed(&hit_matched[i].hitcount),
200 hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
// BUG FIX: this section iterates and prints add_matched, so it must be
// guarded by add_matched.Size(), not hit_matched.Size() — otherwise the
// "Annotated ... races" summary is skipped (or printed) based on the
// wrong vector.
203 if (add_matched.Size()) {
204 Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
206 add_count, unique_count, (int)internal_getpid());
207 for (uptr i = 0; i < add_matched.Size(); i++) {
208 Printf("%d %s:%d %s\n",
209 atomic_load_relaxed(&add_matched[i].addcount),
210 add_matched[i].file, add_matched[i].line, add_matched[i].desc);
// Warns about an ANNOTATE_EXPECT_RACE annotation that was never hit
// (used by race-detector self-tests).
215 static void ReportMissedExpectedRace(ExpectRace *race) {
216 Printf("==================\n");
217 Printf("WARNING: ThreadSanitizer: missed expected data race\n");
218 Printf(" %s addr=%zx %s:%d\n",
219 race->desc, race->addr, race->file, race->line);
220 Printf("==================\n");
222 } // namespace __tsan
224 using namespace __tsan; // NOLINT
// Happens-before edge: pairs with AnnotateHappensAfter on the same address.
227 void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
228 SCOPED_ANNOTATION(AnnotateHappensBefore);
229 Release(thr, pc, addr);
232 void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
233 SCOPED_ANNOTATION(AnnotateHappensAfter);
234 Acquire(thr, pc, addr);
// The condvar/mutex hint annotations below only record statistics; no
// synchronization is modeled for them (bodies are empty beyond the prologue).
237 void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
238 SCOPED_ANNOTATION(AnnotateCondVarSignal);
241 void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
242 SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
245 void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
246 SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
249 void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
251 SCOPED_ANNOTATION(AnnotateCondVarWait);
// RWLock annotations map directly onto the runtime's mutex model.
254 void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
255 SCOPED_ANNOTATION(AnnotateRWLockCreate);
256 MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
// Static variant: LinkerInit suppresses the destroy-while-held check for
// mutexes with static storage duration.
259 void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
260 SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
261 MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
264 void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
265 SCOPED_ANNOTATION(AnnotateRWLockDestroy);
266 MutexDestroy(thr, pc, m);
// Acquired/Released take a write/read selector (parameter elided in this
// chunk); write path vs. read path chosen by the branch below.
269 void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
271 SCOPED_ANNOTATION(AnnotateRWLockAcquired);
273 MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
275 MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
278 void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
280 SCOPED_ANNOTATION(AnnotateRWLockReleased);
282 MutexUnlock(thr, pc, m);
284 MutexReadUnlock(thr, pc, m);
// The following annotations are accepted for compatibility but are no-ops
// in TSan (only statistics are recorded by the prologue macro).
287 void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
288 SCOPED_ANNOTATION(AnnotateTraceMemory);
291 void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
292 SCOPED_ANNOTATION(AnnotateFlushState);
295 void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
297 SCOPED_ANNOTATION(AnnotateNewMemory);
300 void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
301 SCOPED_ANNOTATION(AnnotateNoOp);
// Drains the expected-races list: any entry that was never hit is reported
// as a missed expected race, then every entry is unlinked (and presumably
// freed — free call elided in this chunk).
304 void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
305 SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
306 Lock lock(&dyn_ann_ctx->mtx);
307 while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
308 ExpectRace *race = dyn_ann_ctx->expect.next;
309 if (atomic_load_relaxed(&race->hitcount) == 0) {
310 ctx->nmissed_expected++;
311 ReportMissedExpectedRace(race);
// Unlink from the circular list.
313 race->prev->next = race->next;
314 race->next->prev = race->prev;
// More compatibility no-ops: race-detection toggling and producer-consumer
// queue (PCQ) annotations are not modeled by TSan.
319 void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
320 char *f, int l, int enable) {
321 SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
322 // FIXME: Reconsider this functionality later. It may be irrelevant.
325 void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
326 char *f, int l, uptr mu) {
327 SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
330 void INTERFACE_ATTRIBUTE AnnotatePCQGet(
331 char *f, int l, uptr pcq) {
332 SCOPED_ANNOTATION(AnnotatePCQGet);
335 void INTERFACE_ATTRIBUTE AnnotatePCQPut(
336 char *f, int l, uptr pcq) {
337 SCOPED_ANNOTATION(AnnotatePCQPut);
340 void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
341 char *f, int l, uptr pcq) {
342 SCOPED_ANNOTATION(AnnotatePCQDestroy);
345 void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
346 char *f, int l, uptr pcq) {
347 SCOPED_ANNOTATION(AnnotatePCQCreate);
// Registers an expected race at 'mem' (used by self-tests; a report there
// is suppressed and its absence is flagged by AnnotateFlushExpectedRaces).
350 void INTERFACE_ATTRIBUTE AnnotateExpectRace(
351 char *f, int l, uptr mem, char *desc) {
352 SCOPED_ANNOTATION(AnnotateExpectRace);
353 Lock lock(&dyn_ann_ctx->mtx);
354 AddExpectRace(&dyn_ann_ctx->expect,
356 DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
// Shared implementation for the benign-race annotations below: registers
// [mem, mem+size) in the benign list under the annotation mutex.
359 static void BenignRaceImpl(
360 char *f, int l, uptr mem, uptr size, char *desc) {
361 Lock lock(&dyn_ann_ctx->mtx);
362 AddExpectRace(&dyn_ann_ctx->benign,
363 f, l, mem, size, desc);
364 DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
367 // FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
// Suppresses race reports on [mem, mem+size).
368 void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
369 char *f, int l, uptr mem, uptr size, char *desc) {
370 SCOPED_ANNOTATION(AnnotateBenignRaceSized);
371 BenignRaceImpl(f, l, mem, size, desc);
// Single-byte convenience form (size fixed at 1).
374 void INTERFACE_ATTRIBUTE AnnotateBenignRace(
375 char *f, int l, uptr mem, char *desc) {
376 SCOPED_ANNOTATION(AnnotateBenignRace);
377 BenignRaceImpl(f, l, mem, 1, desc);
// Ignore-region annotations. Note that TSan's ThreadIgnoreBegin/End ignore
// ALL memory accesses, so the Reads and Writes variants map to the same
// runtime calls rather than being tracked separately.
380 void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
381 SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
382 ThreadIgnoreBegin(thr, pc);
385 void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
386 SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
387 ThreadIgnoreEnd(thr, pc);
390 void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
391 SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
392 ThreadIgnoreBegin(thr, pc);
395 void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
396 SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
397 ThreadIgnoreEnd(thr, pc);
// Sync-ignore regions: synchronization inside the region is not modeled.
400 void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
401 SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
402 ThreadIgnoreSyncBegin(thr, pc);
405 void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
406 SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
407 ThreadIgnoreSyncEnd(thr, pc);
// Publish/unpublish are accepted but not modeled (statistics only).
410 void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
411 char *f, int l, uptr addr, uptr size) {
412 SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
415 void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
416 char *f, int l, uptr addr, uptr size) {
417 SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
// Sets the current thread's name as shown in reports.
420 void INTERFACE_ATTRIBUTE AnnotateThreadName(
421 char *f, int l, char *name) {
422 SCOPED_ANNOTATION(AnnotateThreadName);
423 ThreadSetName(thr, name);
426 // We deliberately omit the implementation of WTFAnnotateHappensBefore() and
427 // WTFAnnotateHappensAfter(). Those are being used by Webkit to annotate
428 // atomic operations, which should be handled by ThreadSanitizer correctly.
429 void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
430 SCOPED_ANNOTATION(AnnotateHappensBefore);
433 void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
434 SCOPED_ANNOTATION(AnnotateHappensAfter);
// WebKit's benign-race form does delegate to the shared implementation.
437 void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
438 char *f, int l, uptr mem, uptr sz, char *desc) {
439 SCOPED_ANNOTATION(AnnotateBenignRaceSized);
440 BenignRaceImpl(f, l, mem, sz, desc);
// Valgrind compatibility shims: some code keys behavior off these.
443 int INTERFACE_ATTRIBUTE RunningOnValgrind() {
444 return flags()->running_on_valgrind;
// Weak so users can override; return value elided in this chunk.
447 double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
// Lets applications query the detector's mode at runtime.
451 const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
452 if (internal_strcmp(query, "pure_happens_before") == 0)
// MSan-style annotations: intentionally empty under TSan.
458 void INTERFACE_ATTRIBUTE
459 AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
460 void INTERFACE_ATTRIBUTE
461 AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
463 // Note: the parameter is called flagz, because flags is already taken
464 // by the global function that returns flags.
// Custom-mutex interface: registers a user mutex with the creation-time
// subset of the provided flags.
466 void __tsan_mutex_create(void *m, unsigned flagz) {
467 SCOPED_ANNOTATION(__tsan_mutex_create);
468 MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
472 void __tsan_mutex_destroy(void *m, unsigned flagz) {
473 SCOPED_ANNOTATION(__tsan_mutex_destroy);
474 MutexDestroy(thr, pc, (uptr)m, flagz);
// Called before a custom mutex lock attempt. For non-try locks, runs the
// deadlock-detector pre-lock hooks; then enters an ignore region so the
// mutex's own internal accesses/synchronization are not reported
// (region exited in __tsan_mutex_post_lock).
478 void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
479 SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
480 if (!(flagz & MutexFlagTryLock)) {
481 if (flagz & MutexFlagReadLock)
482 MutexPreReadLock(thr, pc, (uptr)m);
484 MutexPreLock(thr, pc, (uptr)m);
486 ThreadIgnoreBegin(thr, pc, false);
487 ThreadIgnoreSyncBegin(thr, pc, false);
// Called after a lock attempt completes: closes the ignore region opened by
// __tsan_mutex_pre_lock, then records the acquisition unless this was a
// failed try-lock.
491 void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
492 SCOPED_ANNOTATION(__tsan_mutex_post_lock);
493 ThreadIgnoreSyncEnd(thr, pc);
494 ThreadIgnoreEnd(thr, pc);
495 if (!(flagz & MutexFlagTryLockFailed)) {
496 if (flagz & MutexFlagReadLock)
497 MutexPostReadLock(thr, pc, (uptr)m, flagz);
499 MutexPostLock(thr, pc, (uptr)m, flagz, rec);
// Called before unlocking: records the release (returning the recursion
// count for write locks) and opens an ignore region around the mutex
// internals; uses the _RET macro variant so the early-out returns 0.
504 int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
505 SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
507 if (flagz & MutexFlagReadLock) {
// Recursive unlock makes no sense for a read lock.
508 CHECK(!(flagz & MutexFlagRecursiveUnlock));
509 MutexReadUnlock(thr, pc, (uptr)m);
511 ret = MutexUnlock(thr, pc, (uptr)m, flagz);
513 ThreadIgnoreBegin(thr, pc, false);
514 ThreadIgnoreSyncBegin(thr, pc, false);
// Closes the ignore region opened by __tsan_mutex_pre_unlock.
519 void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
520 SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
521 ThreadIgnoreSyncEnd(thr, pc);
522 ThreadIgnoreEnd(thr, pc);
// Signal hooks: wrap a condvar-style signal in an ignore region so the
// primitive's internal accesses are not reported.
526 void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
527 SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
528 ThreadIgnoreBegin(thr, pc, false);
529 ThreadIgnoreSyncBegin(thr, pc, false);
533 void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
534 SCOPED_ANNOTATION(__tsan_mutex_post_signal);
535 ThreadIgnoreSyncEnd(thr, pc);
536 ThreadIgnoreEnd(thr, pc);
// Divert hooks: temporarily suspend the surrounding ignore region so a
// callback executed from inside a mutex implementation IS instrumented;
// post_divert re-enters the region.
540 void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
541 SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
542 // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
543 ThreadIgnoreSyncEnd(thr, pc);
544 ThreadIgnoreEnd(thr, pc);
548 void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
549 SCOPED_ANNOTATION(__tsan_mutex_post_divert);
550 ThreadIgnoreBegin(thr, pc, false);
551 ThreadIgnoreSyncBegin(thr, pc, false);