//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
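//
// As a rough illustration (not a specification), a compiler-instrumented
// C++ atomic access is lowered to a call into the __tsan_atomic* entry
// points defined in this file, e.g.:
//
//   std::atomic<int> x;
//   x.store(1, std::memory_order_release);
//     // ~> __tsan_atomic32_store((volatile a32 *)&x, 1, mo_release);
//   int v = x.load(std::memory_order_acquire);
//     // ~> v = __tsan_atomic32_load((const volatile a32 *)&x, mo_acquire);
//
// The exact lowering is up to the frontend; the mapping above is only meant
// to show how the interface below is intended to be used.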

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

// These should match declarations from public tsan_interface_atomic.h header.
typedef unsigned char a8;
typedef unsigned short a16;  // NOLINT
typedef unsigned int a32;
typedef unsigned long long a64;  // NOLINT
#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif

#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
typedef enum {
  mo_relaxed,
  mo_consume,
  mo_acquire,
  mo_release,
  mo_acq_rel,
  mo_seq_cst
} morder;

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  // Emulate it with a compare-and-swap loop.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}

static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
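
// Dispatch note: the pointer-taking AtomicCAS overload above backs the
// compare_exchange_strong/weak interface functions (it reports whether the
// exchange happened and updates *c with the observed value on failure);
// this by-value overload backs the *_compare_exchange_val interface
// functions, which simply return the value observed in memory.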

static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

// Interface functions follow.

#ifndef SANITIZER_GO

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}
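
// For illustration only: with the macro above, an interface function such as
// __tsan_atomic32_load(a, mo) behaves roughly as if written out as
//
//   a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
//     const uptr callpc = (uptr)__builtin_return_address(0);
//     uptr pc = StackTrace::GetCurrentPc();
//     mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//     ThreadState *const thr = cur_thread();
//     if (thr->ignore_interceptors)
//       return NoTsanAtomicLoad(a, mo);
//     AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//     ScopedAtomic sa(thr, callpc, a, mo, __func__);
//     return AtomicLoad(thr, pc, a, mo);
//   }
//
// i.e. the "ignore" path bypasses instrumentation entirely, while the
// instrumented path brackets the operation with FuncEntry/FuncExit via
// ScopedAtomic and dispatches to the matching Atomic* template above.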

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #ifndef SANITIZER_GO

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/
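
// The Go runtime calls these entry points with a single pointer to a packed
// argument/result frame. As the accessors below show, the frame starts with
// the address of the atomic variable, followed by the operand value(s), with
// the result written back at the end of the frame (offsets differ between the
// 32-bit and 64-bit variants). The exact layout is defined by the Go runtime
// side of the race detector, so treat this description as a reading aid
// rather than a specification.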

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #ifndef SANITIZER_GO