//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
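//
// For example (an illustrative sketch, not part of the interface contract):
// when a program is built with -fsanitize=thread, the compiler rewrites a
// C++11 operation such as
//   x.load(std::memory_order_acquire)
// on a 32-bit atomic into a call roughly equivalent to
//   __tsan_atomic32_load(&x, __tsan_memory_order_acquire)
// so each __tsan_atomic* entry point below models the corresponding
// standard operation.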
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT
#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
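// Note that these classifications overlap: for example, mo_acq_rel is both an
// acquire and a release order, so IsAcquireOrder(mo_acq_rel) and
// IsReleaseOrder(mo_acq_rel) are both true, while IsAcqRelOrder is true only
// for mo_acq_rel and mo_seq_cst.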
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand,
  // so emulate it with a compare-and-swap loop.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cur;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}
// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}
static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}
template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create sync object if it does not exist yet. For example, an atomic
  // pointer is initialized to nullptr and then periodically acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
  if (s) {
    AcquireImpl(thr, pc, &s->clock);
    // Re-read under sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
    s->mtx.ReadUnlock();
  }
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}
template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}
template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}
template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

// Interface functions follow.

#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics;
  // since we use __sync_ atomics for the actual atomic operations,
  // we can safely ignore it as well. It also subtly affects semantics,
  // but we don't model the difference.
  return (morder)(mo & 0x7fff);
}
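// For example (illustrative only): an operation issued with
// (__ATOMIC_HLE_ACQUIRE | __ATOMIC_ACQUIRE), i.e. (1 << 16) | mo_acquire,
// arrives here with the HLE bit set; masking with 0x7fff strips that bit and
// the operation is handled as a plain acquire operation.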
#define SCOPED_ATOMIC(func, ...) \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_sync || thr->ignore_interceptors) { \
      ProcessPendingSignals(thr); \
      return NoTsanAtomic##func(__VA_ARGS__); \
    } \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = convert_morder(mo); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
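// For example, in __tsan_atomic32_load below this macro expands so that
// ignored contexts (thr->ignore_sync or thr->ignore_interceptors) take the
// plain NoTsanAtomicLoad path, while instrumented contexts record the caller
// PC, normalize mo via convert_morder, bump statistics and dispatch to
// AtomicLoad.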
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }

 private:
  ThreadState *thr_;
};
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}

#else  // #if !SANITIZER_GO

// Go

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/
#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/
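// The Go entry points below receive a single flat argument buffer `a`:
// the first 8 bytes hold the address of the atomic variable, and operands
// and results follow at 8-byte offsets (see the casts in each call for the
// exact layout of that particular operation).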
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
#endif  // #if !SANITIZER_GO