//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
21 #include "sanitizer_common/sanitizer_placement_new.h"
22 #include "sanitizer_common/sanitizer_stacktrace.h"
23 #include "sanitizer_common/sanitizer_mutex.h"
24 #include "tsan_flags.h"
25 #include "tsan_interface.h"
28 using namespace __tsan; // NOLINT
#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
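// Added commentary (not in the original source): mo_consume is classified as
// an acquire order above, i.e. consume loads are modeled with full acquire
// semantics. This is a conservative over-approximation: it can only add
// happens-before edges, so it cannot produce false race reports, only hide
// dependency-ordering subtleties.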
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand,
  // so emulate it with a compare-and-swap loop.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}
// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}
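// Added commentary (not in the original source; assumes the usual meaning of
// the kSizeLog* constants as log2 of the shadow access size): 1/2/4-byte
// atomics are tracked with exact-size shadow accesses, while both 8- and
// 16-byte atomics are tracked as 8-byte accesses. A race confined to the high
// 8 bytes of a 16-byte atomic can therefore be missed, as the comment above
// notes.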
#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}
static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}
template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create sync object if it does not exist yet. For example, an atomic
  // pointer is initialized to nullptr and then periodically acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
  if (s) {
    AcquireImpl(thr, pc, &s->clock);
    // Re-read under sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
    s->mtx.ReadUnlock();
  }
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}
template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}
template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}
template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif
// Interface functions follow.

#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics; since we use
  // __sync_ atomics for the actual atomic operations ourselves, we can
  // safely ignore it as well. It subtly affects semantics, but we don't
  // model the difference.
  return (morder)(mo & 0x7fff);
}
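// Illustrative note (added, not in the original source): if the compiler
// encodes an HLE-annotated order such as (__ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE),
// i.e. (2 | 1 << 16), the mask above reduces it back to plain mo_acquire,
// which is exactly the "elision always fails" behavior described above.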
#define SCOPED_ATOMIC(func, ...) \
    ThreadState *const thr = cur_thread(); \
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) { \
      ProcessPendingSignals(thr); \
      return NoTsanAtomic##func(__VA_ARGS__); \
    } \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = convert_morder(mo); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
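// Added commentary (not in the original source): every __tsan_atomicN_* entry
// point below expands this macro. When synchronization tracking is disabled
// for the thread, the call is forwarded straight to the NoTsanAtomic* helper;
// otherwise the memory order is normalized, statistics are bumped, and the
// call is dispatched to the corresponding Atomic* implementation above, with
// the ScopedAtomic RAII helper (defined below) providing function entry/exit
// events and pending-signal processing around the call.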
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

extern "C" {
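// Added commentary (not in the original source): the functions below are the
// entry points emitted by compiler instrumentation. For example, under
// -fsanitize=thread a C11 call such as
//   atomic_load_explicit(&x, memory_order_acquire)  // x is a 32-bit atomic
// is lowered to a call of __tsan_atomic32_load(&x, mo_acquire) instead of a
// plain atomic instruction.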
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #if !SANITIZER_GO
// Go

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

extern "C" {
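// Added commentary (not in the original source): the Go runtime calls these
// thunks with a single argument buffer `a`. Judging from the offsets used
// below, the buffer holds the target address at a+0, the operand(s) starting
// at a+8, and space for the result after the operands (e.g. a+8 for loads,
// a+16 for the 32-bit fetch_add result).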
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO