//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
#define SANITIZER_ATOMIC_CLANG_OTHER_H

namespace __sanitizer {
// MIPS32 does not support atomic operations wider than 4 bytes. To address
// this lack of functionality, the sanitizer library provides helper methods
// which use an internal spin lock mechanism to emulate atomic operations
// when the size is 8 bytes.
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
static void __spin_lock(volatile int *lock) {
  // Try to grab the lock with an atomic test-and-set; while it is held,
  // spin on plain reads and retry only once the lock looks free.
  while (__sync_lock_test_and_set(lock, 1))
    while (*lock) {
    }
}

static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }

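// Note: __sync_lock_test_and_set is an acquire barrier and
// __sync_lock_release is a release barrier, so accesses inside the critical
// section cannot be reordered out of it.
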
// Make sure the lock is on its own cache line to prevent false sharing.
// Put it inside a struct that is aligned and padded to the typical MIPS
// cacheline, which is 32 bytes.
static struct {
  int lock;
  char pad[32 - sizeof(int)];
} __attribute__((aligned(32))) lock = {0};
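// A compile-time check of that layout assumption could look like this
// (hypothetical; not part of the original file):
//   static_assert(sizeof(lock) == 32, "lock should fill one cache line");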
template <class T>
T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
  T ret;

  __spin_lock(&lock.lock);

  ret = *ptr;
  *ptr = ret + val;

  __spin_unlock(&lock.lock);

  return ret;
}
template <class T>
T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
  T ret;
  __spin_lock(&lock.lock);

  ret = *ptr;
  if (ret == oldval) *ptr = newval;

  __spin_unlock(&lock.lock);

  return ret;
}
#endif

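// Any other wide atomic operation can be emulated the same way. For example,
// a 64-bit exchange (hypothetical helper, not part of this file) might read:
//   template <class T>
//   T __mips_sync_lock_test_and_set(volatile T *ptr, T val) {
//     __spin_lock(&lock.lock);
//     T ret = *ptr;
//     *ptr = val;
//     __spin_unlock(&lock.lock);
//     return ret;
//   }
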
// Generic fallback: there is no portable CPU-yield instruction here, so this
// is only a compiler barrier and the spin count is ignored.
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
}
template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that the processor respects data dependencies
      // (and that the compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __sync_synchronize();
    } else {  // seq_cst
      // E.g. on POWER we need a hw fence even before the load.
      __sync_synchronize();
      v = a->val_dont_use;
      __sync_synchronize();
    }
  } else {
    // 64-bit load on a 32-bit platform.
    // Gross, but simple and reliable.
    // Assume that it is not in read-only memory.
    // fetch_and_add of zero atomically returns the current value without
    // changing it.
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
    typename T::Type volatile *val_ptr =
        const_cast<typename T::Type volatile *>(&a->val_dont_use);
    v = __mips_sync_fetch_and_add<u64>(
        reinterpret_cast<u64 volatile *>(val_ptr), 0);
#else
    v = __sync_fetch_and_add(
        const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
#endif
  }
  return v;
}
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      __sync_synchronize();
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      __sync_synchronize();
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on a 32-bit platform.
    // Gross, but simple and reliable.
    // Retry compare-and-swap until the value we last observed is the one we
    // replace; at that point our value is the current one.
    typename T::Type cmp = a->val_dont_use;
    typename T::Type cur;
    for (;;) {
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
      typename T::Type volatile *val_ptr =
          const_cast<typename T::Type volatile *>(&a->val_dont_use);
      cur = __mips_sync_val_compare_and_swap<u64>(
          reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmp, (u64)v);
#else
      cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
#endif
      if (cmp == cur)
        break;
      cmp = cur;
    }
  }
}
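// Usage sketch (hypothetical; the atomic types such as atomic_uint64_t are
// defined in sanitizer_atomic.h):
//   atomic_uint64_t flag;
//   atomic_store(&flag, 1, memory_order_release);
//   u64 v = atomic_load(&flag, memory_order_acquire);
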
}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H