//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_X86_H
#define SANITIZER_ATOMIC_CLANG_X86_H

namespace __sanitizer {
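
// proc_yield issues the PAUSE spin-loop hint so a busy-wait loop does not
// saturate the pipeline; the empty asm statements around it are compiler-only
// barriers that keep memory accesses from being reordered across the spin.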
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
}
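
// atomic_load and atomic_store below rely on the strong x86 memory model:
// naturally-sized, aligned accesses are done with a plain MOV bracketed by
// compiler-only barriers (loads are implicitly acquire, stores implicitly
// release), while 8-byte accesses on 32-bit targets go through an MMX movq,
// a single instruction that moves 64 bits at once.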
template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      // On x86 loads are implicitly acquire.
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 plain MOV is enough for seq_cst store.
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    }
  } else {
    // 64-bit load on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"  // (ptr could be read-only)
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (v)
        : "m" (a->val_dont_use)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
  }
  return v;
}
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (a->val_dont_use)
        : "m" (v)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
    if (mo == memory_order_seq_cst)
      __sync_synchronize();
  }
}

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_X86_H