//===------------------------ memory.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memory"

#ifndef _LIBCPP_HAS_NO_THREADS
#include "mutex"
#include "thread"
#if defined(__unix__) && !defined(__ANDROID__) && defined(__ELF__) && defined(_LIBCPP_HAS_COMMENT_LIB_PRAGMA)
#pragma comment(lib, "pthread")
#endif // _LIBCPP_HAS_COMMENT_LIB_PRAGMA
#endif // _LIBCPP_HAS_NO_THREADS
#include "include/atomic_support.h"

_LIBCPP_BEGIN_NAMESPACE_STD

const allocator_arg_t allocator_arg = allocator_arg_t();

bad_weak_ptr::~bad_weak_ptr() _NOEXCEPT {}

const char*
bad_weak_ptr::what() const _NOEXCEPT
{
    return "bad_weak_ptr";
}
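
// A short illustration (a sketch, not code from this file) of when
// bad_weak_ptr reaches users: constructing a shared_ptr from an expired
// weak_ptr throws it.
//
//   std::weak_ptr<int> wp;
//   {
//       auto sp = std::make_shared<int>(42);
//       wp = sp;
//   }                                        // last shared owner gone
//   try { std::shared_ptr<int> sp2(wp); }    // throws bad_weak_ptr
//   catch (const std::bad_weak_ptr& e) {}    // e.what() == "bad_weak_ptr"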

__shared_count::~__shared_count()
{
}

__shared_weak_count::~__shared_weak_count()
{
}

#if defined(_LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS)

void
__shared_count::__add_shared() _NOEXCEPT
{
    __libcpp_atomic_refcount_increment(__shared_owners_);
}

bool
__shared_count::__release_shared() _NOEXCEPT
{
    if (__libcpp_atomic_refcount_decrement(__shared_owners_) == -1)
    {
        __on_zero_shared();
        return true;
    }
    return false;
}
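
// The owner counts in these classes are stored zero-based: a stored value of
// 0 means one outstanding owner, and use_count() reports __shared_owners_ + 1.
// That is why decrementing to -1 above signals that the last owner is gone
// and __on_zero_shared() must run.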

void
__shared_weak_count::__add_shared() _NOEXCEPT
{
    __shared_count::__add_shared();
}

void
__shared_weak_count::__add_weak() _NOEXCEPT
{
    __libcpp_atomic_refcount_increment(__shared_weak_owners_);
}

void
__shared_weak_count::__release_shared() _NOEXCEPT
{
    if (__shared_count::__release_shared())
        __release_weak();
}

#endif // _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS

void
__shared_weak_count::__release_weak() _NOEXCEPT
{
    // NOTE: The acquire load here is an optimization of the very
    // common case where a shared pointer is being destructed while
    // having no other contended references.
    //
    // BENEFIT: We avoid expensive atomic stores like XADD and STREX
    // in a common case. Those instructions are slow and do nasty
    // things to caches.
    //
    // IS THIS SAFE? Yes. During weak destruction, if we see that we
    // are the last reference, we know that no-one else is accessing
    // us. If someone were accessing us, then they would be doing so
    // while the last shared / weak_ptr was being destructed, and
    // that's undefined anyway.
    //
    // If we see anything other than a 0, then we have possible
    // contention, and need to use an atomicrmw primitive.
    // The same arguments don't apply for increment, where it is legal
    // (though inadvisable) to share shared_ptr references between
    // threads, and have them all get copied at once. The argument
    // also doesn't apply for __release_shared, because an outstanding
    // weak_ptr::lock() could read / modify the shared count.
    if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0)
    {
        // no need to do this store, because we are about
        // to destroy everything.
        //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
        __on_zero_shared_weak();
    }
    else if (__libcpp_atomic_refcount_decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}
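
// A concrete walk-through of the fast path above: a shared_ptr that never had
// weak_ptr observers keeps __shared_weak_owners_ at 0 (zero-based, standing
// for the single weak reference held collectively by the shared owners), so
// when the last shared_ptr dies and __release_shared() forwards here, the
// acquire load reads 0 and the control block is destroyed without a single
// atomic read-modify-write.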

__shared_weak_count*
__shared_weak_count::lock() _NOEXCEPT
{
    long object_owners = __libcpp_atomic_load(&__shared_owners_);
    while (object_owners != -1)
    {
        if (__libcpp_atomic_compare_exchange(&__shared_owners_,
                                             &object_owners,
                                             object_owners+1))
            return this;
    }
    return nullptr;
}
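
// The CAS loop above is what backs weak_ptr<T>::lock(): a new shared
// reference may only be handed out while the object is still alive, i.e.
// while the zero-based shared count has not fallen to -1. On failure,
// __libcpp_atomic_compare_exchange refreshes object_owners with the value
// it observed, so each retry races against up-to-date state.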

#if !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)

const void*
__shared_weak_count::__get_deleter(const type_info&) const _NOEXCEPT
{
    return nullptr;
}

#endif // _LIBCPP_NO_RTTI

#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)

_LIBCPP_SAFE_STATIC static const std::size_t __sp_mut_count = 16;
_LIBCPP_SAFE_STATIC static __libcpp_mutex_t mut_back[__sp_mut_count] =
{
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER
};

_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) _NOEXCEPT
   : __lx(p)
{
}

void
__sp_mut::lock() _NOEXCEPT
{
    auto m = static_cast<__libcpp_mutex_t*>(__lx);
    unsigned count = 0;
    while (!__libcpp_mutex_trylock(m))
    {
        if (++count > 16)
        {
            __libcpp_mutex_lock(m);
            break;
        }
        this_thread::yield();
    }
}
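
// What lock() above does, informally: spin on trylock (yielding between
// attempts) up to 16 times, then fall back to a blocking lock. The heuristic
// presumably bets that __sp_mut critical sections are only a few loads and
// stores long, so a brief spin is cheaper than immediately sleeping.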

void
__sp_mut::unlock() _NOEXCEPT
{
    __libcpp_mutex_unlock(static_cast<__libcpp_mutex_t*>(__lx));
}

__sp_mut&
__get_sp_mut(const void* p)
{
    static __sp_mut muts[__sp_mut_count]
    {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}
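
// A sketch of the striping above: __sp_mut_count is a power of two, so
// "hash & (__sp_mut_count - 1)" is equivalent to "hash % 16" and spreads
// pointer addresses across the 16 pre-initialized mutexes. The non-lock-free
// atomic shared_ptr operations in <memory> bracket their accesses roughly
// like this (illustrative only):
//
//   __sp_mut& __m = __get_sp_mut(__p);   // __p is the shared_ptr's address
//   __m.lock();
//   // ... load or store the shared_ptr ...
//   __m.unlock();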

#endif // !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)

void
declare_reachable(void*)
{
}

void
declare_no_pointers(char*, size_t)
{
}

void
undeclare_no_pointers(char*, size_t)
{
}

#if !defined(_LIBCPP_ABI_POINTER_SAFETY_ENUM_TYPE)
pointer_safety get_pointer_safety() _NOEXCEPT
{
    return pointer_safety::relaxed;
}
#endif

void*
__undeclare_reachable(void* p)
{
    return p;
}

void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    void* r = nullptr;
    if (size <= space)
    {
        char* p1 = static_cast<char*>(ptr);
        char* p2 = reinterpret_cast<char*>(reinterpret_cast<size_t>(p1 + (alignment - 1)) & -alignment);
        size_t d = static_cast<size_t>(p2 - p1);
        if (d <= space - size)
        {
            r = p2;
            ptr = r;
            space -= d;
        }
    }
    return r;
}
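
// Illustrative use of std::align (a sketch, not part of the implementation):
//
//   char buf[64];
//   void* p = buf;
//   size_t space = sizeof(buf);
//   // Carve 8 bytes aligned to 16 out of buf. On success, p is advanced to
//   // the aligned address and space shrinks by the adjustment d (not by
//   // size); on failure, p and space are left untouched and nullptr is
//   // returned.
//   if (std::align(16, 8, p, space))
//       ::new (p) double(3.14);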

_LIBCPP_END_NAMESPACE_STD