//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"
16 enum class ThreadState : u8 {
// Thread-exit destructor registered through pthread_key_create(); receives the
// Allocator instance as the key's value. Defined at the bottom of this file.
template <class Allocator> void teardownThread(void *Ptr);
24 template <class Allocator> struct TSDRegistryExT {
25 void initLinkerInitialized(Allocator *Instance) {
26 Instance->initLinkerInitialized();
27 CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
28 FallbackTSD = reinterpret_cast<TSD<Allocator> *>(
29 map(nullptr, sizeof(TSD<Allocator>), "scudo:tsd"));
30 FallbackTSD->initLinkerInitialized(Instance);
33 void init(Allocator *Instance) {
34 memset(this, 0, sizeof(*this));
35 initLinkerInitialized(Instance);
38 void unmapTestOnly() {
39 unmap(reinterpret_cast<void *>(FallbackTSD), sizeof(TSD<Allocator>));
42 ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
43 if (LIKELY(State != ThreadState::NotInitialized))
45 initThread(Instance, MinimalInit);
48 ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
49 if (LIKELY(State == ThreadState::Initialized &&
50 !atomic_load(&Disabled, memory_order_acquire))) {
51 *UnlockRequired = false;
56 *UnlockRequired = true;
60 // To disable the exclusive TSD registry, we effectively lock the fallback TSD
61 // and force all threads to attempt to use it instead of their local one.
65 atomic_store(&Disabled, 1U, memory_order_release);
69 atomic_store(&Disabled, 0U, memory_order_release);
70 FallbackTSD->unlock();
75 void initOnceMaybe(Allocator *Instance) {
77 if (LIKELY(Initialized))
79 initLinkerInitialized(Instance); // Sets Initialized.
82 // Using minimal initialization allows for global initialization while keeping
83 // the thread specific structure untouched. The fallback structure will be
85 NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
86 initOnceMaybe(Instance);
87 if (UNLIKELY(MinimalInit))
90 pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
91 ThreadTSD.initLinkerInitialized(Instance);
92 State = ThreadState::Initialized;
93 Instance->callPostInitCallback();
96 pthread_key_t PThreadKey;
99 TSD<Allocator> *FallbackTSD;
101 static THREADLOCAL ThreadState State;
102 static THREADLOCAL TSD<Allocator> ThreadTSD;
104 friend void teardownThread<Allocator>(void *Ptr);
107 template <class Allocator>
108 THREADLOCAL TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
109 template <class Allocator>
110 THREADLOCAL ThreadState TSDRegistryExT<Allocator>::State;
112 template <class Allocator> void teardownThread(void *Ptr) {
113 typedef TSDRegistryExT<Allocator> TSDRegistryT;
114 Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
115 // The glibc POSIX thread-local-storage deallocation routine calls user
116 // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
117 // We want to be called last since other destructors might call free and the
118 // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
119 // quarantine and swallowing the cache.
120 if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
121 TSDRegistryT::ThreadTSD.DestructorIterations--;
122 // If pthread_setspecific fails, we will go ahead with the teardown.
123 if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
127 TSDRegistryT::ThreadTSD.commitBack(Instance);
128 TSDRegistryT::State = ThreadState::TornDown;
#endif // SCUDO_TSD_EXCLUSIVE_H_