1 //===---------- emutls.c - Implements __emutls_get_address ---------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
#if defined(__BIONIC__)
// There are 4 pthread key cleanup rounds on Bionic. Delay emutls deallocation
// to round 2. We need to delay deallocation because:
//  - Android versions older than M lack __cxa_thread_atexit_impl, so apps
//    use a pthread key destructor to call C++ destructors.
//  - Apps might use __thread/thread_local variables in pthread destructors.
// We can't wait until the final two rounds, because jemalloc needs two rounds
// after the final malloc/free call to free its thread-specific data (see
// https://reviews.llvm.org/D46978#1107507).
#define EMUTLS_SKIP_DESTRUCTOR_ROUNDS 1
#else
#define EMUTLS_SKIP_DESTRUCTOR_ROUNDS 0
#endif
// Per-thread array of TLS object addresses, stored in this thread's
// pthread key / TLS slot. Object for variable index i lives in data[i-1].
typedef struct emutls_address_array {
  uintptr_t skip_destructor_rounds; // remaining destructor rounds to delay
  uintptr_t size;                   // number of elements in the 'data' array
  void *data[];                     // flexible array of object addresses
} emutls_address_array;
35 static void emutls_shutdown(emutls_address_array *array);
41 static pthread_mutex_t emutls_mutex = PTHREAD_MUTEX_INITIALIZER;
42 static pthread_key_t emutls_pthread_key;
43 static bool emutls_key_created = false;
45 typedef unsigned int gcc_word __attribute__((mode(word)));
46 typedef unsigned int gcc_pointer __attribute__((mode(pointer)));
// Default is not to use posix_memalign, so systems like Android
// can use thread local data without heavier POSIX memory allocators.
#ifndef EMUTLS_USE_POSIX_MEMALIGN
#define EMUTLS_USE_POSIX_MEMALIGN 0
#endif
// Allocates 'size' bytes aligned to 'align' (a power of two).
// Aborts on allocation failure. Without posix_memalign, over-allocates
// with malloc and stashes the original pointer just below the aligned
// base so emutls_memalign_free can recover it.
static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
  void *base;
#if EMUTLS_USE_POSIX_MEMALIGN
  if (posix_memalign(&base, align, size) != 0)
    abort();
#else
#define EXTRA_ALIGN_PTR_BYTES (align - 1 + sizeof(void *))
  char *object;
  if ((object = (char *)malloc(EXTRA_ALIGN_PTR_BYTES + size)) == NULL)
    abort();
  base = (void *)(((uintptr_t)(object + EXTRA_ALIGN_PTR_BYTES)) &
                  ~(uintptr_t)(align - 1));

  // Save the original malloc'ed pointer just before the aligned base.
  ((void **)base)[-1] = object;
#endif
  return base;
}
// Frees memory obtained from emutls_memalign_alloc.
static __inline void emutls_memalign_free(void *base) {
#if EMUTLS_USE_POSIX_MEMALIGN
  free(base);
#else
  // The mallocated address is in ((void**)base)[-1]
  free(((void **)base)[-1]);
#endif
}
81 static __inline void emutls_setspecific(emutls_address_array *value) {
82 pthread_setspecific(emutls_pthread_key, (void *)value);
85 static __inline emutls_address_array *emutls_getspecific() {
86 return (emutls_address_array *)pthread_getspecific(emutls_pthread_key);
89 static void emutls_key_destructor(void *ptr) {
90 emutls_address_array *array = (emutls_address_array *)ptr;
91 if (array->skip_destructor_rounds > 0) {
92 // emutls is deallocated using a pthread key destructor. These
93 // destructors are called in several rounds to accommodate destructor
94 // functions that (re)initialize key values with pthread_setspecific.
95 // Delay the emutls deallocation to accommodate other end-of-thread
96 // cleanup tasks like calling thread_local destructors (e.g. the
97 // __cxa_thread_atexit fallback in libc++abi).
98 array->skip_destructor_rounds--;
99 emutls_setspecific(array);
101 emutls_shutdown(array);
106 static __inline void emutls_init(void) {
107 if (pthread_key_create(&emutls_pthread_key, emutls_key_destructor) != 0)
109 emutls_key_created = true;
112 static __inline void emutls_init_once(void) {
113 static pthread_once_t once = PTHREAD_ONCE_INIT;
114 pthread_once(&once, emutls_init);
117 static __inline void emutls_lock() { pthread_mutex_lock(&emutls_mutex); }
119 static __inline void emutls_unlock() { pthread_mutex_unlock(&emutls_mutex); }
128 static LPCRITICAL_SECTION emutls_mutex;
129 static DWORD emutls_tls_index = TLS_OUT_OF_INDEXES;
131 typedef uintptr_t gcc_word;
132 typedef void *gcc_pointer;
134 static void win_error(DWORD last_err, const char *hint) {
136 if (FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
137 FORMAT_MESSAGE_FROM_SYSTEM |
138 FORMAT_MESSAGE_MAX_WIDTH_MASK,
139 NULL, last_err, 0, (LPSTR)&buffer, 1, NULL)) {
140 fprintf(stderr, "Windows error: %s\n", buffer);
142 fprintf(stderr, "Unkown Windows error: %s\n", hint);
147 static __inline void win_abort(DWORD last_err, const char *hint) {
148 win_error(last_err, hint);
// Windows aligned allocation; aborts on failure.
static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
  void *base = _aligned_malloc(size, align);
  if (!base)
    win_abort(GetLastError(), "_aligned_malloc");
  return base;
}
// Frees memory obtained from the Windows emutls_memalign_alloc.
static __inline void emutls_memalign_free(void *base) { _aligned_free(base); }
161 static void emutls_exit(void) {
163 DeleteCriticalSection(emutls_mutex);
164 _aligned_free(emutls_mutex);
167 if (emutls_tls_index != TLS_OUT_OF_INDEXES) {
168 emutls_shutdown((emutls_address_array *)TlsGetValue(emutls_tls_index));
169 TlsFree(emutls_tls_index);
170 emutls_tls_index = TLS_OUT_OF_INDEXES;
174 #pragma warning(push)
175 #pragma warning(disable : 4100)
176 static BOOL CALLBACK emutls_init(PINIT_ONCE p0, PVOID p1, PVOID *p2) {
178 (LPCRITICAL_SECTION)_aligned_malloc(sizeof(CRITICAL_SECTION), 16);
180 win_error(GetLastError(), "_aligned_malloc");
183 InitializeCriticalSection(emutls_mutex);
185 emutls_tls_index = TlsAlloc();
186 if (emutls_tls_index == TLS_OUT_OF_INDEXES) {
188 win_error(GetLastError(), "TlsAlloc");
191 atexit(&emutls_exit);
195 static __inline void emutls_init_once(void) {
196 static INIT_ONCE once;
197 InitOnceExecuteOnce(&once, emutls_init, NULL, NULL);
200 static __inline void emutls_lock() { EnterCriticalSection(emutls_mutex); }
202 static __inline void emutls_unlock() { LeaveCriticalSection(emutls_mutex); }
204 static __inline void emutls_setspecific(emutls_address_array *value) {
205 if (TlsSetValue(emutls_tls_index, (LPVOID)value) == 0)
206 win_abort(GetLastError(), "TlsSetValue");
209 static __inline emutls_address_array *emutls_getspecific() {
210 LPVOID value = TlsGetValue(emutls_tls_index);
212 const DWORD err = GetLastError();
213 if (err != ERROR_SUCCESS)
214 win_abort(err, "TlsGetValue");
216 return (emutls_address_array *)value;
// Provide atomic load/store functions for emutls_get_index if built with MSVC.
#if !defined(__ATOMIC_RELEASE)
#include <intrin.h>

enum { __ATOMIC_ACQUIRE = 2, __ATOMIC_RELEASE = 3 };

static __inline uintptr_t __atomic_load_n(void *ptr, unsigned type) {
  assert(type == __ATOMIC_ACQUIRE);
  // These return the previous value - but since we do an OR with 0,
  // it's equivalent to a plain load.
#ifdef _WIN64
  return InterlockedOr64(ptr, 0);
#else
  return InterlockedOr(ptr, 0);
#endif
}

static __inline void __atomic_store_n(void *ptr, uintptr_t val, unsigned type) {
  assert(type == __ATOMIC_RELEASE);
  InterlockedExchangePointer((void *volatile *)ptr, (void *)val);
}

#endif // __ATOMIC_RELEASE
// Number of TLS variables assigned an index so far; guarded by emutls_mutex.
static size_t emutls_num_object = 0; // number of allocated TLS objects
249 // Free the allocated TLS data
250 static void emutls_shutdown(emutls_address_array *array) {
253 for (i = 0; i < array->size; ++i) {
255 emutls_memalign_free(array->data[i]);
260 // For every TLS variable xyz,
261 // there is one __emutls_control variable named __emutls_v.xyz.
262 // If xyz has non-zero initial value, __emutls_v.xyz's "value"
263 // will point to __emutls_t.xyz, which has the initial value.
264 typedef struct __emutls_control {
265 // Must use gcc_word here, instead of size_t, to match GCC. When
266 // gcc_word is larger than size_t, the upper extra bits are all
267 // zeros. We can use variables of size_t to operate on size and
269 gcc_word size; // size of the object in bytes
270 gcc_word align; // alignment of the object in bytes
272 uintptr_t index; // data[index-1] is the object address
273 void *address; // object address, when in single thread env
275 void *value; // null or non-zero initial value for the object
278 // Emulated TLS objects are always allocated at run-time.
279 static __inline void *emutls_allocate_object(__emutls_control *control) {
280 // Use standard C types, check with gcc's emutls.o.
281 COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(gcc_pointer));
282 COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(void *));
284 size_t size = control->size;
285 size_t align = control->align;
287 if (align < sizeof(void *))
288 align = sizeof(void *);
289 // Make sure that align is power of 2.
290 if ((align & (align - 1)) != 0)
293 base = emutls_memalign_alloc(align, size);
295 memcpy(base, control->value, size);
297 memset(base, 0, size);
301 // Returns control->object.index; set index if not allocated yet.
302 static __inline uintptr_t emutls_get_index(__emutls_control *control) {
303 uintptr_t index = __atomic_load_n(&control->object.index, __ATOMIC_ACQUIRE);
307 index = control->object.index;
309 index = ++emutls_num_object;
310 __atomic_store_n(&control->object.index, index, __ATOMIC_RELEASE);
317 // Updates newly allocated thread local emutls_address_array.
318 static __inline void emutls_check_array_set_size(emutls_address_array *array,
323 emutls_setspecific(array);
326 // Returns the new 'data' array size, number of elements,
327 // which must be no smaller than the given index.
328 static __inline uintptr_t emutls_new_data_array_size(uintptr_t index) {
329 // Need to allocate emutls_address_array with extra slots
330 // to store the header.
331 // Round up the emutls_address_array size to multiple of 16.
332 uintptr_t header_words = sizeof(emutls_address_array) / sizeof(void *);
333 return ((index + header_words + 15) & ~((uintptr_t)15)) - header_words;
336 // Returns the size in bytes required for an emutls_address_array with
337 // N number of elements for data field.
338 static __inline uintptr_t emutls_asize(uintptr_t N) {
339 return N * sizeof(void *) + sizeof(emutls_address_array);
342 // Returns the thread local emutls_address_array.
343 // Extends its size if necessary to hold address at index.
344 static __inline emutls_address_array *
345 emutls_get_address_array(uintptr_t index) {
346 emutls_address_array *array = emutls_getspecific();
348 uintptr_t new_size = emutls_new_data_array_size(index);
349 array = (emutls_address_array *)malloc(emutls_asize(new_size));
351 memset(array->data, 0, new_size * sizeof(void *));
352 array->skip_destructor_rounds = EMUTLS_SKIP_DESTRUCTOR_ROUNDS;
354 emutls_check_array_set_size(array, new_size);
355 } else if (index > array->size) {
356 uintptr_t orig_size = array->size;
357 uintptr_t new_size = emutls_new_data_array_size(index);
358 array = (emutls_address_array *)realloc(array, emutls_asize(new_size));
360 memset(array->data + orig_size, 0,
361 (new_size - orig_size) * sizeof(void *));
362 emutls_check_array_set_size(array, new_size);
367 void *__emutls_get_address(__emutls_control *control) {
368 uintptr_t index = emutls_get_index(control);
369 emutls_address_array *array = emutls_get_address_array(index--);
370 if (array->data[index] == NULL)
371 array->data[index] = emutls_allocate_object(control);
372 return array->data[index];
376 // Called by Bionic on dlclose to delete the emutls pthread key.
377 __attribute__((visibility("hidden"))) void __emutls_unregister_key(void) {
378 if (emutls_key_created) {
379 pthread_key_delete(emutls_pthread_key);
380 emutls_key_created = false;