1 /* ===---------- emutls.c - Implements __emutls_get_address ---------------===
3 * The LLVM Compiler Infrastructure
5 * This file is dual licensed under the MIT and the University of Illinois Open
6 * Source Licenses. See LICENSE.TXT for details.
8 * ===----------------------------------------------------------------------===
#ifdef __BIONIC__
/* There are 4 pthread key cleanup rounds on Bionic. Delay emutls deallocation
   to round 2. We need to delay deallocation because:
    - Android versions older than M lack __cxa_thread_atexit_impl, so apps
      use a pthread key destructor to call C++ destructors.
    - Apps might use __thread/thread_local variables in pthread destructors.
   We can't wait until the final two rounds, because jemalloc needs two rounds
   after the final malloc/free call to free its thread-specific data (see
   https://reviews.llvm.org/D46978#1107507). */
#define EMUTLS_SKIP_DESTRUCTOR_ROUNDS 1
#else
#define EMUTLS_SKIP_DESTRUCTOR_ROUNDS 0
#endif
/* Per-thread array of emulated-TLS object addresses, stored in a pthread
 * key (POSIX) or TLS slot (Windows). The header is followed by a flexible
 * 'data' array; data[i] holds the address for emutls index i+1. */
typedef struct emutls_address_array {
    uintptr_t skip_destructor_rounds; /* key-destructor rounds left to skip */
    uintptr_t size; /* number of elements in the 'data' array */
    void* data[];   /* object addresses; restored (was lost in truncation) */
} emutls_address_array;
/* Frees all TLS objects recorded in 'array' (defined later in this file). */
static void emutls_shutdown(emutls_address_array *array);

/* POSIX-threads synchronization state: a process-wide mutex guarding index
 * assignment, and the pthread key holding each thread's address array. */
static pthread_mutex_t emutls_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_key_t emutls_pthread_key;
/* True once emutls_pthread_key has been created (see emutls_init). */
static bool emutls_key_created = false;

/* GCC-compatible integer types: mode(word) is a word-sized integer and
 * mode(pointer) a pointer-sized integer, matching GCC's emutls ABI. */
typedef unsigned int gcc_word __attribute__((mode(word)));
typedef unsigned int gcc_pointer __attribute__((mode(pointer)));
/* Default is not to use posix_memalign, so systems like Android
 * can use thread local data without heavier POSIX memory allocators.
 */
#ifndef EMUTLS_USE_POSIX_MEMALIGN
#define EMUTLS_USE_POSIX_MEMALIGN 0
#endif
/* Allocates 'size' bytes aligned to 'align' (a power of 2). When not using
 * posix_memalign, over-allocates with malloc and stashes the raw malloc
 * pointer one slot before the returned base so emutls_memalign_free can
 * recover it. Aborts on allocation failure. */
static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
    void *base;
#if EMUTLS_USE_POSIX_MEMALIGN
    if (posix_memalign(&base, align, size) != 0)
        abort();
#else
    #define EXTRA_ALIGN_PTR_BYTES (align - 1 + sizeof(void*))
    char* object;
    if ((object = (char*)malloc(EXTRA_ALIGN_PTR_BYTES + size)) == NULL)
        abort();
    /* Round up past the saved-pointer slot to the requested alignment. */
    base = (void*)(((uintptr_t)(object + EXTRA_ALIGN_PTR_BYTES))
                   & ~(uintptr_t)(align - 1));
    /* The mallocated address is stored in ((void**)base)[-1]. */
    ((void**)base)[-1] = object;
#endif
    return base;
}
/* Frees memory obtained from emutls_memalign_alloc. */
static __inline void emutls_memalign_free(void *base) {
#if EMUTLS_USE_POSIX_MEMALIGN
    free(base);
#else
    /* The mallocated address is in ((void**)base)[-1] */
    free(((void**)base)[-1]);
#endif
}
84 static __inline void emutls_setspecific(emutls_address_array *value) {
85 pthread_setspecific(emutls_pthread_key, (void*) value);
88 static __inline emutls_address_array* emutls_getspecific() {
89 return (emutls_address_array*) pthread_getspecific(emutls_pthread_key);
92 static void emutls_key_destructor(void* ptr) {
93 emutls_address_array *array = (emutls_address_array*)ptr;
94 if (array->skip_destructor_rounds > 0) {
95 /* emutls is deallocated using a pthread key destructor. These
96 * destructors are called in several rounds to accommodate destructor
97 * functions that (re)initialize key values with pthread_setspecific.
98 * Delay the emutls deallocation to accommodate other end-of-thread
99 * cleanup tasks like calling thread_local destructors (e.g. the
100 * __cxa_thread_atexit fallback in libc++abi).
102 array->skip_destructor_rounds--;
103 emutls_setspecific(array);
105 emutls_shutdown(array);
110 static __inline void emutls_init(void) {
111 if (pthread_key_create(&emutls_pthread_key, emutls_key_destructor) != 0)
113 emutls_key_created = true;
116 static __inline void emutls_init_once(void) {
117 static pthread_once_t once = PTHREAD_ONCE_INIT;
118 pthread_once(&once, emutls_init);
121 static __inline void emutls_lock() {
122 pthread_mutex_lock(&emutls_mutex);
125 static __inline void emutls_unlock() {
126 pthread_mutex_unlock(&emutls_mutex);
/* Win32 synchronization state: a lazily allocated CRITICAL_SECTION
 * (NULL until emutls_init runs) and a TLS slot index for the per-thread
 * address array. */
static LPCRITICAL_SECTION emutls_mutex;
static DWORD emutls_tls_index = TLS_OUT_OF_INDEXES;

/* GCC-ABI-compatible integer types; on Windows both are pointer width. */
typedef uintptr_t gcc_word;
typedef void * gcc_pointer;
142 static void win_error(DWORD last_err, const char *hint) {
144 if (FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
145 FORMAT_MESSAGE_FROM_SYSTEM |
146 FORMAT_MESSAGE_MAX_WIDTH_MASK,
147 NULL, last_err, 0, (LPSTR)&buffer, 1, NULL)) {
148 fprintf(stderr, "Windows error: %s\n", buffer);
150 fprintf(stderr, "Unkown Windows error: %s\n", hint);
155 static __inline void win_abort(DWORD last_err, const char *hint) {
156 win_error(last_err, hint);
/* Windows aligned allocation; aborts on failure. */
static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
    void *base = _aligned_malloc(size, align);
    if (!base)
        win_abort(GetLastError(), "_aligned_malloc");
    return base;
}
/* Frees memory obtained from _aligned_malloc. */
static __inline void emutls_memalign_free(void *base) {
    _aligned_free(base);
}
171 static void emutls_exit(void) {
173 DeleteCriticalSection(emutls_mutex);
174 _aligned_free(emutls_mutex);
177 if (emutls_tls_index != TLS_OUT_OF_INDEXES) {
178 emutls_shutdown((emutls_address_array*)TlsGetValue(emutls_tls_index));
179 TlsFree(emutls_tls_index);
180 emutls_tls_index = TLS_OUT_OF_INDEXES;
184 #pragma warning (push)
185 #pragma warning (disable : 4100)
186 static BOOL CALLBACK emutls_init(PINIT_ONCE p0, PVOID p1, PVOID *p2) {
187 emutls_mutex = (LPCRITICAL_SECTION)_aligned_malloc(sizeof(CRITICAL_SECTION), 16);
189 win_error(GetLastError(), "_aligned_malloc");
192 InitializeCriticalSection(emutls_mutex);
194 emutls_tls_index = TlsAlloc();
195 if (emutls_tls_index == TLS_OUT_OF_INDEXES) {
197 win_error(GetLastError(), "TlsAlloc");
200 atexit(&emutls_exit);
204 static __inline void emutls_init_once(void) {
205 static INIT_ONCE once;
206 InitOnceExecuteOnce(&once, emutls_init, NULL, NULL);
209 static __inline void emutls_lock() {
210 EnterCriticalSection(emutls_mutex);
213 static __inline void emutls_unlock() {
214 LeaveCriticalSection(emutls_mutex);
217 static __inline void emutls_setspecific(emutls_address_array *value) {
218 if (TlsSetValue(emutls_tls_index, (LPVOID) value) == 0)
219 win_abort(GetLastError(), "TlsSetValue");
222 static __inline emutls_address_array* emutls_getspecific() {
223 LPVOID value = TlsGetValue(emutls_tls_index);
225 const DWORD err = GetLastError();
226 if (err != ERROR_SUCCESS)
227 win_abort(err, "TlsGetValue");
229 return (emutls_address_array*) value;
/* Provide atomic load/store functions for emutls_get_index if built with MSVC.
 * (GCC/Clang predefine __ATOMIC_RELEASE, so this whole section is skipped
 * there and the compiler builtins are used instead.)
 */
#if !defined(__ATOMIC_RELEASE)
#include <intrin.h>

enum { __ATOMIC_ACQUIRE = 2, __ATOMIC_RELEASE = 3 };

static __inline uintptr_t __atomic_load_n(void *ptr, unsigned type) {
    assert(type == __ATOMIC_ACQUIRE);
    // These return the previous value - but since we do an OR with 0,
    // it's equivalent to a plain load.
#ifdef _WIN64
    return InterlockedOr64(ptr, 0);
#else
    return InterlockedOr(ptr, 0);
#endif
}

static __inline void __atomic_store_n(void *ptr, uintptr_t val, unsigned type) {
    assert(type == __ATOMIC_RELEASE);
    InterlockedExchangePointer((void *volatile *)ptr, (void *)val);
}

#endif /* __ATOMIC_RELEASE */

#pragma warning (pop)
261 static size_t emutls_num_object = 0; /* number of allocated TLS objects */
263 /* Free the allocated TLS data
265 static void emutls_shutdown(emutls_address_array *array) {
268 for (i = 0; i < array->size; ++i) {
270 emutls_memalign_free(array->data[i]);
275 /* For every TLS variable xyz,
276 * there is one __emutls_control variable named __emutls_v.xyz.
277 * If xyz has non-zero initial value, __emutls_v.xyz's "value"
278 * will point to __emutls_t.xyz, which has the initial value.
280 typedef struct __emutls_control {
281 /* Must use gcc_word here, instead of size_t, to match GCC. When
282 gcc_word is larger than size_t, the upper extra bits are all
283 zeros. We can use variables of size_t to operate on size and
285 gcc_word size; /* size of the object in bytes */
286 gcc_word align; /* alignment of the object in bytes */
288 uintptr_t index; /* data[index-1] is the object address */
289 void* address; /* object address, when in single thread env */
291 void* value; /* null or non-zero initial value for the object */
294 /* Emulated TLS objects are always allocated at run-time. */
295 static __inline void *emutls_allocate_object(__emutls_control *control) {
296 /* Use standard C types, check with gcc's emutls.o. */
297 COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(gcc_pointer));
298 COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(void*));
300 size_t size = control->size;
301 size_t align = control->align;
303 if (align < sizeof(void*))
304 align = sizeof(void*);
305 /* Make sure that align is power of 2. */
306 if ((align & (align - 1)) != 0)
309 base = emutls_memalign_alloc(align, size);
311 memcpy(base, control->value, size);
313 memset(base, 0, size);
318 /* Returns control->object.index; set index if not allocated yet. */
319 static __inline uintptr_t emutls_get_index(__emutls_control *control) {
320 uintptr_t index = __atomic_load_n(&control->object.index, __ATOMIC_ACQUIRE);
324 index = control->object.index;
326 index = ++emutls_num_object;
327 __atomic_store_n(&control->object.index, index, __ATOMIC_RELEASE);
334 /* Updates newly allocated thread local emutls_address_array. */
335 static __inline void emutls_check_array_set_size(emutls_address_array *array,
340 emutls_setspecific(array);
343 /* Returns the new 'data' array size, number of elements,
344 * which must be no smaller than the given index.
346 static __inline uintptr_t emutls_new_data_array_size(uintptr_t index) {
347 /* Need to allocate emutls_address_array with extra slots
348 * to store the header.
349 * Round up the emutls_address_array size to multiple of 16.
351 uintptr_t header_words = sizeof(emutls_address_array) / sizeof(void *);
352 return ((index + header_words + 15) & ~((uintptr_t)15)) - header_words;
355 /* Returns the size in bytes required for an emutls_address_array with
356 * N number of elements for data field.
358 static __inline uintptr_t emutls_asize(uintptr_t N) {
359 return N * sizeof(void *) + sizeof(emutls_address_array);
362 /* Returns the thread local emutls_address_array.
363 * Extends its size if necessary to hold address at index.
365 static __inline emutls_address_array *
366 emutls_get_address_array(uintptr_t index) {
367 emutls_address_array* array = emutls_getspecific();
369 uintptr_t new_size = emutls_new_data_array_size(index);
370 array = (emutls_address_array*) malloc(emutls_asize(new_size));
372 memset(array->data, 0, new_size * sizeof(void*));
373 array->skip_destructor_rounds = EMUTLS_SKIP_DESTRUCTOR_ROUNDS;
375 emutls_check_array_set_size(array, new_size);
376 } else if (index > array->size) {
377 uintptr_t orig_size = array->size;
378 uintptr_t new_size = emutls_new_data_array_size(index);
379 array = (emutls_address_array*) realloc(array, emutls_asize(new_size));
381 memset(array->data + orig_size, 0,
382 (new_size - orig_size) * sizeof(void*));
383 emutls_check_array_set_size(array, new_size);
388 void* __emutls_get_address(__emutls_control* control) {
389 uintptr_t index = emutls_get_index(control);
390 emutls_address_array* array = emutls_get_address_array(index--);
391 if (array->data[index] == NULL)
392 array->data[index] = emutls_allocate_object(control);
393 return array->data[index];
#ifdef __BIONIC__
/* Called by Bionic on dlclose to delete the emutls pthread key. */
__attribute__((visibility("hidden")))
void __emutls_unregister_key(void) {
    if (emutls_key_created) {
        pthread_key_delete(emutls_pthread_key);
        emutls_key_created = false;
    }
}
#endif