1 #define JEMALLOC_MUTEX_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/malloc_io.h"
9 #define _CRT_SPINCOUNT 4000
12 /******************************************************************************/
/*
 * Lazy-lock / deferred-init state.
 *
 * NOTE(review): original lines 17 and 21 are absent from this listing
 * (presumably the matching #endif directives) -- confirm against upstream.
 */
15 #ifdef JEMALLOC_LAZY_LOCK
/* Set once the process goes multi-threaded (see pthread_create interposer). */
16 bool isthreaded = false;
18 #ifdef JEMALLOC_MUTEX_INIT_CB
/*
 * While postpone_init is true, malloc_mutex_init() queues mutexes on
 * postponed_mutexes; malloc_mutex_boot() later drains the list and performs
 * the real pthread-level initialization via _pthread_mutex_init_calloc_cb().
 */
19 static bool postpone_init = true;
20 static malloc_mutex_t *postponed_mutexes = NULL;
23 /******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */
29 #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
/*
 * Interposed pthread_create(): delegates to pthread_create_wrapper(), which
 * toggles isthreaded when the process spawns its first thread.
 *
 * NOTE(review): the return-type line (original line 30, presumably
 * "JEMALLOC_EXPORT int"), the closing brace, and the matching #endif are
 * missing from this listing -- confirm against upstream.
 */
31 pthread_create(pthread_t *__restrict thread,
32 const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
33 void *__restrict arg) {
34 return pthread_create_wrapper(thread, attr, start_routine, arg);
38 /******************************************************************************/
40 #ifdef JEMALLOC_MUTEX_INIT_CB
/*
 * FreeBSD libc hook: initialize a pthread mutex using a caller-supplied
 * calloc-style allocator (bootstrap_calloc during jemalloc bootstrap).
 */
41 JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
42 void *(calloc_cb)(size_t, size_t));
/*
 * Weak fallback definition: if libc does not provide the symbol, forward
 * the call through libc's interposing table entry for it.
 *
 * NOTE(review): original lines 43, 45, 48-49, and 52+ (including the "int"
 * return-type line, opening/closing braces, and the trailing call argument)
 * are missing from this listing -- confirm against upstream.
 */
44 #pragma weak _pthread_mutex_init_calloc_cb
46 _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
47 void *(calloc_cb)(size_t, size_t))
50 return (((int (*)(pthread_mutex_t *, void *(*)(size_t, size_t)))
51 __libc_interposing[INTERPOS__pthread_mutex_init_calloc_cb])(mutex,
/*
 * Slow path taken when the fast trylock fails: spin a bounded number of
 * times, then block on the underlying lock, recording contention statistics
 * (spin acquisitions, wait times, waiting-thread counts) in mutex->prof_data.
 *
 * NOTE(review): many original lines are absent from this listing (60-64,
 * 66-67, 70-71, 73-74, 77-79, 82, 85, 90-93, 98-99, 102, 107, 110+), so the
 * loop/brace structure and the declarations of `after` and `delta` are not
 * visible here -- confirm against the upstream file before editing.
 */
57 malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
58 mutex_prof_data_t *data = &mutex->prof_data;
/* Only read when wait-time stats are collected further down. */
59 UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER;
65 int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
/* Bounded spin phase: acquiring here counts as a spin acquisition. */
68 if (!malloc_mutex_trylock_final(mutex)) {
69 data->n_spin_acquired++;
72 } while (cnt++ < max_cnt);
75 /* With stats disabled, spinning is the only optimization; just block. */
76 malloc_mutex_lock_final(mutex);
80 nstime_update(&before);
81 /* Seed `after` from `before` so clock skew cannot produce negative deltas. */
83 nstime_copy(&after, &before);
/* Register as a waiter so max_n_thds can be tracked below. */
84 uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
86 /* One last try, as the two calls above may take quite some cycles. */
87 if (!malloc_mutex_trylock_final(mutex)) {
88 atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
89 data->n_spin_acquired++;
94 malloc_mutex_lock_final(mutex);
95 /* Update more slow-path only counters. */
96 atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
97 nstime_update(&after);
/* delta = after - before = time spent blocked waiting for the lock. */
100 nstime_copy(&delta, &after);
101 nstime_subtract(&delta, &before);
103 data->n_wait_times++;
104 nstime_add(&data->tot_wait_time, &delta);
105 if (nstime_compare(&data->max_wait_time, &delta) < 0) {
106 nstime_copy(&data->max_wait_time, &delta);
/* Track the peak number of threads simultaneously waiting on this mutex. */
108 if (n_thds > data->max_n_thds) {
109 data->max_n_thds = n_thds;
/*
 * Reset all contention counters and timers in a mutex's profiling data.
 * NOTE(review): the return-type line and closing brace are not visible in
 * this listing.
 */
114 mutex_prof_data_init(mutex_prof_data_t *data) {
115 memset(data, 0, sizeof(mutex_prof_data_t));
/* Re-initialize the nstime fields explicitly, beyond the raw memset. */
116 nstime_init(&data->max_wait_time, 0);
117 nstime_init(&data->tot_wait_time, 0);
118 data->prev_owner = NULL;
/*
 * Reset a mutex's profiling data; the caller must own the mutex (asserted).
 * NOTE(review): the return-type line and closing brace are not visible in
 * this listing.
 */
122 malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
123 malloc_mutex_assert_owner(tsdn, mutex);
124 mutex_prof_data_init(&mutex->prof_data);
/*
 * Witness comparator that orders mutexes by address (used when a mutex is
 * created with malloc_mutex_address_ordered lock order; see
 * malloc_mutex_init()).  The witness arguments are unused; ordering is on
 * the opaque mutex pointers.
 *
 * NOTE(review): the return statements (original lines 135, 137-141), the
 * return-type line, and the closing brace are missing from this listing --
 * upstream returns -1/0/1 for <, ==, > respectively; confirm.
 */
128 mutex_addr_comp(const witness_t *witness1, void *mutex1,
129 const witness_t *witness2, void *mutex2) {
130 assert(mutex1 != NULL);
131 assert(mutex2 != NULL);
132 uintptr_t mu1int = (uintptr_t)mutex1;
133 uintptr_t mu2int = (uintptr_t)mutex2;
134 if (mu1int < mu2int) {
136 } else if (mu1int == mu2int) {
/*
 * NOTE(review): this fragment appears garbled -- as written, line 147 would
 * make malloc_mutex_first_thread() call itself unconditionally.  Upstream,
 * the "return (malloc_mutex_first_thread());" statement lives in
 * malloc_mutex_boot() (under #ifndef JEMALLOC_MUTEX_INIT_CB), while
 * malloc_mutex_first_thread() drains the postponed-mutex list.  Confirm
 * against the upstream file before relying on this excerpt.
 */
144 malloc_mutex_first_thread(void) {
146 #ifndef JEMALLOC_MUTEX_INIT_CB
147 return (malloc_mutex_first_thread());
154 malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
155 witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
156 mutex_prof_data_init(&mutex->prof_data);
158 # if _WIN32_WINNT >= 0x0600
159 InitializeSRWLock(&mutex->lock);
161 if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
166 #elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
167 mutex->lock = OS_UNFAIR_LOCK_INIT;
168 #elif (defined(JEMALLOC_OSSPIN))
170 #elif (defined(JEMALLOC_MUTEX_INIT_CB))
172 mutex->postponed_next = postponed_mutexes;
173 postponed_mutexes = mutex;
175 if (_pthread_mutex_init_calloc_cb(&mutex->lock,
176 bootstrap_calloc) != 0) {
181 pthread_mutexattr_t attr;
183 if (pthread_mutexattr_init(&attr) != 0) {
186 pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
187 if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
188 pthread_mutexattr_destroy(&attr);
191 pthread_mutexattr_destroy(&attr);
194 mutex->lock_order = lock_order;
195 if (lock_order == malloc_mutex_address_ordered) {
196 witness_init(&mutex->witness, name, rank,
197 mutex_addr_comp, &mutex);
199 witness_init(&mutex->witness, name, rank, NULL, NULL);
/*
 * Acquire the mutex before fork() so the child inherits it in a consistent
 * (owned) state.  NOTE(review): return-type line and closing brace are not
 * visible in this listing.
 */
206 malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
207 malloc_mutex_lock(tsdn, mutex);
/*
 * Parent side of fork(): simply release the lock taken in prefork.
 * NOTE(review): return-type line and closing brace are not visible in this
 * listing.
 */
211 malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
212 malloc_mutex_unlock(tsdn, mutex);
/*
 * Child side of fork(): with JEMALLOC_MUTEX_INIT_CB the inherited lock can
 * simply be unlocked; otherwise the mutex is re-created from scratch via
 * malloc_mutex_init(), with a failure reported via malloc_printf().
 *
 * NOTE(review): original lines 219 and 223+ (the #else, the rest of the
 * error message, and the abort/closing braces) are missing from this
 * listing -- confirm against upstream.
 */
216 malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
217 #ifdef JEMALLOC_MUTEX_INIT_CB
218 malloc_mutex_unlock(tsdn, mutex);
220 if (malloc_mutex_init(mutex, mutex->witness.name,
221 mutex->witness.rank, mutex->lock_order)) {
222 malloc_printf("<jemalloc>: Error re-initializing mutex in "
/*
 * Finish deferred mutex initialization: clear postpone_init, then perform
 * the real pthread-level init (via _pthread_mutex_init_calloc_cb with
 * bootstrap_calloc) for every mutex queued on postponed_mutexes.
 *
 * NOTE(review): the return-type line, the failure-return statement
 * (original lines 238-239), and the closing braces/#endif are missing from
 * this listing -- confirm against upstream.
 */
232 malloc_mutex_boot(void) {
233 #ifdef JEMALLOC_MUTEX_INIT_CB
234 postpone_init = false;
235 while (postponed_mutexes != NULL) {
236 if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
237 bootstrap_calloc) != 0) {
/* Pop the just-initialized mutex off the postponed list. */
240 postponed_mutexes = postponed_mutexes->postponed_next;