#ifndef JEMALLOC_INTERNAL_MUTEX_H
#define JEMALLOC_INTERNAL_MUTEX_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"

typedef enum {
    /* Can only acquire one mutex of a given witness rank at a time. */
    malloc_mutex_rank_exclusive,
    /*
     * Can acquire multiple mutexes of the same witness rank, but in
     * address-ascending order only.
     */
    malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;
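
/*
 * Illustrative sketch (not part of the real API; `a` and `b` are
 * hypothetical mutexes): under malloc_mutex_address_ordered, two mutexes
 * of the same witness rank must be acquired in address-ascending order:
 *
 *     malloc_mutex_t *first = (&a < &b) ? &a : &b;
 *     malloc_mutex_t *second = (&a < &b) ? &b : &a;
 *     malloc_mutex_lock(tsdn, first);
 *     malloc_mutex_lock(tsdn, second);
 */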

typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s {
    union {
        struct {
            /*
             * prof_data is defined first to reduce cacheline
             * bouncing: the data is not touched by the mutex holder
             * during unlocking, while it might be modified by
             * contenders.  Having it before the mutex itself could
             * avoid prefetching a modified cacheline (for the
             * unlocking thread).
             */
            mutex_prof_data_t prof_data;
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
            SRWLOCK lock;
#  else
            CRITICAL_SECTION lock;
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
            os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
            OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
            pthread_mutex_t lock;
            malloc_mutex_t *postponed_next;
#else
            pthread_mutex_t lock;
#endif
        };
        /*
         * We only touch witness when configured w/ debug.  However we
         * keep the field in a union when !debug so that we don't have
         * to pollute the code base with #ifdefs, while avoiding the
         * memory cost.
         */
#if !defined(JEMALLOC_DEBUG)
        witness_t witness;
        malloc_mutex_lock_order_t lock_order;
#endif
    };

#if defined(JEMALLOC_DEBUG)
    witness_t witness;
    malloc_mutex_lock_order_t lock_order;
#endif
};

/*
 * Based on benchmark results, a fixed spin with this number of retries works
 * well for our critical sections.
 */
#define MALLOC_MUTEX_MAX_SPIN 250
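
/*
 * Illustrative sketch only (the real slow path lives in mutex.c): a
 * bounded spin of MALLOC_MUTEX_MAX_SPIN retries before blocking would look
 * roughly like the following; note that MALLOC_MUTEX_TRYLOCK() below is
 * true on *failure* to acquire.
 *
 *     for (int i = 0; i < MALLOC_MUTEX_MAX_SPIN; i++) {
 *         if (!MALLOC_MUTEX_TRYLOCK(mutex)) {
 *             return;                // acquired while spinning
 *         }
 *         CPU_SPINWAIT;
 *     }
 *     MALLOC_MUTEX_LOCK(mutex);      // give up spinning and block
 */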

/*
 * MALLOC_MUTEX_TRYLOCK() evaluates to true (nonzero) when the lock was NOT
 * acquired, matching the trylock helpers below.
 */
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
#    define MALLOC_MUTEX_LOCK(m)    AcquireSRWLockExclusive(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  ReleaseSRWLockExclusive(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
#  else
#    define MALLOC_MUTEX_LOCK(m)    EnterCriticalSection(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  LeaveCriticalSection(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#  define MALLOC_MUTEX_LOCK(m)    os_unfair_lock_lock(&(m)->lock)
#  define MALLOC_MUTEX_UNLOCK(m)  os_unfair_lock_unlock(&(m)->lock)
#  define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#elif (defined(JEMALLOC_OSSPIN))
#  define MALLOC_MUTEX_LOCK(m)    OSSpinLockLock(&(m)->lock)
#  define MALLOC_MUTEX_UNLOCK(m)  OSSpinLockUnlock(&(m)->lock)
#  define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
#else
#  define MALLOC_MUTEX_LOCK(m)    pthread_mutex_lock(&(m)->lock)
#  define MALLOC_MUTEX_UNLOCK(m)  pthread_mutex_unlock(&(m)->lock)
#  define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif

#define LOCK_PROF_DATA_INITIALIZER					\
    {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0,		\
	    ATOMIC_INIT(0), 0, NULL, 0}

#ifdef _WIN32
#  define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#  define MALLOC_MUTEX_INITIALIZER					\
     {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}},		\
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
#  define MALLOC_MUTEX_INITIALIZER					\
     {{{LOCK_PROF_DATA_INITIALIZER, 0}},				\
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
#  define MALLOC_MUTEX_INITIALIZER					\
     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}},	\
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#else
#  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
#  define MALLOC_MUTEX_INITIALIZER					\
     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}},	\
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#endif
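
/*
 * Illustrative usage sketch (hypothetical variable name): on non-Windows
 * configurations, a statically allocated mutex can be brought up without a
 * call to malloc_mutex_init():
 *
 *     static malloc_mutex_t example_mtx = MALLOC_MUTEX_INITIALIZER;
 */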

#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
#  undef isthreaded /* Undo private_namespace.h definition. */
#  define isthreaded true
#endif

bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_first_thread(void);
bool malloc_mutex_boot(void);
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
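
/*
 * Illustrative sketch (hypothetical hooks; jemalloc's real fork handling
 * lives elsewhere): the prefork/postfork entry points are meant to be
 * driven around fork(2), e.g. registered via pthread_atfork(), with tsdn
 * obtained inside each hook:
 *
 *     static void prefork_hook(void) {
 *         malloc_mutex_prefork(tsdn, &example_mtx);
 *     }
 *     static void postfork_parent_hook(void) {
 *         malloc_mutex_postfork_parent(tsdn, &example_mtx);
 *     }
 *     static void postfork_child_hook(void) {
 *         malloc_mutex_postfork_child(tsdn, &example_mtx);
 *     }
 *     pthread_atfork(prefork_hook, postfork_parent_hook,
 *         postfork_child_hook);
 */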

void malloc_mutex_lock_slow(malloc_mutex_t *mutex);

static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
    MALLOC_MUTEX_LOCK(mutex);
}

static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
    return MALLOC_MUTEX_TRYLOCK(mutex);
}

static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    if (config_stats) {
        mutex_prof_data_t *data = &mutex->prof_data;
        data->n_lock_ops++;
        if (data->prev_owner != tsdn) {
            data->prev_owner = tsdn;
            data->n_owner_switches++;
        }
    }
}

/* Trylock: return false if the lock is successfully acquired. */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
        if (malloc_mutex_trylock_final(mutex)) {
            return true;
        }
        mutex_owner_stats_update(tsdn, mutex);
    }
    witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);

    return false;
}
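
/*
 * Illustrative caller sketch (hypothetical mutex): note the inverted
 * convention; false means the lock WAS acquired.
 *
 *     if (!malloc_mutex_trylock(tsdn, &example_mtx)) {
 *         // ... critical section ...
 *         malloc_mutex_unlock(tsdn, &example_mtx);
 *     }
 */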

/* Aggregate lock prof data. */
static inline void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
    nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
    if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
        nstime_copy(&sum->max_wait_time, &data->max_wait_time);
    }

    sum->n_wait_times += data->n_wait_times;
    sum->n_spin_acquired += data->n_spin_acquired;

    if (sum->max_n_thds < data->max_n_thds) {
        sum->max_n_thds = data->max_n_thds;
    }
    uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
        ATOMIC_RELAXED);
    uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
        &data->n_waiting_thds, ATOMIC_RELAXED);
    atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
        ATOMIC_RELAXED);
    sum->n_owner_switches += data->n_owner_switches;
    sum->n_lock_ops += data->n_lock_ops;
}
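
/*
 * Illustrative aggregation sketch (hypothetical array and count): per-mutex
 * records can be folded into a single summary before reporting:
 *
 *     mutex_prof_data_t sum = LOCK_PROF_DATA_INITIALIZER;
 *     for (unsigned i = 0; i < nmutexes; i++) {
 *         malloc_mutex_prof_merge(&sum, &per_mutex_data[i]);
 *     }
 */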

static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
        if (malloc_mutex_trylock_final(mutex)) {
            malloc_mutex_lock_slow(mutex);
        }
        mutex_owner_stats_update(tsdn, mutex);
    }
    witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
        MALLOC_MUTEX_UNLOCK(mutex);
    }
}

static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
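
/*
 * Illustrative pairing sketch (hypothetical mutex): the usual pattern is
 * lock, assert ownership where it aids debugging, and unlock on every path:
 *
 *     malloc_mutex_lock(tsdn, &example_mtx);
 *     malloc_mutex_assert_owner(tsdn, &example_mtx);
 *     // ... critical section ...
 *     malloc_mutex_unlock(tsdn, &example_mtx);
 */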

/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
    mutex_prof_data_t *source = &mutex->prof_data;
    /* Can only read holding the mutex. */
    malloc_mutex_assert_owner(tsdn, mutex);

    /*
     * Not *really* allowed (we shouldn't be doing non-atomic loads of
     * atomic data), but the mutex protection makes this safe, and writing
     * a member-for-member copy is tedious for this situation.
     */
    *data = *source;
    /* n_waiting_thds is not reported (modified w/o locking). */
    atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}

#endif /* JEMALLOC_INTERNAL_MUTEX_H */