#define	JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

malloc_tsd_data(, tcache, tcache_t *, NULL)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)

bool	opt_tcache = true;
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */
size_t			nhbins;
size_t			tcache_maxclass;

/******************************************************************************/

size_t
tcache_salloc(const void *ptr)
{

	return (arena_salloc(ptr, false));
}

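/*
 * Incremental GC pass: examine one bin per call, flush roughly 3/4 of the
 * objects that stayed below the low water mark since the last pass, adapt the
 * bin's fill count, and advance to the next bin.
 */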
void
tcache_event_hard(tcache_t *tcache)
{
	size_t binind = tcache->next_gc_bin;
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
		if (binind < NBINS) {
			tcache_bin_flush_small(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		} else {
			tcache_bin_flush_large(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		}
		/*
		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
		 * fill count is always at least 1.
		 */
		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X.  Make sure lg_fill_div stays
		 * greater than 0.
		 */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins)
		tcache->next_gc_bin = 0;
	tcache->ev_cnt = 0;
}

void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
	void *ret;

	arena_tcache_fill_small(tcache->arena, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof)
		tcache->prof_accumbytes = 0;
	ret = tcache_alloc_easy(tbin);

	return (ret);
}

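/*
 * Flush cached small objects until only rem remain, returning them to their
 * arena bins.  Each outer iteration locks the bin owned by the first object's
 * arena; objects belonging to other arenas are deferred to a later pass.
 */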
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;
		arena_bin_t *bin = &arena->bins[binind];

		if (config_prof && arena == tcache->arena) {
			if (arena_prof_accum(arena, tcache->prof_accumbytes))
				prof_idump();
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(&bin->lock);
		if (config_stats && arena == tcache->arena) {
			assert(merged_stats == false);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_chunk_map_t *mapelm =
				    arena_mapp_get(chunk, pageind);
				if (config_fill && opt_junk) {
					arena_alloc_junk_small(ptr,
					    &arena_bin_info[binind], true);
				}
				arena_dalloc_bin_locked(arena, chunk, ptr,
				    mapelm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&bin->lock);
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_bin_t *bin = &tcache->arena->bins[binind];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&bin->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

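/*
 * Large-object counterpart of tcache_bin_flush_small(): return all but rem
 * cached large objects to their arenas, deferring objects owned by an arena
 * other than the one currently locked.
 */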
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;
		UNUSED bool idump;

		if (config_prof)
			idump = false;
		malloc_mutex_lock(&arena->lock);
		if ((config_prof || config_stats) && arena == tcache->arena) {
			if (config_prof) {
				idump = arena_prof_accum_locked(arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena->stats.nrequests_large +=
				    tbin->tstats.nrequests;
				arena->stats.lstats[binind - NBINS].nrequests +=
				    tbin->tstats.nrequests;
				tbin->tstats.nrequests = 0;
			}
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena)
				arena_dalloc_large_locked(arena, chunk, ptr);
			else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&arena->lock);
		if (config_prof && idump)
			prof_idump();
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_t *arena = tcache->arena;
		malloc_mutex_lock(&arena->lock);
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		arena->stats.lstats[binind - NBINS].nrequests +=
		    tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&arena->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&arena->lock);
	}
	tcache->arena = arena;
}

void
tcache_arena_dissociate(tcache_t *tcache)
{

	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(&tcache->arena->lock);
		ql_remove(&tcache->arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&tcache->arena->lock);
		tcache_stats_merge(tcache, tcache->arena);
	}
}

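/*
 * Allocate and initialize a tcache.  The per-bin pointer stacks are laid out
 * contiguously after the tcache_t header, sized according to tcache_bin_info.
 */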
tcache_t *
tcache_create(arena_t *arena)
{
	tcache_t *tcache;
	size_t size, stack_offset;
	unsigned i;

	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/*
	 * Round up to the nearest multiple of the cacheline size, in order to
	 * avoid the possibility of false cacheline sharing.
	 *
	 * That this works relies on the same logic as in ipalloc(), but we
	 * cannot directly call ipalloc() here due to tcache bootstrapping
	 * issues.
	 */
	size = (size + CACHELINE_MASK) & (-CACHELINE);

	if (size <= SMALL_MAXCLASS)
		tcache = (tcache_t *)arena_malloc_small(arena, size, true);
	else if (size <= tcache_maxclass)
		tcache = (tcache_t *)arena_malloc_large(arena, size, true);
	else
		tcache = (tcache_t *)icallocx(size, false, arena);

	if (tcache == NULL)
		return (NULL);

	tcache_arena_associate(tcache, arena);

	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].lg_fill_div = 1;
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
	}

	tcache_tsd_set(&tcache);

	return (tcache);
}

void
tcache_destroy(tcache_t *tcache)
{
	unsigned i;
	size_t tcache_size;

	tcache_arena_dissociate(tcache);

	for (i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_small(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			arena_bin_t *bin = &arena->bins[i];
			malloc_mutex_lock(&bin->lock);
			bin->stats.nrequests += tbin->tstats.nrequests;
			malloc_mutex_unlock(&bin->lock);
		}
	}

	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_large(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			malloc_mutex_lock(&arena->lock);
			arena->stats.nrequests_large += tbin->tstats.nrequests;
			arena->stats.lstats[i - NBINS].nrequests +=
			    tbin->tstats.nrequests;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
		prof_idump();

	tcache_size = arena_salloc(tcache, false);
	if (tcache_size <= SMALL_MAXCLASS) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;
		size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
		    LG_PAGE;
		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

		arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
	} else if (tcache_size <= tcache_maxclass) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;

		arena_dalloc_large(arena, chunk, tcache);
	} else
		idallocx(tcache, false);
}

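/*
 * TSD destructor.  The sentinel values (DISABLED, REINCARNATED, PURGATORY)
 * encode why no live tcache is present, so that the tcache is destroyed
 * exactly once even if other destructors allocate after this one runs.
 */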
void
tcache_thread_cleanup(void *arg)
{
	tcache_t *tcache = *(tcache_t **)arg;

	if (tcache == TCACHE_STATE_DISABLED) {
		/* Do nothing. */
	} else if (tcache == TCACHE_STATE_REINCARNATED) {
		/*
		 * Another destructor called an allocator function after this
		 * destructor was called.  Reset tcache to
		 * TCACHE_STATE_PURGATORY in order to receive another callback.
		 */
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	} else if (tcache == TCACHE_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
		 * cause re-creation of the tcache.  This time, do nothing, so
		 * that the destructor will not be called again.
		 */
	} else if (tcache != NULL) {
		assert(tcache != TCACHE_STATE_PURGATORY);
		tcache_destroy(tcache);
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	}
}

void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
	unsigned i;

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = &tcache->tbins[i];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(&bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
		tcache_bin_t *tbin = &tcache->tbins[i];
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		lstats->nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
	}
}

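/*
 * Boot-time setup: clamp tcache_maxclass, derive nhbins, and size each bin's
 * cache (ncached_max) in tcache_bin_info.
 */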
bool
tcache_boot0(void)
{
	unsigned i;

	/*
	 * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
	 * known.
	 */
	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if ((1U << opt_lg_tcache_max) > arena_maxclass)
		tcache_maxclass = arena_maxclass;
	else
		tcache_maxclass = (1U << opt_lg_tcache_max);

	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
	    sizeof(tcache_bin_info_t));
	if (tcache_bin_info == NULL)
		return (true);
	for (i = 0; i < NBINS; i++) {
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return (false);
}

bool
tcache_boot1(void)
{

	if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
		return (true);

	return (false);
}