#define	JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, tcache, tcache_t *, NULL)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)

bool		opt_tcache = true;
ssize_t		opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

size_t			nhbins;
size_t			tcache_maxclass;

/******************************************************************************/

size_t
tcache_salloc(const void *ptr)
{

	return (arena_salloc(ptr, false));
}

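/*
 * Incremental GC: each call inspects one bin.  A positive low water mark means
 * cached objects went unused since the last pass, so flush roughly 3/4 of them
 * and halve the refill count; a negative low water mark means the bin ran
 * empty, so double the refill count.  The bin index then advances round-robin.
 */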
void
tcache_event_hard(tcache_t *tcache)
{
	size_t binind = tcache->next_gc_bin;
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
		if (binind < NBINS) {
			tcache_bin_flush_small(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		} else {
			tcache_bin_flush_large(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		}
		/*
		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
		 * fill count is always at least 1.
		 */
		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X.  Make sure lg_fill_div stays
		 * greater than 0.
		 */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins)
		tcache->next_gc_bin = 0;
	tcache->ev_cnt = 0;
}

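/*
 * Slow path for small allocation: the bin's stack was empty, so refill it from
 * the arena and retry the fast-path pop.
 */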
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
	void *ret;

	arena_tcache_fill_small(tcache->arena, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof)
		tcache->prof_accumbytes = 0;
	ret = tcache_alloc_easy(tbin);

	return (ret);
}

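/*
 * Return cached small objects to their arena bins until only rem remain.  Each
 * outer pass locks the bin belonging to the first unflushed object's arena;
 * objects owned by other arenas are deferred to a later pass.
 */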
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;
		arena_bin_t *bin = &arena->bins[binind];

		if (config_prof && arena == tcache->arena) {
			arena_prof_accum(arena, tcache->prof_accumbytes);
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(&bin->lock);
		if (config_stats && arena == tcache->arena) {
			assert(merged_stats == false);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_chunk_map_t *mapelm =
				    arena_mapp_get(chunk, pageind);
				if (config_fill && opt_junk) {
					arena_alloc_junk_small(ptr,
					    &arena_bin_info[binind], true);
				}
				arena_dalloc_bin_locked(arena, chunk, ptr,
				    mapelm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&bin->lock);
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_bin_t *bin = &tcache->arena->bins[binind];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&bin->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

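/*
 * Large-object analogue of tcache_bin_flush_small().  Large deallocation is
 * protected by the owning arena's lock rather than a per-bin lock.
 */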
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;

		malloc_mutex_lock(&arena->lock);
		if ((config_prof || config_stats) && arena == tcache->arena) {
			if (config_prof) {
				arena_prof_accum_locked(arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena->stats.nrequests_large +=
				    tbin->tstats.nrequests;
				arena->stats.lstats[binind - NBINS].nrequests +=
				    tbin->tstats.nrequests;
				tbin->tstats.nrequests = 0;
			}
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena)
				arena_dalloc_large_locked(arena, chunk, ptr);
			else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&arena->lock);
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_t *arena = tcache->arena;
		malloc_mutex_lock(&arena->lock);
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		arena->stats.lstats[binind - NBINS].nrequests +=
		    tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&arena->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

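/*
 * Bind the tcache to an arena; with stats enabled, also register it on the
 * arena's list of extant tcaches so its stats can be merged.
 */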
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&arena->lock);
	}
	tcache->arena = arena;
}

void
tcache_arena_dissociate(tcache_t *tcache)
{

	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(&tcache->arena->lock);
		ql_remove(&tcache->arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&tcache->arena->lock);
		tcache_stats_merge(tcache, tcache->arena);
	}
}

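/*
 * A tcache is one contiguous allocation: the tcache_t header (with nhbins
 * tcache_bin_t slots) followed by the per-bin pointer stacks, rounded up to a
 * cacheline multiple.
 */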
tcache_t *
tcache_create(arena_t *arena)
{
	tcache_t *tcache;
	size_t size, stack_offset;
	unsigned i;

	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/*
	 * Round up to the nearest multiple of the cacheline size, in order to
	 * avoid the possibility of false cacheline sharing.
	 *
	 * That this works relies on the same logic as in ipalloc(), but we
	 * cannot directly call ipalloc() here due to tcache bootstrapping
	 * issues.
	 */
	size = (size + CACHELINE_MASK) & (-CACHELINE);

	if (size <= SMALL_MAXCLASS)
		tcache = (tcache_t *)arena_malloc_small(arena, size, true);
	else if (size <= tcache_maxclass)
		tcache = (tcache_t *)arena_malloc_large(arena, size, true);
	else
		tcache = (tcache_t *)icallocx(size, false, arena);

	if (tcache == NULL)
		return (NULL);

	tcache_arena_associate(tcache, arena);

	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].lg_fill_div = 1;
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
	}

	tcache_tsd_set(&tcache);

	return (tcache);
}

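/*
 * Flush all bins, merge any remaining stats and profiling counts into the
 * associated arena, then release the tcache's own memory via the path that
 * matches its size class.
 */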
void
tcache_destroy(tcache_t *tcache)
{
	unsigned i;
	size_t tcache_size;

	tcache_arena_dissociate(tcache);

	for (i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_small(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			arena_bin_t *bin = &arena->bins[i];
			malloc_mutex_lock(&bin->lock);
			bin->stats.nrequests += tbin->tstats.nrequests;
			malloc_mutex_unlock(&bin->lock);
		}
	}

	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_large(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			malloc_mutex_lock(&arena->lock);
			arena->stats.nrequests_large += tbin->tstats.nrequests;
			arena->stats.lstats[i - NBINS].nrequests +=
			    tbin->tstats.nrequests;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0)
		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);

	tcache_size = arena_salloc(tcache, false);
	if (tcache_size <= SMALL_MAXCLASS) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;
		size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
		    LG_PAGE;
		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

		arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
	} else if (tcache_size <= tcache_maxclass) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;

		arena_dalloc_large(arena, chunk, tcache);
	} else
		idallocx(tcache, false);
}

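/*
 * TSD destructor.  The sentinel values TCACHE_STATE_DISABLED,
 * TCACHE_STATE_REINCARNATED, and TCACHE_STATE_PURGATORY track, respectively, a
 * thread with tcache disabled, allocator use after cleanup already began, and
 * a tcache that has already been destroyed.
 */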
void
tcache_thread_cleanup(void *arg)
{
	tcache_t *tcache = *(tcache_t **)arg;

	if (tcache == TCACHE_STATE_DISABLED) {
		/* Do nothing. */
	} else if (tcache == TCACHE_STATE_REINCARNATED) {
		/*
		 * Another destructor called an allocator function after this
		 * destructor was called.  Reset tcache to
		 * TCACHE_STATE_PURGATORY in order to receive another callback.
		 */
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	} else if (tcache == TCACHE_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
		 * cause re-creation of the tcache.  This time, do nothing, so
		 * that the destructor will not be called again.
		 */
	} else if (tcache != NULL) {
		assert(tcache != TCACHE_STATE_PURGATORY);
		tcache_destroy(tcache);
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	}
}

void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
	unsigned i;

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = &tcache->tbins[i];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(&bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
		tcache_bin_t *tbin = &tcache->tbins[i];
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		lstats->nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
	}
}

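/*
 * Bootstrapping is split in two: tcache_boot0() sizes tcache_maxclass, nhbins,
 * and the per-bin cache capacities, and tcache_boot1() creates the
 * thread-specific-data keys.
 */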
bool
tcache_boot0(void)
{
	unsigned i;

	/*
	 * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
	 * known.
	 */
	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if ((1U << opt_lg_tcache_max) > arena_maxclass)
		tcache_maxclass = arena_maxclass;
	else
		tcache_maxclass = (1U << opt_lg_tcache_max);

	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
	    sizeof(tcache_bin_info_t));
	if (tcache_bin_info == NULL)
		return (true);
	for (i = 0; i < NBINS; i++) {
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return (false);
}

bool
tcache_boot1(void)
{

	if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
		return (true);

	return (false);
}