#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/util.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/******************************************************************************/
 * Define names for both uninitialized and initialized phases, so that
 * options and mallctl processing are straightforward.
const char *percpu_arena_mode_names[] = {
percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;
const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) \
static div_info_t arena_binind_div_info[SC_NBINS];
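/*
 * The arena_binind_div_info table above caches precomputed divisors (see
 * div.h) so that arena_slab_regind() can turn a byte offset within a slab
 * into a region index without a hardware divide; it is populated from
 * bin_infos[i].reg_size during arena_boot().
 */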
size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
static unsigned huge_arena_ind;
/******************************************************************************/
 * Function prototypes for static functions that are referenced prior to
static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
    size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
/******************************************************************************/
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena_dss_prec_get(arena)];
	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
	*ndirty += extents_npages_get(&arena->extents_dirty);
	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_t *bstats, arena_stats_large_t *lstats,
    arena_stats_extents_t *estats) {
	cassert(config_stats);
	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
	    muzzy_decay_ms, nactive, ndirty, nmuzzy);
	size_t base_allocated, base_resident, base_mapped, metadata_thp;
	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
	    &base_mapped, &metadata_thp);
	arena_stats_lock(tsdn, &arena->stats);
	arena_stats_accum_zu(&astats->mapped, base_mapped
	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
	arena_stats_accum_zu(&astats->retained,
	    extents_npages_get(&arena->extents_retained) << LG_PAGE);
	atomic_store_zu(&astats->extent_avail,
	    atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
	arena_stats_accum_u64(&astats->decay_dirty.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.npurge));
	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.nmadvise));
	arena_stats_accum_u64(&astats->decay_dirty.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.purged));
	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.npurge));
	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.nmadvise));
	arena_stats_accum_u64(&astats->decay_muzzy.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.purged));
	arena_stats_accum_zu(&astats->base, base_allocated);
	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
	arena_stats_accum_zu(&astats->resident, base_resident +
	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
	    extents_npages_get(&arena->extents_dirty) +
	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
	arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
	    &arena->stats.abandoned_vm, ATOMIC_RELAXED));
	for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nmalloc);
		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].ndalloc);
		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nrequests);
		arena_stats_accum_u64(&lstats[i].nrequests,
		    nmalloc + nrequests);
		arena_stats_accum_u64(&astats->nrequests_large,
		    nmalloc + nrequests);
		/* nfill == nmalloc for large currently. */
		arena_stats_accum_u64(&lstats[i].nfills, nmalloc);
		arena_stats_accum_u64(&astats->nfills_large, nmalloc);
		uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nflushes);
		arena_stats_accum_u64(&lstats[i].nflushes, nflush);
		arena_stats_accum_u64(&astats->nflushes_large, nflush);
		assert(nmalloc >= ndalloc);
		assert(nmalloc - ndalloc <= SIZE_T_MAX);
		size_t curlextents = (size_t)(nmalloc - ndalloc);
		lstats[i].curlextents += curlextents;
		arena_stats_accum_zu(&astats->allocated_large,
		    curlextents * sz_index2size(SC_NBINS + i));
	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
		dirty = extents_nextents_get(&arena->extents_dirty, i);
		muzzy = extents_nextents_get(&arena->extents_muzzy, i);
		retained = extents_nextents_get(&arena->extents_retained, i);
		dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
		muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
		    extents_nbytes_get(&arena->extents_retained, i);
		atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
		atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
		atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
	arena_stats_unlock(tsdn, &arena->stats);
	/* tcache_bytes counts currently cached bytes. */
	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	cache_bin_array_descriptor_t *descriptor;
	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
		for (; i < SC_NBINS; i++) {
			cache_bin_t *tbin = &descriptor->bins_small[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		for (; i < nhbins; i++) {
			cache_bin_t *tbin = &descriptor->bins_large[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
	malloc_mutex_prof_read(tsdn,
	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
	    &arena->tcache_ql_mtx);
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \
    malloc_mutex_lock(tsdn, &arena->mtx); \
    malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \
    malloc_mutex_unlock(tsdn, &arena->mtx);
	/* Gather per arena mutex profiling data. */
	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
	    arena_prof_mutex_extent_avail)
	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
	    arena_prof_mutex_extents_dirty)
	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
	    arena_prof_mutex_extents_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
	    arena_prof_mutex_extents_retained)
	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
	    arena_prof_mutex_decay_dirty)
	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
	    arena_prof_mutex_decay_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
	    arena_prof_mutex_base)
#undef READ_ARENA_MUTEX_PROF_DATA
	nstime_copy(&astats->uptime, &arena->create_time);
	nstime_update(&astats->uptime);
	nstime_subtract(&astats->uptime, &arena->create_time);
	for (szind_t i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_stats_merge(tsdn, &bstats[i],
			    &arena->bins[i].bin_shards[j]);
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
	if (arena_dirty_decay_ms_get(arena) == 0) {
		arena_decay_dirty(tsdn, arena, false, true);
	arena_background_thread_inactivity_check(tsdn, arena, false);
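/*
 * Region allocation from a slab: bitmap_sfu() finds and sets the first unset
 * bit in the slab's bitmap, and the returned pointer is the slab base address
 * plus regind * reg_size.
 */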
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	assert(extent_nfree_get(slab) > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)extent_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	extent_nfree_dec(slab);
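/*
 * The batch variant below grabs cnt regions at once.  When a hardware
 * popcount is available and the bitmap is flat (no tree levels), it walks
 * whole bitmap groups and clears several bits per group load instead of
 * calling bitmap_sfu() once per region.
 */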
arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
    unsigned cnt, void** ptrs) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	assert(extent_nfree_get(slab) >= cnt);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
	for (unsigned i = 0; i < cnt; i++) {
		size_t regind = bitmap_sfu(slab_data->bitmap,
		    &bin_info->bitmap_info);
		*(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
		    (uintptr_t)(bin_info->reg_size * regind));
	bitmap_t g = slab_data->bitmap[group];
			g = slab_data->bitmap[++group];
		size_t shift = group << LG_BITMAP_GROUP_NBITS;
		size_t pop = popcount_lu(g);
		if (pop > (cnt - i)) {
		 * Load from memory locations only once, outside the
		uintptr_t base = (uintptr_t)extent_addr_get(slab);
		uintptr_t regsize = (uintptr_t)bin_info->reg_size;
			size_t bit = cfs_lu(&g);
			size_t regind = shift + bit;
			*(ptrs + i) = (void *)(base + regsize * regind);
		slab_data->bitmap[group] = g;
	extent_nfree_sub(slab, cnt);
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
	    (uintptr_t)bin_infos[binind].reg_size == 0);
	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
	/* Avoid doing division with a variable divisor. */
	regind = div_compute(&arena_binind_div_info[binind], diff);
	assert(regind < bin_infos[binind].nregs);
arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
	szind_t binind = extent_szind_get(slab);
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t regind = arena_slab_regind(slab, binind, ptr);
	assert(extent_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	extent_nfree_inc(slab);
arena_nactive_add(arena_t *arena, size_t add_pages) {
	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;
	cassert(config_stats);
	if (usize < SC_LARGE_MINCLASS) {
		usize = SC_LARGE_MINCLASS;
	index = sz_size2index(usize);
	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].nmalloc, 1);
arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;
	cassert(config_stats);
	if (usize < SC_LARGE_MINCLASS) {
		usize = SC_LARGE_MINCLASS;
	index = sz_size2index(usize);
	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].ndalloc, 1);
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
	arena_large_malloc_stats_update(tsdn, arena, usize);
arena_may_have_muzzy(arena_t *arena) {
	return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
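/*
 * Large extent allocation tries the cheapest sources first: reuse a cached
 * dirty extent, then a muzzy extent when muzzy decay is enabled, and only
 * then map new memory via extent_alloc_wrapper(), in which case the mapped
 * statistic is bumped by the new extent's size.
 */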
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	szind_t szind = sz_size2index(usize);
	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
	    szind, zero, &commit);
	if (extent == NULL && arena_may_have_muzzy(arena)) {
		extent = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
		    false, szind, zero, &commit);
	size_t size = usize + sz_large_pad;
	if (extent == NULL) {
		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
		    usize, sz_large_pad, alignment, false, szind, zero,
			 * extent may be NULL on OOM, but in that case
			 * mapped_add isn't used below, so there's no need to
			 * conditionally set it to 0 here.
	} else if (config_stats) {
	if (extent != NULL) {
			arena_stats_lock(tsdn, &arena->stats);
			arena_large_malloc_stats_update(tsdn, arena, usize);
			if (mapped_add != 0) {
				arena_stats_add_zu(tsdn, &arena->stats,
				    &arena->stats.mapped, mapped_add);
			arena_stats_unlock(tsdn, &arena->stats);
		arena_nactive_add(arena, size >> LG_PAGE);
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_dalloc_stats_update(tsdn, arena,
		    extent_usize_get(extent));
		arena_stats_unlock(tsdn, &arena->stats);
	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
	size_t usize = extent_usize_get(extent);
	size_t udiff = oldusize - usize;
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	arena_nactive_sub(arena, udiff >> LG_PAGE);
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
	size_t usize = extent_usize_get(extent);
	size_t udiff = usize - oldusize;
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	arena_nactive_add(arena, udiff >> LG_PAGE);
arena_decay_ms_read(arena_decay_t *decay) {
	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
arena_decay_deadline_init(arena_decay_t *decay) {
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	nstime_copy(&decay->deadline, &decay->epoch);
	nstime_add(&decay->deadline, &decay->interval);
	if (arena_decay_ms_read(decay) > 0) {
		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
		    nstime_ns(&decay->interval)));
		nstime_add(&decay->deadline, &jitter);
arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
	return (nstime_compare(&decay->deadline, time) <= 0);
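/*
 * h_steps[] holds the smoothstep curve as fixed-point fractions scaled by
 * 2^SMOOTHSTEP_BFP, so the limit computed below is
 * sum(backlog[i] * h_steps[i]) >> SMOOTHSTEP_BFP: each epoch's unpurged pages
 * are weighted by how far that epoch has progressed along the decay curve
 * (e.g. an epoch whose weight has decayed to a quarter of full scale
 * contributes only a quarter of its pages to the limit).
 */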
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
	size_t npages_limit_backlog;
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * h_steps[i];
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
	return npages_limit_backlog;
arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
	size_t npages_delta = (current_npages > decay->nunpurged) ?
	    current_npages - decay->nunpurged : 0;
	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
		if (current_npages > decay->ceil_npages) {
			decay->ceil_npages = current_npages;
		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
		assert(decay->ceil_npages >= npages_limit);
		if (decay->ceil_npages > npages_limit) {
			decay->ceil_npages = npages_limit;
arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
    size_t current_npages) {
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		size_t nadvance_z = (size_t)nadvance_u64;
		assert((uint64_t)nadvance_z == nadvance_u64);
		memmove(decay->backlog, &decay->backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
	arena_decay_backlog_update_last(decay, current_npages);
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, size_t current_npages, size_t npages_limit,
    bool is_background_thread) {
	if (current_npages > npages_limit) {
		arena_decay_to_limit(tsdn, arena, decay, extents, false,
		    npages_limit, current_npages - npages_limit,
		    is_background_thread);
arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
    size_t current_npages) {
	assert(arena_decay_deadline_reached(decay, time));
	nstime_copy(&delta, time);
	nstime_subtract(&delta, &decay->epoch);
	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
	assert(nadvance_u64 > 0);
	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &decay->interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&decay->epoch, &delta);
	/* Set a new deadline. */
	arena_decay_deadline_init(decay);
	/* Update the backlog. */
	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, const nstime_t *time, bool is_background_thread) {
	size_t current_npages = extents_npages_get(extents);
	arena_decay_epoch_advance_helper(decay, time, current_npages);
	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
	/* We may unlock decay->mtx when try_purge(). Finish logging first. */
	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
	if (!background_thread_enabled() || is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    current_npages, npages_limit, is_background_thread);
arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
	arena_decay_ms_write(decay, decay_ms);
		nstime_init(&decay->interval, (uint64_t)decay_ms *
		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
	nstime_init(&decay->epoch, 0);
	nstime_update(&decay->epoch);
	decay->jitter_state = (uint64_t)(uintptr_t)decay;
	arena_decay_deadline_init(decay);
	decay->nunpurged = 0;
	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
    arena_stats_decay_t *stats) {
		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
			assert(((char *)decay)[i] == 0);
		decay->ceil_npages = 0;
	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
	    malloc_mutex_rank_exclusive)) {
	decay->purging = false;
	arena_decay_reinit(decay, decay_ms);
	/* Memory is zeroed, so there is no need to clear stats. */
		decay->stats = stats;
arena_decay_ms_valid(ssize_t decay_ms) {
	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
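/*
 * Decay setting semantics relied on below: decay_ms == -1 disables purging
 * entirely, decay_ms == 0 purges unused pages immediately, and decay_ms > 0
 * spreads purging of an epoch's unused pages across SMOOTHSTEP_NSTEPS
 * subsequent epochs of decay_ms / SMOOTHSTEP_NSTEPS each.
 */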
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread) {
	malloc_mutex_assert_owner(tsdn, &decay->mtx);
	/* Purge all or nothing if the option is disabled. */
	ssize_t decay_ms = arena_decay_ms_read(decay);
			arena_decay_to_limit(tsdn, arena, decay, extents, false,
			    0, extents_npages_get(extents),
			    is_background_thread);
	nstime_init(&time, 0);
	nstime_update(&time);
	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
		 * Time went backwards.  Move the epoch back in time and
		 * generate a new deadline, with the expectation that time
		 * typically flows forward for long enough periods of time that
		 * epochs complete.  Unfortunately, this strategy is susceptible
		 * to clock jitter triggering premature epoch advances, but
		 * clock jitter estimation and compensation isn't feasible here
		 * because calls into this code are event-driven.
		nstime_copy(&decay->epoch, &time);
		arena_decay_deadline_init(decay);
		/* Verify that time does not go backwards. */
		assert(nstime_compare(&decay->epoch, &time) <= 0);
	 * If the deadline has been reached, advance to the current epoch and
	 * purge to the new limit if necessary.  Note that dirty pages created
	 * during the current epoch are not subject to purge until a future
	 * epoch, so purging only happens during epoch advances or when
	 * triggered by background threads (as a scheduled event).
	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
		    is_background_thread);
	} else if (is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    extents_npages_get(extents),
		    arena_decay_backlog_npages_limit(decay),
		    is_background_thread);
	return advance_epoch;
arena_decay_ms_get(arena_decay_t *decay) {
	return arena_decay_ms_read(decay);
arena_dirty_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_dirty);
arena_muzzy_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_muzzy);
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
	malloc_mutex_lock(tsdn, &decay->mtx);
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_ms changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	arena_decay_reinit(decay, decay_ms);
	arena_maybe_decay(tsdn, arena, decay, extents, false);
	malloc_mutex_unlock(tsdn, &decay->mtx);
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, decay_ms);
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, decay_ms);
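/*
 * Purging runs in two phases: arena_stash_decayed() evicts extents beyond
 * npages_limit from the extents container onto a private list, and
 * arena_decay_stashed() then purges or unmaps the stashed extents, advancing
 * them to the next state (dirty -> muzzy -> retained/unmapped).  This keeps
 * the expensive purge calls out of the container's critical sections.
 */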
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
    size_t npages_decay_max, extent_list_t *decay_extents) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	/* Stash extents according to npages_limit. */
	while (nstashed < npages_decay_max &&
	    (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
	    npages_limit)) != NULL) {
		extent_list_append(decay_extents, extent);
		nstashed += extent_size_get(extent) >> LG_PAGE;
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
    bool all, extent_list_t *decay_extents, bool is_background_thread) {
	size_t nmadvise, nunmapped;
	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	for (extent_t *extent = extent_list_first(decay_extents); extent !=
	    NULL; extent = extent_list_first(decay_extents)) {
		size_t npages = extent_size_get(extent) >> LG_PAGE;
		extent_list_remove(decay_extents, extent);
		switch (extents_state_get(extents)) {
		case extent_state_active:
		case extent_state_dirty:
			if (!all && muzzy_decay_ms != 0 &&
			    !extent_purge_lazy_wrapper(tsdn, arena,
			    r_extent_hooks, extent, 0,
			    extent_size_get(extent))) {
				extents_dalloc(tsdn, arena, r_extent_hooks,
				    &arena->extents_muzzy, extent);
				arena_background_thread_inactivity_check(tsdn,
				    arena, is_background_thread);
		case extent_state_muzzy:
			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
		case extent_state_retained:
		arena_stats_lock(tsdn, &arena->stats);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
		arena_stats_add_u64(tsdn, &arena->stats,
		    &decay->stats->nmadvise, nmadvise);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
		    nunmapped << LG_PAGE);
		arena_stats_unlock(tsdn, &arena->stats);
 * npages_limit: Decay at most npages_decay_max pages without violating the
 * invariant (extents_npages_get(extents) >= npages_limit).  We need an upper
 * bound on the number of pages in order to prevent unbounded growth (namely
 * in stashed); otherwise unbounded new pages could be added to extents during
 * the current decay run, so that the purging thread never finishes.
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
    bool is_background_thread) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 1);
	malloc_mutex_assert_owner(tsdn, &decay->mtx);
	if (decay->purging) {
	decay->purging = true;
	malloc_mutex_unlock(tsdn, &decay->mtx);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	extent_list_t decay_extents;
	extent_list_init(&decay_extents);
	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
	    npages_limit, npages_decay_max, &decay_extents);
		size_t npurged = arena_decay_stashed(tsdn, arena,
		    &extent_hooks, decay, extents, all, &decay_extents,
		    is_background_thread);
		assert(npurged == npurge);
	malloc_mutex_lock(tsdn, &decay->mtx);
	decay->purging = false;
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread, bool all) {
		malloc_mutex_lock(tsdn, &decay->mtx);
		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
		    extents_npages_get(extents), is_background_thread);
		malloc_mutex_unlock(tsdn, &decay->mtx);
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* No need to wait if another thread is in progress. */
	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
	    is_background_thread);
	if (epoch_advanced) {
		/* Backlog is updated on epoch advance. */
		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
	malloc_mutex_unlock(tsdn, &decay->mtx);
	if (have_background_thread && background_thread_enabled() &&
	    epoch_advanced && !is_background_thread) {
		background_thread_interval_check(tsdn, arena, decay,
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, is_background_thread, all);
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, is_background_thread, all);
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) > 0);
	extent_heap_insert(&bin->slabs_nonfull, slab);
		bin->stats.nonfull_slabs++;
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
	extent_heap_remove(&bin->slabs_nonfull, slab);
		bin->stats.nonfull_slabs--;
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
		bin->stats.reslabs++;
		bin->stats.nonfull_slabs--;
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) == 0);
	 * Tracking extents is required by arena_reset, which is not allowed
	 * for auto arenas.  Bypass this step to avoid touching the extent
	 * linkage (often results in cache misses) for auto arenas.
	if (arena_is_auto(arena)) {
	extent_list_append(&bin->slabs_full, slab);
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
	if (arena_is_auto(arena)) {
	extent_list_remove(&bin->slabs_full, slab);
arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
	malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	if (bin->slabcur != NULL) {
		slab = bin->slabcur;
		bin->slabcur = NULL;
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
	    slab = extent_list_first(&bin->slabs_full)) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		bin->stats.curregs = 0;
		bin->stats.curslabs = 0;
	malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_reset(tsd_t *tsd, arena_t *arena) {
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due to
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	/* Large allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
	for (extent_t *extent = extent_list_first(&arena->large); extent !=
	    NULL; extent = extent_list_first(&arena->large)) {
		void *ptr = extent_base_get(extent);
		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
		alloc_ctx_t alloc_ctx;
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind != SC_NSIZES);
		if (config_stats || (config_prof && opt_prof)) {
			usize = sz_index2size(alloc_ctx.szind);
			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		/* Remove large allocation from prof sample set. */
		if (config_prof && opt_prof) {
			prof_free(tsd, ptr, usize, &alloc_ctx);
		large_dalloc(tsd_tsdn(tsd), extent);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
	for (unsigned i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			arena_bin_reset(tsd, arena,
			    &arena->bins[i].bin_shards[j]);
	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
	 * Iterate over the retained extents and destroy them.  This gives the
	 * extent allocator underlying the extent hooks an opportunity to unmap
	 * all retained memory without having to keep its own metadata
	 * structures.  In practice, virtual memory for dss-allocated extents is
	 * leaked here, so best practice is to avoid dss for arenas to be
	 * destroyed, or provide custom extent hooks that track retained
	 * dss-based extents for later reuse.
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
	    &arena->extents_retained, 0)) != NULL) {
		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
arena_destroy(tsd_t *tsd, arena_t *arena) {
	assert(base_ind_get(arena->base) >= narenas_auto);
	assert(arena_nthreads_get(arena, false) == 0);
	assert(arena_nthreads_get(arena, true) == 0);
	 * No allocations have occurred since arena_reset() was called.
	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
	 * extents, so only retained extents may remain.
	assert(extents_npages_get(&arena->extents_dirty) == 0);
	assert(extents_npages_get(&arena->extents_muzzy) == 0);
	/* Deallocate retained memory. */
	arena_destroy_retained(tsd_tsdn(tsd), arena);
	 * Remove the arena pointer from the arenas array.  We rely on the fact
	 * that there is no way for the application to get a dirty read from the
	 * arenas array unless there is an inherent race in the application
	 * involving access of an arena being concurrently destroyed.  The
	 * application must synchronize knowledge of the arena's validity, so as
	 * long as we use an atomic write to update the arenas array, the
	 * application will get a clean read any time after it synchronizes
	 * knowledge that the arena is no longer valid.
	arena_set(base_ind_get(arena->base), NULL);
	 * Destroy the base allocator, which manages all metadata ever mapped by
	base_delete(tsd_tsdn(tsd), arena->base);
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
	if (config_stats && slab != NULL) {
		arena_stats_mapped_add(tsdn, &arena->stats,
		    bin_info->slab_size);
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
    const bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	szind_t szind = sz_size2index(bin_info->reg_size);
	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
	    binind, &zero, &commit);
	if (slab == NULL && arena_may_have_muzzy(arena)) {
		slab = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
		    true, binind, &zero, &commit);
		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
	assert(extent_slab_get(slab));
	/* Initialize slab internals. */
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, unsigned binshard) {
	const bin_info_t *bin_info;
	/* Look for a usable slab. */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	/* No existing slabs have any space available. */
	bin_info = &bin_infos[binind];
	/* Allocate a new slab. */
	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info);
	/********************************/
	malloc_mutex_lock(tsdn, &bin->lock);
			bin->stats.nslabs++;
			bin->stats.curslabs++;
	 * arena_slab_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	slab = arena_bin_slabs_nonfull_tryget(bin);
/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, unsigned binshard) {
	const bin_info_t *bin_info;
	bin_info = &bin_infos[binind];
	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard);
	if (bin->slabcur != NULL) {
		 * Another thread updated slabcur while this one ran without the
		 * bin lock in arena_bin_nonfull_slab_get().
		if (extent_nfree_get(bin->slabcur) > 0) {
			void *ret = arena_slab_reg_alloc(bin->slabcur,
				 * arena_slab_alloc() may have allocated slab,
				 * or it may have been pulled from
				 * slabs_nonfull.  Therefore it is unsafe to
				 * make any assumptions about how slab has
				 * previously been used, and
				 * arena_bin_lower_slab() must be called, as if
				 * a region were just deallocated from the slab.
				if (extent_nfree_get(slab) == bin_info->nregs) {
					arena_dalloc_bin_slab(tsdn, arena, slab,
					arena_bin_lower_slab(tsdn, arena, slab,
		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	bin->slabcur = slab;
	assert(extent_nfree_get(bin->slabcur) > 0);
	return arena_slab_reg_alloc(slab, bin_info);
/* Choose a bin shard and return the locked bin. */
arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    unsigned *binshard) {
	if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
		*binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
	assert(*binshard < bin_infos[binind].n_shards);
	bin = &arena->bins[binind].bin_shards[*binshard];
	malloc_mutex_lock(tsdn, &bin->lock);
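/*
 * Fill a thread cache bin from the arena.  The fill count is
 * ncached_max >> lg_fill_div for the bin, and regions are written so that
 * the lowest-address regions end up being used first.  On OOM partway
 * through, the regions obtained so far are slid down to just before
 * tbin->avail before bailing out.
 */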
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
	unsigned i, nfill, cnt;
	assert(tbin->ncached == 0);
	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
	bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
			unsigned tofill = nfill - i;
			cnt = tofill < extent_nfree_get(slab) ?
			    tofill : extent_nfree_get(slab);
			arena_slab_reg_alloc_batch(
			    slab, &bin_infos[binind], cnt,
			    tbin->avail - nfill + i);
			void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
					memmove(tbin->avail - i,
					    tbin->avail - nfill,
					    i * sizeof(void *));
			/* Insert such that low regions get used first. */
			*(tbin->avail - nfill + i) = ptr;
		if (config_fill && unlikely(opt_junk_alloc)) {
			for (unsigned j = 0; j < cnt; j++) {
				void* ptr = *(tbin->avail - nfill + i + j);
				arena_alloc_junk_small(ptr, &bin_infos[binind],
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	malloc_mutex_unlock(tsdn, &bin->lock);
	arena_decay_tick(tsdn, arena);
arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
    arena_dalloc_junk_small_impl;
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
	assert(binind < SC_NBINS);
	usize = sz_index2size(binind);
	bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
		malloc_mutex_unlock(tsdn, &bin->lock);
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &bin_infos[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &bin_infos[binind],
		memset(ret, 0, usize);
	arena_decay_tick(tsdn, arena);
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
	assert(!tsdn_null(tsdn) || arena != NULL);
	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size);
	if (unlikely(arena == NULL)) {
	if (likely(size <= SC_SMALL_MAXCLASS)) {
		return arena_malloc_small(tsdn, arena, ind, zero);
	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache) {
	if (usize <= SC_SMALL_MAXCLASS
	    && (alignment < PAGE
	    || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special slab placement. */
		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
		if (likely(alignment <= CACHELINE)) {
			ret = large_malloc(tsdn, arena, usize, zero);
			ret = large_palloc(tsdn, arena, usize, alignment, zero);
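/*
 * Heap profiling samples small allocations by backing them with a
 * LARGE_MINCLASS extent.  arena_prof_promote() records the true usize in the
 * extent and the rtree so that isalloc() reports it, and arena_prof_demote()
 * reverses the mapping at deallocation time so the extent can be freed as a
 * large allocation.
 */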
arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
	assert(usize <= SC_SMALL_MAXCLASS);
	if (config_opt_safety_checks) {
		safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	arena_t *arena = extent_arena_get(extent);
	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	prof_accum_cancel(tsdn, &arena->prof_accum, usize);
	assert(isalloc(tsdn, ptr) == usize);
arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);
	extent_szind_set(extent, SC_NBINS);
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
	return SC_LARGE_MINCLASS;
arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
	cassert(config_prof);
	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize = extent_usize_get(extent);
	size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
	if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
		 * Currently, we only do redzoning for small sampled
		assert(bumped_usize == SC_LARGE_MINCLASS);
		safety_check_verify_redzone(ptr, usize, bumped_usize);
	if (bumped_usize <= tcache_maxclass && tcache != NULL) {
		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
		    sz_size2index(bumped_usize), slow_path);
		large_dalloc(tsdn, extent);
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
		szind_t binind = extent_szind_get(slab);
		const bin_info_t *bin_info = &bin_infos[binind];
		 * The following block's conditional is necessary because if the
		 * slab only contains one region, then it never gets inserted
		 * into the non-full slabs heap.
		if (bin_info->nregs == 1) {
			arena_bin_slabs_full_remove(arena, bin, slab);
			arena_bin_slabs_nonfull_remove(bin, slab);
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
	assert(slab != bin->slabcur);
	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	arena_slab_dalloc(tsdn, arena, slab);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
		bin->stats.curslabs--;
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
	assert(extent_nfree_get(slab) > 0);
	 * Make sure that if bin->slabcur is non-NULL, it refers to the
	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
		/* Switch slabcur. */
		if (extent_nfree_get(bin->slabcur) > 0) {
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = slab;
			bin->stats.reslabs++;
		arena_bin_slabs_nonfull_insert(bin, slab);
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, extent_t *slab, void *ptr, bool junked) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	const bin_info_t *bin_info = &bin_infos[binind];
	if (!junked && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, bin_info);
	arena_slab_reg_dalloc(slab, slab_data, ptr);
	unsigned nfree = extent_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
		bin->stats.ndalloc++;
		bin->stats.curregs--;
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, extent_t *extent, void *ptr) {
	arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
	szind_t binind = extent_szind_get(extent);
	unsigned binshard = extent_binshard_get(extent);
	bin_t *bin = &arena->bins[binind].bin_shards[binshard];
	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
	malloc_mutex_unlock(tsdn, &bin->lock);
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	extent_t *extent = iealloc(tsdn, ptr);
	arena_t *arena = extent_arena_get(extent);
	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
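/*
 * In-place resize: a small allocation can stay put when the requested size
 * does not force a size-class change, and a large allocation is handed to
 * large_ralloc_no_move() when both the old and new sizes are large.  All
 * other cases report failure so the caller falls back to
 * allocate/copy/deallocate.
 */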
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero, size_t *newsize) {
	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
	extent_t *extent = iealloc(tsdn, ptr);
	if (unlikely(size > SC_LARGE_MAXCLASS)) {
	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min
	    <= SC_SMALL_MAXCLASS)) {
		 * Avoid moving the allocation if the size class can be left the
		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
		if ((usize_max > SC_SMALL_MAXCLASS
		    || sz_size2index(usize_max) != sz_size2index(oldsize))
		    && (size > oldsize || usize_max < oldsize)) {
		arena_decay_tick(tsdn, extent_arena_get(extent));
	} else if (oldsize >= SC_LARGE_MINCLASS
	    && usize_max >= SC_LARGE_MINCLASS) {
		ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
	assert(extent == iealloc(tsdn, ptr));
	*newsize = extent_usize_get(extent);
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache,
    hook_ralloc_args_t *hook_args) {
	size_t usize = sz_s2u(size);
	if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
	if (likely(usize <= SC_SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		UNUSED size_t newsize;
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
			hook_invoke_expand(hook_args->is_realloc
			    ? hook_expand_realloc : hook_expand_rallocx,
			    ptr, oldsize, usize, (uintptr_t)ptr,
	if (oldsize >= SC_LARGE_MINCLASS
	    && usize >= SC_LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, ptr, usize,
		    alignment, zero, tcache, hook_args);
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and copying.
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	hook_invoke_alloc(hook_args->is_realloc
	    ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
	hook_invoke_dalloc(hook_args->is_realloc
	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
		return (dss_prec != dss_prec_disabled);
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
arena_muzzy_decay_ms_default_get(void) {
	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
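/*
 * Read and/or set the cap on how far extent_grow_next may advance, i.e. the
 * largest virtual-memory chunk the arena will reserve at once when growing
 * retained memory.  A requested limit is rounded down to a supported
 * page-size class before being stored.
 */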
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
    size_t *new_limit) {
	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
	if (new_limit != NULL) {
		size_t limit = *new_limit;
		/* Grow no more than the new limit. */
		if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
	if (old_limit != NULL) {
		*old_limit = sz_pind2sz(arena->retain_grow_limit);
	if (new_limit != NULL) {
		arena->retain_grow_limit = new_ind;
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
arena_extent_sn_next(arena_t *arena) {
	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
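/*
 * arena_new() allocates the arena_t and all of its bin shards as a single
 * block from the arena's own base allocator: the shard array for bin i starts
 * at bin_addr, which is advanced by n_shards * sizeof(bin_t) per bin, and the
 * final address is asserted to land exactly at arena_size.
 */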
arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	unsigned nbins_total = 0;
	for (i = 0; i < SC_NBINS; i++) {
		nbins_total += bin_infos[i].n_shards;
	}
	size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
	arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}
	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}
	if (config_prof) {
		if (prof_accum_init(tsdn, &arena->prof_accum)) {
			goto label_error;
		}
	}
	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		atomic_store_zu(&arena->offset_state, config_debug ? ind :
		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
	}

	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);
	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);

	extent_list_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
	 * memory layout for best-fit extent allocation, since cached extents
	 * are likely to be reused soon after deallocation, and the cost of
	 * merging/splitting extents is non-trivial.
	 */
	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
	    true)) {
		goto label_error;
	}
	/*
	 * Coalesce muzzy extents immediately, because operations on them are
	 * in the critical path much less often than for dirty extents.
	 */
	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
	    false)) {
		goto label_error;
	}
	/*
	 * Coalesce retained extents immediately, in part because they will
	 * never be evicted (and therefore there's no opportunity for delayed
	 * coalescing), but also because operations on retained extents are not
	 * in the critical path.
	 */
	if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
	    false)) {
		goto label_error;
	}

	if (arena_decay_init(&arena->decay_dirty,
	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
		goto label_error;
	}
	if (arena_decay_init(&arena->decay_muzzy,
	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
		goto label_error;
	}

	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
	arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	extent_avail_new(&arena->extent_avail);
	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/* Initialize bins. */
	uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
	atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
	for (i = 0; i < SC_NBINS; i++) {
		unsigned nshards = bin_infos[i].n_shards;
		arena->bins[i].bin_shards = (bin_t *)bin_addr;
		bin_addr += nshards * sizeof(bin_t);
		for (unsigned j = 0; j < nshards; j++) {
			bool err = bin_init(&arena->bins[i].bin_shards[j]);
			if (err) {
				goto label_error;
			}
		}
	}
	assert(bin_addr == (uintptr_t)arena + arena_size);

	arena->base = base;
	/* Set arena before creating background threads. */
	arena_set(ind, arena);

	nstime_init(&arena->create_time, 0);
	nstime_update(&arena->create_time);

	/* We don't support reentrancy for arena 0 bootstrapping. */
	if (ind != 0) {
		/*
		 * If we're here, then arena 0 already exists, so bootstrapping
		 * is done enough that we should have tsd.
		 */
		assert(!tsdn_null(tsdn));
		pre_reentrancy(tsdn_tsd(tsdn), arena);
		if (test_hooks_arena_new_hook) {
			test_hooks_arena_new_hook();
		}
		post_reentrancy(tsdn_tsd(tsdn));
	}

	return arena;
label_error:
	if (ind != 0) {
		base_delete(tsdn, base);
	}
	return NULL;
}
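/*
 * Editorial sketch (added commentary, not original source): new arenas are
 * normally requested through the "arenas.create" mallctl, which chooses the
 * index and passes any caller-supplied extent hooks down to this function.
 * A minimal caller, assuming the standard <jemalloc/jemalloc.h> interface:
 *
 *	unsigned arena_ind;
 *	size_t sz = sizeof(arena_ind);
 *	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) == 0) {
 *		void *p = mallocx(4096, MALLOCX_ARENA(arena_ind));
 *		dallocx(p, MALLOCX_ARENA(arena_ind));
 *	}
 */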
arena_t *
arena_choose_huge(tsd_t *tsd) {
	/* huge_arena_ind can be 0 during init (will use a0). */
	if (huge_arena_ind == 0) {
		assert(!malloc_initialized());
		return NULL;
	}
	arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false);
	if (huge_arena == NULL) {
		/* Create the huge arena on demand. */
		assert(huge_arena_ind != 0);
		huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true);
		if (huge_arena == NULL) {
			return NULL;
		}

		/*
		 * Purge eagerly for huge allocations, because: 1) the number
		 * of huge allocations is usually small, which means
		 * ticker-based decay is not reliable; and 2) less immediate
		 * reuse is expected for huge allocations.
		 */
		if (arena_dirty_decay_ms_default_get() > 0) {
			arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
		}
		if (arena_muzzy_decay_ms_default_get() > 0) {
			arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
		}
	}

	return huge_arena;
}
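/*
 * Editorial note: callers are expected to reach arena_choose_huge() when an
 * allocation request is (roughly) at or above oversize_threshold bytes and no
 * explicit arena was selected, so with the default threshold a multi-megabyte
 * malloc() is served, and eagerly purged, by the dedicated huge arena while
 * smaller requests stay with the thread's usual arena.
 */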
bool
arena_init_huge(void) {
	bool huge_enabled;

	/* The threshold should be a large size class. */
	if (opt_oversize_threshold > SC_LARGE_MAXCLASS ||
	    opt_oversize_threshold < SC_LARGE_MINCLASS) {
		opt_oversize_threshold = 0;
		oversize_threshold = SC_LARGE_MAXCLASS + PAGE;
		huge_enabled = false;
	} else {
		/* Reserve the index for the huge arena. */
		huge_arena_ind = narenas_total_get();
		oversize_threshold = opt_oversize_threshold;
		huge_enabled = true;
	}

	return huge_enabled;
}
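/*
 * Editorial sketch: opt_oversize_threshold is populated from the documented
 * "oversize_threshold" option, so the huge-arena machinery is tuned (or, via
 * an out-of-range value such as 0, disabled) at startup, e.g.:
 *
 *	MALLOC_CONF="oversize_threshold:1048576" ./app
 *
 * routes requests of one megabyte and larger to the dedicated arena, while a
 * value outside [SC_LARGE_MINCLASS, SC_LARGE_MAXCLASS] takes the disabling
 * branch above.
 */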
bool
arena_is_huge(unsigned arena_ind) {
	if (huge_arena_ind == 0) {
		return false;
	}
	return (arena_ind == huge_arena_ind);
}
void
arena_boot(sc_data_t *sc_data) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
	for (unsigned i = 0; i < SC_NBINS; i++) {
		sc_t *sc = &sc_data->sc[i];
		div_init(&arena_binind_div_info[i],
		    (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
	}
}
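/*
 * Editorial worked example (values assumed purely for illustration): a small
 * size class encoded as lg_base = 6, ndelta = 3, lg_delta = 4 describes
 * (1 << 6) + (3 << 4) = 64 + 48 = 112 bytes, so div_init() precomputes the
 * constants needed to divide by 112 without a hardware divide when a pointer
 * offset inside a slab is later mapped back to its region index.
 */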
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	extents_prefork(tsdn, &arena->extents_dirty);
	extents_prefork(tsdn, &arena->extents_muzzy);
	extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_prefork(tsdn, &arena->bins[i].bin_shards[j]);
		}
	}
}
void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_postfork_parent(tsdn,
			    &arena->bins[i].bin_shards[j]);
		}
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}
void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
			cache_bin_array_descriptor_init(
			    &tcache->cache_bin_array_descriptor,
			    tcache->bins_small, tcache->bins_large);
			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
			    &tcache->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
		}
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}
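/*
 * Editorial sketch (added commentary): the prefork/postfork hooks above are
 * driven from the allocator's fork handlers, which are registered outside
 * this file.  Stage by stage across every initialized arena they acquire the
 * arena mutexes in increasing witness-rank order before fork() and then
 * release (parent) or reinitialize (child) them afterwards, roughly:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 */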