#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/*
 * Define names for both uninitialized and initialized phases, so that
 * options and mallctl processing are straightforward.
 */
const char *percpu_arena_mode_names[] = {
	"percpu",
	"phycpu",
	"disabled",
	"percpu",
	"phycpu"
};
percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;

ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;

static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;
const arena_bin_info_t arena_bin_info[NBINS] = {
#define BIN_INFO_bin_yes(reg_size, slab_size, nregs)			\
	{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
    lg_delta_lookup)							\
	BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta),		\
	    (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) +	\
	    (ndelta<<lg_delta)))
	SIZE_CLASSES
#undef BIN_INFO_bin_yes
#undef BIN_INFO_bin_no
#undef SC
};
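/*
 * Illustrative expansion (a sketch, assuming LG_PAGE == 12): for a
 * hypothetical smallest size class SC(0, 3, 3, 0, no, yes, 1, 3), the macros
 * above would emit
 *
 *	{8, 4096, 512, BITMAP_INFO_INITIALIZER(512)},
 *
 * i.e. reg_size = (1U<<3) + (0<<3) = 8, slab_size = 1 << LG_PAGE = 4096, and
 * nregs = 4096 / 8 = 512.
 */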
const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y)						\
	h,
	SMOOTHSTEP
#undef STEP
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
    bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    arena_bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    arena_bin_t *bin);
/******************************************************************************/

bool
arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
			assert(((char *)arena_stats)[i] == 0);
		}
	}
#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
		return true;
	}
#endif
	/* Memory is zeroed, so there is no need to clear stats. */
	return false;
}
static void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}

static void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}
static uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return *p;
#endif
}
static void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p += x;
#endif
}
static void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p -= x;
	assert(*p + x >= *p);
#endif
}
/*
 * Non-atomically sets *dst += src.  *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
static void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
	*dst += src;
#endif
}
static size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_zu(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}
static void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
    size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}
static void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
    size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}
/* Like the _u64 variant, needs an externally synchronized *dst. */
static void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}
void
arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
	    NBINS].nrequests, nrequests);
	arena_stats_unlock(tsdn, arena_stats);
}
void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
	arena_stats_unlock(tsdn, arena_stats);
}
void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena_dss_prec_get(arena)];
	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
	*ndirty += extents_npages_get(&arena->extents_dirty);
	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
}
void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) {
	cassert(config_stats);

	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
	    muzzy_decay_ms, nactive, ndirty, nmuzzy);

	size_t base_allocated, base_resident, base_mapped;
	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
	    &base_mapped);

	arena_stats_lock(tsdn, &arena->stats);

	arena_stats_accum_zu(&astats->mapped, base_mapped
	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
	arena_stats_accum_zu(&astats->retained,
	    extents_npages_get(&arena->extents_retained) << LG_PAGE);

	arena_stats_accum_u64(&astats->decay_dirty.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.npurge));
	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.nmadvise));
	arena_stats_accum_u64(&astats->decay_dirty.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.purged));

	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.npurge));
	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.nmadvise));
	arena_stats_accum_u64(&astats->decay_muzzy.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.purged));

	arena_stats_accum_zu(&astats->base, base_allocated);
	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
	arena_stats_accum_zu(&astats->resident, base_resident +
	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
	    extents_npages_get(&arena->extents_dirty) +
	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));

	for (szind_t i = 0; i < NSIZES - NBINS; i++) {
		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nmalloc);
		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);

		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].ndalloc);
		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);

		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nrequests);
		arena_stats_accum_u64(&lstats[i].nrequests,
		    nmalloc + nrequests);
		arena_stats_accum_u64(&astats->nrequests_large,
		    nmalloc + nrequests);

		assert(nmalloc >= ndalloc);
		assert(nmalloc - ndalloc <= SIZE_T_MAX);
		size_t curlextents = (size_t)(nmalloc - ndalloc);
		lstats[i].curlextents += curlextents;
		arena_stats_accum_zu(&astats->allocated_large,
		    curlextents * sz_index2size(NBINS + i));
	}

	arena_stats_unlock(tsdn, &arena->stats);

	/* tcache_bytes counts currently cached bytes. */
	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	tcache_t *tcache;
	ql_foreach(tcache, &arena->tcache_ql, link) {
		szind_t i = 0;
		for (; i < NBINS; i++) {
			tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
		for (; i < nhbins; i++) {
			tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
	}
	malloc_mutex_prof_read(tsdn,
	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
	    &arena->tcache_ql_mtx);
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);

#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
	malloc_mutex_lock(tsdn, &arena->mtx);				\
	malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],	\
	    &arena->mtx);						\
	malloc_mutex_unlock(tsdn, &arena->mtx);

	/* Gather per arena mutex profiling data. */
	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
	    arena_prof_mutex_extent_avail)
	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
	    arena_prof_mutex_extents_dirty)
	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
	    arena_prof_mutex_extents_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
	    arena_prof_mutex_extents_retained)
	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
	    arena_prof_mutex_decay_dirty)
	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
	    arena_prof_mutex_decay_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
	    arena_prof_mutex_base)
#undef READ_ARENA_MUTEX_PROF_DATA

	nstime_copy(&astats->uptime, &arena->create_time);
	nstime_update(&astats->uptime);
	nstime_subtract(&astats->uptime, &arena->create_time);

	for (szind_t i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(tsdn, &bin->lock);
		malloc_mutex_prof_read(tsdn, &bstats[i].mutex_data, &bin->lock);
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		bstats[i].curregs += bin->stats.curregs;
		bstats[i].nfills += bin->stats.nfills;
		bstats[i].nflushes += bin->stats.nflushes;
		bstats[i].nslabs += bin->stats.nslabs;
		bstats[i].reslabs += bin->stats.reslabs;
		bstats[i].curslabs += bin->stats.curslabs;
		malloc_mutex_unlock(tsdn, &bin->lock);
	}
}

void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
	    extent);
	if (arena_dirty_decay_ms_get(arena) == 0) {
		arena_decay_dirty(tsdn, arena, false, true);
	} else {
		arena_background_thread_inactivity_check(tsdn, arena, false);
	}
}

static void *
arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
    const arena_bin_info_t *bin_info) {
	void *ret;
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	size_t regind;

	assert(extent_nfree_get(slab) > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)extent_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	extent_nfree_dec(slab);
	return ret;
}

static size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
	size_t diff, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
	    (uintptr_t)arena_bin_info[binind].reg_size == 0);

	/* Avoid doing division with a variable divisor. */
	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
	switch (binind) {
#define REGIND_bin_yes(index, reg_size)					\
	case index:							\
		regind = diff / (reg_size);				\
		assert(diff == regind * (reg_size));			\
		break;
#define REGIND_bin_no(index, reg_size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
    lg_delta_lookup)							\
	REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta<<lg_delta))
	SIZE_CLASSES
#undef REGIND_bin_yes
#undef REGIND_bin_no
#undef SC
	default: not_reached();
	}

	assert(regind < arena_bin_info[binind].nregs);

	return regind;
}

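/*
 * Illustrative expansion (a sketch, assuming 8- and 16-byte classes for
 * binind 0 and 1): the REGIND/SC macros above unroll into a switch whose
 * divisors are compile-time constants, e.g.
 *
 *	switch (binind) {
 *	case 0:
 *		regind = diff / 8;
 *		assert(diff == regind * 8);
 *		break;
 *	case 1:
 *		regind = diff / 16;
 *		assert(diff == regind * 16);
 *		break;
 *	...
 *	default: not_reached();
 *	}
 *
 * which lets the compiler strength-reduce each division into a
 * multiply/shift sequence rather than a slow variable division.
 */
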
static void
arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
    arena_slab_data_t *slab_data, void *ptr) {
	szind_t binind = extent_szind_get(slab);
	const arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_slab_regind(slab, binind, ptr);

	assert(extent_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	extent_nfree_inc(slab);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages) {
	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
}

static void
arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < LARGE_MINCLASS) {
		usize = LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= NBINS) ? index - NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].nmalloc, 1);
}

static void
arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < LARGE_MINCLASS) {
		usize = LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= NBINS) ? index - NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].ndalloc, 1);
}

static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
    size_t usize) {
	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
	arena_large_malloc_stats_update(tsdn, arena, usize);
}

extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	szind_t szind = sz_size2index(usize);
	size_t mapped_add;
	bool commit = true;
	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
	    szind, zero, &commit);
	if (extent == NULL) {
		extent = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
		    false, szind, zero, &commit);
	}
	size_t size = usize + sz_large_pad;
	if (extent == NULL) {
		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
		    usize, sz_large_pad, alignment, false, szind, zero,
		    &commit);
		if (config_stats) {
			/*
			 * extent may be NULL on OOM, but in that case
			 * mapped_add isn't used below, so there's no need to
			 * conditionally set it to 0 here.
			 */
			mapped_add = size;
		}
	} else if (config_stats) {
		mapped_add = 0;
	}

	if (extent != NULL) {
		if (config_stats) {
			arena_stats_lock(tsdn, &arena->stats);
			arena_large_malloc_stats_update(tsdn, arena, usize);
			if (mapped_add != 0) {
				arena_stats_add_zu(tsdn, &arena->stats,
				    &arena->stats.mapped, mapped_add);
			}
			arena_stats_unlock(tsdn, &arena->stats);
		}
		arena_nactive_add(arena, size >> LG_PAGE);
	}

	return extent;
}

void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_dalloc_stats_update(tsdn, arena,
		    extent_usize_get(extent));
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}

void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = oldusize - usize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);
}

void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = usize - oldusize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);
}

static ssize_t
arena_decay_ms_read(arena_decay_t *decay) {
	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}

static void
arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
}

static void
arena_decay_deadline_init(arena_decay_t *decay) {
	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&decay->deadline, &decay->epoch);
	nstime_add(&decay->deadline, &decay->interval);
	if (arena_decay_ms_read(decay) > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
		    nstime_ns(&decay->interval)));
		nstime_add(&decay->deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
	return (nstime_compare(&decay->deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return npages_limit_backlog;
}

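/*
 * Worked example (a sketch; SMOOTHSTEP_BFP is 24 by default): a single
 * backlog entry of 1000 pages at a step whose h factor is 0x800000
 * (0.5 in 24-bit fixed point) contributes
 *
 *	(1000 * 0x800000) >> 24 == 500
 *
 * pages to the limit, i.e. half of those pages may remain unpurged at that
 * point in the decay curve.
 */
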
static void
arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
	size_t npages_delta = (current_npages > decay->nunpurged) ?
	    current_npages - decay->nunpurged : 0;
	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;

	if (config_debug) {
		if (current_npages > decay->ceil_npages) {
			decay->ceil_npages = current_npages;
		}
		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
		assert(decay->ceil_npages >= npages_limit);
		if (decay->ceil_npages > npages_limit) {
			decay->ceil_npages = npages_limit;
		}
	}
}

static void
arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
    size_t current_npages) {
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(decay->backlog, &decay->backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}

	arena_decay_backlog_update_last(decay, current_npages);
}

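/*
 * Sliding-window sketch (illustrative, with SMOOTHSTEP_NSTEPS == 4 for
 * brevity): advancing two epochs turns backlog {a, b, c, d} into
 * {c, d, 0, X}, where X is filled in by arena_decay_backlog_update_last()
 * from the current page count.
 */
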
static void
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, size_t current_npages, size_t npages_limit,
    bool is_background_thread) {
	if (current_npages > npages_limit) {
		arena_decay_to_limit(tsdn, arena, decay, extents, false,
		    npages_limit, is_background_thread);
	}
}

static void
arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
    size_t current_npages) {
	assert(arena_decay_deadline_reached(decay, time));

	nstime_t delta;
	nstime_copy(&delta, time);
	nstime_subtract(&delta, &decay->epoch);

	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &decay->interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&decay->epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(decay);

	/* Update the backlog. */
	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, const nstime_t *time, bool is_background_thread) {
	size_t current_npages = extents_npages_get(extents);
	arena_decay_epoch_advance_helper(decay, time, current_npages);

	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
	/* We may unlock decay->mtx when try_purge().  Finish logging first. */
	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
	    current_npages;

	if (!background_thread_enabled() || is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    current_npages, npages_limit, is_background_thread);
	}
}

static void
arena_decay_reinit(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) {
	arena_decay_ms_write(decay, decay_ms);
	if (decay_ms > 0) {
		nstime_init(&decay->interval, (uint64_t)decay_ms *
		    KQU(1000000));
		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&decay->epoch, 0);
	nstime_update(&decay->epoch);
	decay->jitter_state = (uint64_t)(uintptr_t)decay;
	arena_decay_deadline_init(decay);
	decay->nunpurged = 0;
	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms,
    decay_stats_t *stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
			assert(((char *)decay)[i] == 0);
		}
		decay->ceil_npages = 0;
	}
	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	decay->purging = false;
	arena_decay_reinit(decay, extents, decay_ms);
	/* Memory is zeroed, so there is no need to clear stats. */
	if (config_stats) {
		decay->stats = stats;
	}
	return false;
}

bool
arena_decay_ms_valid(ssize_t decay_ms) {
	if (decay_ms < -1) {
		return false;
	}
	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
	    KQU(1000)) {
		return true;
	}
	return false;
}

static bool
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread) {
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	/* Purge all or nothing if the option is disabled. */
	ssize_t decay_ms = arena_decay_ms_read(decay);
	if (decay_ms <= 0) {
		if (decay_ms == 0) {
			arena_decay_to_limit(tsdn, arena, decay, extents, false,
			    0, is_background_thread);
		}
		return false;
	}

	nstime_t time;
	nstime_init(&time, 0);
	nstime_update(&time);
	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
	    > 0)) {
		/*
		 * Time went backwards.  Move the epoch back in time and
		 * generate a new deadline, with the expectation that time
		 * typically flows forward for long enough periods of time that
		 * epochs complete.  Unfortunately, this strategy is susceptible
		 * to clock jitter triggering premature epoch advances, but
		 * clock jitter estimation and compensation isn't feasible here
		 * because calls into this code are event-driven.
		 */
		nstime_copy(&decay->epoch, &time);
		arena_decay_deadline_init(decay);
	} else {
		/* Verify that time does not go backwards. */
		assert(nstime_compare(&decay->epoch, &time) <= 0);
	}

	/*
	 * If the deadline has been reached, advance to the current epoch and
	 * purge to the new limit if necessary.  Note that dirty pages created
	 * during the current epoch are not subject to purge until a future
	 * epoch, so as a result purging only happens during epoch advances or
	 * when triggered by background threads (as a scheduled event).
	 */
	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
	if (advance_epoch) {
		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
		    is_background_thread);
	} else if (is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    extents_npages_get(extents),
		    arena_decay_backlog_npages_limit(decay),
		    is_background_thread);
	}

	return advance_epoch;
}

static ssize_t
arena_decay_ms_get(arena_decay_t *decay) {
	return arena_decay_ms_read(decay);
}

ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_dirty);
}

ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_muzzy);
}

static bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_ms changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_reinit(decay, extents, decay_ms);
	arena_maybe_decay(tsdn, arena, decay, extents, false);
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return false;
}

bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, decay_ms);
}

bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, decay_ms);
}

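/*
 * Usage sketch (illustrative): these setters back the
 * "arena.<i>.dirty_decay_ms" and "arena.<i>.muzzy_decay_ms" mallctls, e.g.
 * setting a ten-second dirty decay on arena 0:
 *
 *	ssize_t decay_ms = 10000;
 *	mallctl("arena.0.dirty_decay_ms", NULL, NULL, &decay_ms,
 *	    sizeof(decay_ms));
 *
 * A value of -1 disables decay-driven purging; 0 purges eagerly.
 */
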
static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
    extent_list_t *decay_extents) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Stash extents according to npages_limit. */
	size_t nstashed = 0;
	extent_t *extent;
	while ((extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
	    npages_limit)) != NULL) {
		extent_list_append(decay_extents, extent);
		nstashed += extent_size_get(extent) >> LG_PAGE;
	}

	return nstashed;
}

static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
    bool all, extent_list_t *decay_extents, bool is_background_thread) {
	UNUSED size_t nmadvise, nunmapped;
	size_t npurged;

	if (config_stats) {
		nmadvise = 0;
		nunmapped = 0;
	}
	npurged = 0;

	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	for (extent_t *extent = extent_list_first(decay_extents); extent !=
	    NULL; extent = extent_list_first(decay_extents)) {
		if (config_stats) {
			nmadvise++;
		}
		size_t npages = extent_size_get(extent) >> LG_PAGE;
		npurged += npages;
		extent_list_remove(decay_extents, extent);
		switch (extents_state_get(extents)) {
		case extent_state_active:
			not_reached();
		case extent_state_dirty:
			if (!all && muzzy_decay_ms != 0 &&
			    !extent_purge_lazy_wrapper(tsdn, arena,
			    r_extent_hooks, extent, 0,
			    extent_size_get(extent))) {
				extents_dalloc(tsdn, arena, r_extent_hooks,
				    &arena->extents_muzzy, extent);
				arena_background_thread_inactivity_check(tsdn,
				    arena, is_background_thread);
				break;
			}
			/* Fall through. */
		case extent_state_muzzy:
			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
			    extent);
			if (config_stats) {
				nunmapped += npages;
			}
			break;
		case extent_state_retained:
		default:
			not_reached();
		}
	}

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
		    1);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &decay->stats->nmadvise, nmadvise);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
		    npurged);
		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
		    nunmapped << LG_PAGE);
		arena_stats_unlock(tsdn, &arena->stats);
	}

	return npurged;
}

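/*
 * Extent lifecycle implemented above (a sketch, assuming default extent
 * hooks on a system with lazy purging, e.g. MADV_FREE):
 *
 *	dirty --extent_purge_lazy--> muzzy --extent_dalloc--> unmapped/retained
 *
 * Dirty extents are demoted to muzzy via a lazy purge unless the decay is
 * all-at-once or muzzy decay is disabled (muzzy_decay_ms == 0), in which
 * case they fall through and are deallocated outright like muzzy extents.
 */
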
/*
 * npages_limit: Decay as many dirty extents as possible without violating the
 * invariant: (extents_npages_get(extents) >= npages_limit)
 */
static void
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool all, size_t npages_limit,
    bool is_background_thread) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 1);
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	if (decay->purging) {
		return;
	}
	decay->purging = true;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	extent_hooks_t *extent_hooks = extent_hooks_get(arena);

	extent_list_t decay_extents;
	extent_list_init(&decay_extents);

	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
	    npages_limit, &decay_extents);
	if (npurge != 0) {
		UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
		    &extent_hooks, decay, extents, all, &decay_extents,
		    is_background_thread);
		assert(npurged == npurge);
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	decay->purging = false;
}

static bool
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread, bool all) {
	if (all) {
		malloc_mutex_lock(tsdn, &decay->mtx);
		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
		    is_background_thread);
		malloc_mutex_unlock(tsdn, &decay->mtx);
		return false;
	}

	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* No need to wait if another thread is in progress. */
		return true;
	}

	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
	    is_background_thread);
	UNUSED size_t npages_new;
	if (epoch_advanced) {
		/* Backlog is updated on epoch advance. */
		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
	}
	malloc_mutex_unlock(tsdn, &decay->mtx);

	if (have_background_thread && background_thread_enabled() &&
	    epoch_advanced && !is_background_thread) {
		background_thread_interval_check(tsdn, arena, decay, npages_new);
	}

	return false;
}

static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, is_background_thread, all);
}

static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, is_background_thread, all);
}

void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
		return;
	}
	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}

static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
}

static void
arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) > 0);
	extent_heap_insert(&bin->slabs_nonfull, slab);
}

static void
arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) {
	extent_heap_remove(&bin->slabs_nonfull, slab);
}

static extent_t *
arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
	}
	if (config_stats) {
		bin->stats.reslabs++;
	}
	return slab;
}

static void
arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) == 0);
	/*
	 * Tracking extents is required by arena_reset, which is not allowed
	 * for auto arenas.  Bypass this step to avoid touching the extent
	 * linkage (often results in cache misses) for auto arenas.
	 */
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_append(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_remove(&bin->slabs_full, slab);
}

void
arena_reset(tsd_t *tsd, arena_t *arena) {
	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due to
	 *   reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Large allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);

	for (extent_t *extent = extent_list_first(&arena->large); extent !=
	    NULL; extent = extent_list_first(&arena->large)) {
		void *ptr = extent_base_get(extent);
		size_t usize;

		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
		alloc_ctx_t alloc_ctx;
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind != NSIZES);

		if (config_stats || (config_prof && opt_prof)) {
			usize = sz_index2size(alloc_ctx.szind);
			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		}
		/* Remove large allocation from prof sample set. */
		if (config_prof && opt_prof) {
			prof_free(tsd, ptr, usize, &alloc_ctx);
		}
		large_dalloc(tsd_tsdn(tsd), extent);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);

	/* Bins. */
	for (unsigned i = 0; i < NBINS; i++) {
		extent_t *slab;
		arena_bin_t *bin = &arena->bins[i];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (bin->slabcur != NULL) {
			slab = bin->slabcur;
			bin->slabcur = NULL;
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
		    NULL) {
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
		    slab = extent_list_first(&bin->slabs_full)) {
			arena_bin_slabs_full_remove(arena, bin, slab);
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		if (config_stats) {
			bin->stats.curregs = 0;
			bin->stats.curslabs = 0;
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
}

static void
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
	/*
	 * Iterate over the retained extents and destroy them.  This gives the
	 * extent allocator underlying the extent hooks an opportunity to unmap
	 * all retained memory without having to keep its own metadata
	 * structures.  In practice, virtual memory for dss-allocated extents is
	 * leaked here, so best practice is to avoid dss for arenas to be
	 * destroyed, or provide custom extent hooks that track retained
	 * dss-based extents for later reuse.
	 */
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	extent_t *extent;
	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
	    &arena->extents_retained, 0)) != NULL) {
		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
	}
}

void
arena_destroy(tsd_t *tsd, arena_t *arena) {
	assert(base_ind_get(arena->base) >= narenas_auto);
	assert(arena_nthreads_get(arena, false) == 0);
	assert(arena_nthreads_get(arena, true) == 0);

	/*
	 * No allocations have occurred since arena_reset() was called.
	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
	 * extents, so only retained extents may remain.
	 */
	assert(extents_npages_get(&arena->extents_dirty) == 0);
	assert(extents_npages_get(&arena->extents_muzzy) == 0);

	/* Deallocate retained memory. */
	arena_destroy_retained(tsd_tsdn(tsd), arena);

	/*
	 * Remove the arena pointer from the arenas array.  We rely on the fact
	 * that there is no way for the application to get a dirty read from the
	 * arenas array unless there is an inherent race in the application
	 * involving access of an arena being concurrently destroyed.  The
	 * application must synchronize knowledge of the arena's validity, so as
	 * long as we use an atomic write to update the arenas array, the
	 * application will get a clean read any time after it synchronizes
	 * knowledge that the arena is no longer valid.
	 */
	arena_set(base_ind_get(arena->base), NULL);

	/*
	 * Destroy the base allocator, which manages all metadata ever mapped by
	 * this arena.
	 */
	base_delete(tsd_tsdn(tsd), arena->base);
}

static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info,
    szind_t szind) {
	extent_t *slab;
	bool zero, commit;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	zero = false;
	commit = true;
	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);

	if (config_stats && slab != NULL) {
		arena_stats_mapped_add(tsdn, &arena->stats,
		    bin_info->slab_size);
	}

	return slab;
}

static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    const arena_bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	szind_t szind = sz_size2index(bin_info->reg_size);
	bool zero = false;
	bool commit = true;
	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
	    binind, &zero, &commit);
	if (slab == NULL) {
		slab = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
		    true, binind, &zero, &commit);
	}
	if (slab == NULL) {
		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
		    bin_info, szind);
		if (slab == NULL) {
			return NULL;
		}
	}
	assert(extent_slab_get(slab));

	/* Initialize slab internals. */
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	extent_nfree_set(slab, bin_info->nregs);
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);

	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);

	return slab;
}

static extent_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
    szind_t binind) {
	extent_t *slab;
	const arena_bin_info_t *bin_info;

	/* Look for a usable slab. */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}
	/* No existing slabs have any space available. */

	bin_info = &arena_bin_info[binind];

	/* Allocate a new slab. */
	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
	/********************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (slab != NULL) {
		if (config_stats) {
			bin->stats.nslabs++;
			bin->stats.curslabs++;
		}
		return slab;
	}

	/*
	 * arena_slab_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}

	return NULL;
}

/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
    szind_t binind) {
	const arena_bin_info_t *bin_info;
	extent_t *slab;

	bin_info = &arena_bin_info[binind];
	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}
	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
	if (bin->slabcur != NULL) {
		/*
		 * Another thread updated slabcur while this one ran without the
		 * bin lock in arena_bin_nonfull_slab_get().
		 */
		if (extent_nfree_get(bin->slabcur) > 0) {
			void *ret = arena_slab_reg_alloc(tsdn, bin->slabcur,
			    bin_info);
			if (slab != NULL) {
				/*
				 * arena_slab_alloc() may have allocated slab,
				 * or it may have been pulled from
				 * slabs_nonfull.  Therefore it is unsafe to
				 * make any assumptions about how slab has
				 * previously been used, and
				 * arena_bin_lower_slab() must be called, as if
				 * a region were just deallocated from the slab.
				 */
				if (extent_nfree_get(slab) == bin_info->nregs) {
					arena_dalloc_bin_slab(tsdn, arena, slab,
					    bin);
				} else {
					arena_bin_lower_slab(tsdn, arena, slab,
					    bin);
				}
			}
			return ret;
		}

		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}

	if (slab == NULL) {
		return NULL;
	}
	bin->slabcur = slab;

	assert(extent_nfree_get(bin->slabcur) > 0);

	return arena_slab_reg_alloc(tsdn, slab, bin_info);
}

void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
	unsigned i, nfill;
	arena_bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
		prof_idump(tsdn);
	}
	bin = &arena->bins[binind];
	malloc_mutex_lock(tsdn, &bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tcache->lg_fill_div[binind]); i < nfill; i++) {
		extent_t *slab;
		void *ptr;
		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
		    0) {
			ptr = arena_slab_reg_alloc(tsdn, slab,
			    &arena_bin_info[binind]);
		} else {
			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
		}
		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		/* Insert such that low regions get used first. */
		*(tbin->avail - nfill + i) = ptr;
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsdn, arena);
}

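/*
 * Fill-layout sketch (illustrative): tbin->avail points just past the
 * cached-pointer array, so the slot written above as *(tbin->avail - nfill
 * + i) is later popped as *(tbin->avail - ncached) with ncached counting
 * down.  The first-filled (lowest-addressed) regions are therefore the
 * first reused.
 */
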
void
arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) {
	if (!zero) {
		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
	}
}

static void
arena_dalloc_junk_small_impl(void *ptr, const arena_bin_info_t *bin_info) {
	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
    arena_dalloc_junk_small_impl;

static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
	void *ret;
	arena_bin_t *bin;
	size_t usize;
	extent_t *slab;

	assert(binind < NBINS);
	bin = &arena->bins[binind];
	usize = sz_index2size(binind);

	malloc_mutex_lock(tsdn, &bin->lock);
	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
		ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]);
	} else {
		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
	}

	if (ret == NULL) {
		malloc_mutex_unlock(tsdn, &bin->lock);
		return NULL;
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsdn, arena);
	return ret;
}

void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero) {
	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	}
	if (unlikely(arena == NULL)) {
		return NULL;
	}

	if (likely(size <= SMALL_MAXCLASS)) {
		return arena_malloc_small(tsdn, arena, ind, zero);
	}
	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}

void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache) {
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special slab placement. */
		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	} else {
		if (likely(alignment <= CACHELINE)) {
			ret = large_malloc(tsdn, arena, usize, zero);
		} else {
			ret = large_palloc(tsdn, arena, usize, alignment, zero);
		}
	}
	return ret;
}

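/*
 * Dispatch examples (a sketch; usize is assumed to have been computed via
 * sz_sa2u() so that the chosen size class itself guarantees the requested
 * alignment): usize = 64 with alignment = 64 takes the small path, while
 * usize = 4096 with alignment = 8192 exceeds PAGE alignment and lands in
 * large_palloc().
 */
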
void
arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
	assert(usize <= SMALL_MAXCLASS);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	arena_t *arena = extent_arena_get(extent);

	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    szind, false);

	prof_accum_cancel(tsdn, &arena->prof_accum, usize);

	assert(isalloc(tsdn, ptr) == usize);
}

static size_t
arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_szind_set(extent, NBINS);
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    NBINS, false);

	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);

	return LARGE_MINCLASS;
}

void
arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path) {
	cassert(config_prof);
	assert(opt_prof);

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize = arena_prof_demote(tsdn, extent, ptr);
	if (usize <= tcache_maxclass) {
		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
		    sz_size2index(usize), slow_path);
	} else {
		large_dalloc(tsdn, extent);
	}
}

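/*
 * Sampled-allocation lifecycle (sketch): a sampled small request is backed
 * by a LARGE_MINCLASS extent; arena_prof_promote() then records the small
 * szind so isalloc() reports the nominal small size.  On free,
 * arena_prof_demote() restores szind NBINS, so the extent is once again
 * treated as LARGE_MINCLASS and released through the large/tcache path
 * above.
 */
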
static void
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
	} else {
		szind_t binind = extent_szind_get(slab);
		const arena_bin_info_t *bin_info = &arena_bin_info[binind];

		/*
		 * The following block's conditional is necessary because if the
		 * slab only contains one region, then it never gets inserted
		 * into the non-full slabs heap.
		 */
		if (bin_info->nregs == 1) {
			arena_bin_slabs_full_remove(arena, bin, slab);
		} else {
			arena_bin_slabs_nonfull_remove(bin, slab);
		}
	}
}

static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    arena_bin_t *bin) {
	assert(slab != bin->slabcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	arena_slab_dalloc(tsdn, arena, slab);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		bin->stats.curslabs--;
	}
}

static void
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    arena_bin_t *bin) {
	assert(extent_nfree_get(slab) > 0);

	/*
	 * Make sure that if bin->slabcur is non-NULL, it refers to the
	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	 * slab.
	 */
	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
		/* Switch slabcur. */
		if (extent_nfree_get(bin->slabcur) > 0) {
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
		} else {
			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		}
		bin->slabcur = slab;
		if (config_stats) {
			bin->stats.reslabs++;
		}
	} else {
		arena_bin_slabs_nonfull_insert(bin, slab);
	}
}

static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    void *ptr, bool junked) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	szind_t binind = extent_szind_get(slab);
	arena_bin_t *bin = &arena->bins[binind];
	const arena_bin_info_t *bin_info = &arena_bin_info[binind];

	if (!junked && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, bin_info);
	}

	arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr);
	unsigned nfree = extent_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}

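/*
 * Slab state transitions driven above (sketch):
 *
 *	nfree == nregs            -> slab now empty: dissociate and dalloc
 *	nfree == 1 && != slabcur  -> slab was full: move full -> nonfull
 *
 * Slabs with 1 < nfree < nregs are already tracked as nonfull (or are
 * slabcur) and need no list movement.
 */
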
void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    void *ptr) {
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
	szind_t binind = extent_szind_get(extent);
	arena_bin_t *bin = &arena->bins[binind];

	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	extent_t *extent = iealloc(tsdn, ptr);
	arena_t *arena = extent_arena_get(extent);

	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
}

bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero) {
	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);

	if (unlikely(size > LARGE_MAXCLASS)) {
		return true;
	}

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		assert(arena_bin_info[sz_size2index(oldsize)].reg_size ==
		    oldsize);
		if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
		    sz_size2index(oldsize)) && (size > oldsize || usize_max <
		    oldsize)) {
			return true;
		}

		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
		return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
		    zero);
	}

	return true;
}

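/*
 * Example (illustrative, assuming the usual small size classes 8, 16, 32,
 * 48, ...): an allocation of oldsize 32 resized to any size in (16, 32]
 * maps to the same size class, so the request succeeds in place with no
 * data movement; growing past 32 (or shrinking to <= 16) fails here and
 * forces the caller down the move path.
 */
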
static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	}
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
		return NULL;
	}
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}

void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache) {
	size_t usize = sz_s2u(size);
	if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (likely(usize <= SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
			return ptr;
		}
	}

	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
		    alignment, zero, tcache);
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and copying.
	 */
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero, tcache);
	if (ret == NULL) {
		return NULL;
	}

	/*
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	 */

	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return ret;
}

dss_prec_t
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

ssize_t
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

ssize_t
arena_muzzy_decay_ms_default_get(void) {
	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

void
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

void
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

size_t
arena_extent_sn_next(arena_t *arena) {
	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}

arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}

		ql_new(&arena->tcache_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}

	if (config_prof) {
		if (prof_accum_init(tsdn, &arena->prof_accum)) {
			goto label_error;
		}
	}

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		atomic_store_zu(&arena->offset_state, config_debug ? ind :
		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
	}

	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);

	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);

	extent_list_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
	 * memory layout for best-fit extent allocation, since cached extents
	 * are likely to be reused soon after deallocation, and the cost of
	 * merging/splitting extents is non-trivial.
	 */
	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
	    true)) {
		goto label_error;
	}
	/*
	 * Coalesce muzzy extents immediately, because operations on them are in
	 * the critical path much less often than for dirty extents.
	 */
	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
	    false)) {
		goto label_error;
	}
	/*
	 * Coalesce retained extents immediately, in part because they will
	 * never be evicted (and therefore there's no opportunity for delayed
	 * coalescing), but also because operations on retained extents are not
	 * in the critical path.
	 */
	if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
	    false)) {
		goto label_error;
	}

	if (arena_decay_init(&arena->decay_dirty, &arena->extents_dirty,
	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
		goto label_error;
	}
	if (arena_decay_init(&arena->decay_muzzy, &arena->extents_muzzy,
	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
		goto label_error;
	}

	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	extent_avail_new(&arena->extent_avail);
	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock, "arena_bin",
		    WITNESS_RANK_ARENA_BIN, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
		bin->slabcur = NULL;
		extent_heap_new(&bin->slabs_nonfull);
		extent_list_init(&bin->slabs_full);
		if (config_stats) {
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
		}
	}

	arena->base = base;
	/* Set arena before creating background threads. */
	arena_set(ind, arena);

	nstime_init(&arena->create_time, 0);
	nstime_update(&arena->create_time);

	/* We don't support reentrancy for arena 0 bootstrapping. */
	if (ind != 0) {
		/*
		 * If we're here, then arena 0 already exists, so bootstrapping
		 * is done enough that we should have tsd.
		 */
		assert(!tsdn_null(tsdn));
		pre_reentrancy(tsdn_tsd(tsdn), arena);
		if (hooks_arena_new_hook) {
			hooks_arena_new_hook();
		}
		post_reentrancy(tsdn_tsd(tsdn));
	}

	return arena;
label_error:
	if (ind != 0) {
		base_delete(tsdn, base);
	}
	return NULL;
}

void
arena_boot(void) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
}

void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	extents_prefork(tsdn, &arena->extents_dirty);
	extents_prefork(tsdn, &arena->extents_muzzy);
	extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < NBINS; i++) {
		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
	}
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < NBINS; i++) {
		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
		}
	}

	for (i = 0; i < NBINS; i++) {
		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}