#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

/******************************************************************************/
/* Data. */

rtree_t		extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t	extent_mutex_pool;

size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;

static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained);
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained);
const extent_hooks_t	extent_hooks_default = {
	extent_alloc_default,
	extent_dalloc_default,
	extent_destroy_default,
	extent_commit_default,
	extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
	,
	extent_purge_lazy_default
#else
	,
	NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	,
	extent_purge_forced_default
#else
	,
	NULL
#endif
	,
	extent_split_default,
	extent_merge_default
};
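/*
 * Illustrative only: an application can install a custom table of the same
 * shape through the "arena.<i>.extent_hooks" mallctl.  A minimal sketch, in
 * which my_hooks and its members are hypothetical user-provided functions:
 *
 *	static extent_hooks_t my_hooks = {my_alloc, my_dalloc, my_destroy,
 *	    my_commit, my_decommit, my_purge_lazy, my_purge_forced,
 *	    my_split, my_merge};
 *	extent_hooks_t *new_hooks = &my_hooks;
 *	mallctl("arena.0.extent_hooks", NULL, NULL, (void *)&new_hooks,
 *	    sizeof(extent_hooks_t *));
 *
 * The wrappers below treat a NULL hook (other than alloc) as an operation
 * that always fails, except destroy, which is silently skipped.
 */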
/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;
/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained);
/******************************************************************************/

#define ATTR_NONE /* does nothing */

ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
    extent_esnead_comp)

#undef ATTR_NONE

typedef enum {
	lock_result_success,
	lock_result_failure,
	lock_result_no_extent
} lock_result_t;

static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result, bool inactive_only) {
	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
	    elm, true);

	/* Slab implies active extents and should be skipped. */
	if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
	    &extents_rtree, elm, true))) {
		return lock_result_no_extent;
	}

	/*
	 * It's possible that the extent changed out from under us, and with it
	 * the leaf->extent mapping.  We have to recheck while holding the lock.
	 */
	extent_lock(tsdn, extent1);
	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
	    &extents_rtree, elm, true);

	if (extent1 == extent2) {
		*result = extent1;
		return lock_result_success;
	} else {
		extent_unlock(tsdn, extent1);
		return lock_result_failure;
	}
}
/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
 */
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
    bool inactive_only) {
	extent_t *ret = NULL;
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
	    rtree_ctx, (uintptr_t)addr, false, false);
	if (elm == NULL) {
		return NULL;
	}
	lock_result_t lock_result;
	do {
		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
		    inactive_only);
	} while (lock_result == lock_result_failure);
	return ret;
}
extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_t *extent = extent_avail_first(&arena->extent_avail);
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
		return base_alloc_extent(tsdn, arena->base);
	}
	extent_avail_remove(&arena->extent_avail, extent);
	atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
	return extent;
}

void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_avail_insert(&arena->extent_avail, extent);
	atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}
extent_hooks_t *
extent_hooks_get(arena_t *arena) {
	return base_extent_hooks_get(arena->base);
}

extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
	background_thread_info_t *info;
	if (have_background_thread) {
		info = arena_background_thread_info_get(arena);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	}
	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
	if (have_background_thread) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	}

	return ret;
}

static void
extent_hooks_assure_initialized(arena_t *arena,
    extent_hooks_t **r_extent_hooks) {
	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
		*r_extent_hooks = extent_hooks_get(arena);
	}
}
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert((size & PAGE_MASK) == 0);

	pind = sz_psz2ind(size - sz_large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return size;
	}
	ret = sz_pind2sz(pind - 1) + sz_large_pad;
	assert(ret <= size);
	return ret;
}
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
	size_t ret;

	assert(size > 0);
	assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = extent_size_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large extent,
		 * because under-sized extents may be mixed in.  This only
		 * happens when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
		    sz_large_pad;
	}
	return ret;
}
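/*
 * Worked example (assuming 4 KiB pages and the default page-size class
 * spacing, where classes run ..., 28K, 32K, 40K, 48K, ...): a 36 KiB extent
 * quantizes down to 32 KiB via extent_size_quantize_floor() and up to 40 KiB
 * via extent_size_quantize_ceil(); a size that is already a class boundary
 * maps to itself under both functions.
 */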
/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)

bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce) {
	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
		extent_heap_new(&extents->heaps[i]);
	}
	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
	extent_list_init(&extents->lru);
	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
	extents->state = state;
	extents->delay_coalesce = delay_coalesce;
	return false;
}
extent_state_t
extents_state_get(const extents_t *extents) {
	return extents->state;
}

size_t
extents_npages_get(extents_t *extents) {
	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}

size_t
extents_nextents_get(extents_t *extents, pszind_t pind) {
	return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
}

size_t
extents_nbytes_get(extents_t *extents, pszind_t pind) {
	return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
}

static void
extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
}

static void
extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
}
static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_unset(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_heap_insert(&extents->heaps[pind], extent);

	if (config_stats) {
		extents_stats_add(extents, pind, size);
	}

	extent_list_append(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * All modifications to npages hold the mutex (as asserted above), so we
	 * don't need an atomic fetch-add; we can get by with a load followed by
	 * a store.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
	    ATOMIC_RELAXED);
}
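/*
 * Note that the bitmap mirrors heap emptiness: bit pind is kept unset exactly
 * while heaps[pind] is nonempty, which is what lets bitmap_ffu() in the fit
 * functions below skip directly to the next nonempty size class instead of
 * scanning every heap.
 */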
static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	extent_heap_remove(&extents->heaps[pind], extent);

	if (config_stats) {
		extents_stats_sub(extents, pind, size);
	}

	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_set(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_list_remove(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * As in extents_insert_locked, we hold extents->mtx and so don't need
	 * atomic operations for updating extents->npages.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	assert(cur_extents_npages >= npages);
	atomic_store_zu(&extents->npages,
	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}
/*
 * Find an extent with size [min_size, max_size) to satisfy the alignment
 * requirement.  For each size, try only the first extent in the heap.
 */
static extent_t *
extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
    size_t alignment) {
	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
	pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));

	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(i < SC_NPSIZES);
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		uintptr_t base = (uintptr_t)extent_base_get(extent);
		size_t candidate_size = extent_size_get(extent);
		assert(candidate_size >= min_size);

		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
		    PAGE_CEILING(alignment));
		if (base > next_align || base + candidate_size <= next_align) {
			/* Overflow or not crossing the next alignment. */
			continue;
		}

		size_t leadsize = next_align - base;
		if (candidate_size - leadsize >= min_size) {
			return extent;
		}
	}

	return NULL;
}
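/*
 * Worked example: with min_size = 16K and alignment = 64K, an 80K extent at
 * base 0x30000 crosses the next 64K boundary (0x40000) with 16K remaining past
 * the boundary, so it satisfies the request; a 20K extent at the same base
 * ends at 0x35000, before the boundary, and is skipped.
 */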
/*
 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
 * large enough.
 */
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	extent_t *ret = NULL;

	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));

	if (!maps_coalesce && !opt_retain) {
		/*
		 * No split / merge allowed (Windows w/o retain).  Try exact fit
		 * only.
		 */
		return extent_heap_empty(&extents->heaps[pind]) ? NULL :
		    extent_heap_first(&extents->heaps[pind]);
	}

	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind);
	    i < SC_NPSIZES + 1;
	    i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		/*
		 * In order to reduce fragmentation, avoid reusing and splitting
		 * large extents for much smaller sizes.
		 *
		 * Only do check for dirty extents (delay_coalesce).
		 */
		if (extents->delay_coalesce &&
		    (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
			break;
		}
		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
			ret = extent;
		}
		if (i == SC_NPSIZES) {
			break;
		}
		assert(i < SC_NPSIZES);
	}

	return ret;
}
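/*
 * For example, with the default opt_lg_extent_max_active_fit (6 at the time of
 * writing), a dirty (delay_coalesce) extents_t declines to recycle an extent
 * whose size class is more than 64x the requested size, preferring new mapping
 * over fragmenting a huge cached extent.
 */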
/*
 * Do first-fit extent selection, where the selection policy choice is
 * based on extents->delay_coalesce.
 */
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t esize, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);

	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (max_size < esize) {
		return NULL;
	}

	extent_t *extent =
	    extents_first_fit_locked(tsdn, arena, extents, max_size);

	if (alignment > PAGE && extent == NULL) {
		/*
		 * max_size guarantees the alignment requirement but is rather
		 * pessimistic.  Next we try to satisfy the aligned allocation
		 * with sizes in [esize, max_size).
		 */
		extent = extents_fit_alignment(extents, esize, max_size,
		    alignment);
	}

	return extent;
}
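/*
 * Worked example (assuming 4 KiB pages): esize = 16K with alignment = 64K
 * gives max_size = 16K + 64K - 4K = 76K.  Any extent of at least that size
 * necessarily contains a 64K-aligned 16K subrange, so the first-fit pass can
 * ignore alignment entirely; the alignment-aware fallback above only has to
 * consider the smaller sizes in [esize, max_size).
 */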
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent) {
	extent_state_set(extent, extent_state_active);
	bool coalesced;
	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, &coalesced, false);
	extent_state_set(extent, extents_state_get(extents));

	if (!coalesced) {
		return true;
	}
	extents_insert_locked(tsdn, extents, extent);
	return false;
}
extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size + pad != 0);
	assert(alignment != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
	assert(extent == NULL || extent_dumpable_get(extent));
	return extent;
}

void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));
	extent_zeroed_set(extent, false);

	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
}
extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, size_t npages_min) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	malloc_mutex_lock(tsdn, &extents->mtx);

	/*
	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
	 * the loop will iterate until the LRU extent is fully coalesced.
	 */
	extent_t *extent;
	while (true) {
		/* Get the LRU extent, if any. */
		extent = extent_list_first(&extents->lru);
		if (extent == NULL) {
			goto label_return;
		}
		/* Check the eviction limit. */
		size_t extents_npages = atomic_load_zu(&extents->npages,
		    ATOMIC_RELAXED);
		if (extents_npages <= npages_min) {
			extent = NULL;
			goto label_return;
		}
		extents_remove_locked(tsdn, extents, extent);
		if (!extents->delay_coalesce) {
			break;
		}
		/* Try to coalesce. */
		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent)) {
			break;
		}
		/*
		 * The LRU extent was just coalesced and the result placed in
		 * the LRU at its neighbor's position.  Start over.
		 */
	}

	/*
	 * Either mark the extent active or deregister it to protect against
	 * concurrent operations.
	 */
	switch (extents_state_get(extents)) {
	case extent_state_active:
		not_reached();
	case extent_state_dirty:
	case extent_state_muzzy:
		extent_state_set(extent, extent_state_active);
		break;
	case extent_state_retained:
		extent_deregister(tsdn, extent);
		break;
	default:
		not_reached();
	}

label_return:
	malloc_mutex_unlock(tsdn, &extents->mtx);
	return extent;
}
/*
 * This can only happen when we fail to allocate a new extent struct (which
 * indicates OOM), e.g. when trying to split an existing extent.
 */
static void
extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	size_t sz = extent_size_get(extent);
	if (config_stats) {
		arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
	}
	/*
	 * Leak extent after making sure its pages have already been purged, so
	 * that this is only a virtual memory leak.
	 */
	if (extents_state_get(extents) == extent_state_dirty) {
		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
		    extent, 0, sz, growing_retained)) {
			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
			    extent, 0, extent_size_get(extent),
			    growing_retained);
		}
	}
	extent_dalloc(tsdn, arena, extent);
}
void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_prefork(tsdn, &extents->mtx);
}

void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}

void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_child(tsdn, &extents->mtx);
}
static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extent_state_active);

	extent_state_set(extent, extents_state_get(extents));
	extents_insert_locked(tsdn, extents, extent);
}

static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_deactivate_locked(tsdn, arena, extents, extent);
	malloc_mutex_unlock(tsdn, &extents->mtx);
}

static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extents_state_get(extents));

	extents_remove_locked(tsdn, extents, extent);
	extent_state_set(extent, extent_state_active);
}
static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent, bool dependent, bool init_missing,
    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_a == NULL) {
		return true;
	}
	assert(*r_elm_a != NULL);

	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_b == NULL) {
		return true;
	}
	assert(*r_elm_b != NULL);

	return false;
}

static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
    rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
	if (elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
		    slab);
	}
}
static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
    szind_t szind) {
	assert(extent_slab_get(extent));

	/* Register interior. */
	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_write(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE), extent, szind, true);
	}
}
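/*
 * Note that the loop above starts at 1 and stops one page short: the first and
 * last pages are already mapped by extent_register() via elm_a/elm_b, so only
 * the interior pages need explicit rtree entries.  This is done only for
 * slabs, where pointers into the middle of the extent must be resolvable.
 */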
static void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);
	/* prof_gdump() requirement. */
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nadd = extent_size_get(extent) >> LG_PAGE;
		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
		    ATOMIC_RELAXED) + nadd;
		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
		while (cur > high && !atomic_compare_exchange_weak_zu(
		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highpages update race.
			 * Note that high is updated in case of CAS failure.
			 */
		}
		if (cur > high && prof_gdump_get_unlocked()) {
			prof_gdump(tsdn);
		}
	}
}

static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nsub = extent_size_get(extent) >> LG_PAGE;
		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
	}
}
static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;

	/*
	 * We need to hold the lock to protect against a concurrent coalesce
	 * operation that sees us in a partial state.
	 */
	extent_lock(tsdn, extent);

	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
	    &elm_a, &elm_b)) {
		extent_unlock(tsdn, extent);
		return true;
	}

	szind_t szind = extent_szind_get_maybe_invalid(extent);
	bool slab = extent_slab_get(extent);
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
	if (slab) {
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump_add) {
		extent_gdump_add(tsdn, extent);
	}

	return false;
}

bool
extent_register(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, true);
}

bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, false);
}

static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
	bool err = extent_register(tsdn, extent);
	assert(!err);
}
/*
 * Removes all pointers to the given extent from the global rtree indices for
 * its interior.  This is relevant for slab extents, for which we need to do
 * metadata lookups at places other than the head of the extent.  We deregister
 * on the interior, then, when an extent moves from being an active slab to an
 * inactive state.
 */
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    extent_t *extent) {
	size_t i;

	assert(extent_slab_get(extent));

	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE));
	}
}

/*
 * Removes all pointers to the given extent from the global rtree.
 */
static void
extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
	    &elm_a, &elm_b);

	extent_lock(tsdn, extent);

	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump) {
		extent_gdump_sub(tsdn, extent);
	}
}

static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, true);
}

static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, false);
}
/*
 * Tries to find and remove an extent from extents that can be used for the
 * given allocation request.
 */
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(alignment > 0);
	if (config_debug && new_addr != NULL) {
		/*
		 * Non-NULL new_addr has two use cases:
		 *
		 *   1) Recycle a known-extant extent, e.g. during purging.
		 *   2) Perform in-place expanding reallocation.
		 *
		 * Regardless of use case, new_addr must either refer to a
		 * non-existing extent, or to the base of an extant extent,
		 * since only active slabs support interior lookups (which of
		 * course cannot be recycled).
		 */
		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
		assert(pad == 0);
		assert(alignment <= PAGE);
	}

	size_t esize = size + pad;
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);
	extent_t *extent;
	if (new_addr != NULL) {
		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
		    false);
		if (extent != NULL) {
			/*
			 * We might null-out extent to report an error, but we
			 * still need to unlock the associated mutex after.
			 */
			extent_t *unlock_extent = extent;
			assert(extent_base_get(extent) == new_addr);
			if (extent_arena_get(extent) != arena ||
			    extent_size_get(extent) < esize ||
			    extent_state_get(extent) !=
			    extents_state_get(extents)) {
				extent = NULL;
			}
			extent_unlock(tsdn, unlock_extent);
		}
	} else {
		extent = extents_fit_locked(tsdn, arena, extents, esize,
		    alignment);
	}
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &extents->mtx);
		return NULL;
	}

	extent_activate_locked(tsdn, arena, extents, extent);
	malloc_mutex_unlock(tsdn, &extents->mtx);

	return extent;
}
/*
 * Given an allocation request and an extent guaranteed to be able to satisfy
 * it, this splits off lead and trail extents, leaving extent pointing to an
 * extent satisfying the allocation.
 * This function doesn't put lead or trail into any extents_t; it's the caller's
 * job to ensure that they can be reused.
 */
typedef enum {
	/*
	 * Split successfully.  lead, extent, and trail, are modified to extents
	 * describing the ranges before, in, and after the given allocation.
	 */
	extent_split_interior_ok,
	/*
	 * The extent can't satisfy the given allocation request.  None of the
	 * input extent_t *s are touched.
	 */
	extent_split_interior_cant_alloc,
	/*
	 * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
	 * and salvage what's still salvageable (if *to_salvage is non-NULL).
	 * None of lead, extent, or trail are valid.
	 */
	extent_split_interior_error
} extent_split_interior_result_t;
static extent_split_interior_result_t
extent_split_interior(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
    /* The result of splitting, in case of success. */
    extent_t **extent, extent_t **lead, extent_t **trail,
    /* The mess to clean up, in case of error. */
    extent_t **to_leak, extent_t **to_salvage,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, bool growing_retained) {
	size_t esize = size + pad;
	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
	assert(new_addr == NULL || leadsize == 0);
	if (extent_size_get(*extent) < leadsize + esize) {
		return extent_split_interior_cant_alloc;
	}
	size_t trailsize = extent_size_get(*extent) - leadsize - esize;

	*lead = NULL;
	*trail = NULL;
	*to_leak = NULL;
	*to_salvage = NULL;

	/* Split the lead. */
	if (leadsize != 0) {
		*lead = *extent;
		*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
		    *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
		    slab, growing_retained);
		if (*extent == NULL) {
			*to_leak = *lead;
			*lead = NULL;
			return extent_split_interior_error;
		}
	}

	/* Split the trail. */
	if (trailsize != 0) {
		*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
		    esize, szind, slab, trailsize, SC_NSIZES, false,
		    growing_retained);
		if (*trail == NULL) {
			*to_leak = *extent;
			*to_salvage = *lead;
			*lead = NULL;
			*extent = NULL;
			return extent_split_interior_error;
		}
	}

	if (leadsize == 0 && trailsize == 0) {
		/*
		 * Splitting causes szind to be set as a side effect, but no
		 * splitting occurred.
		 */
		extent_szind_set(*extent, szind);
		if (szind != SC_NSIZES) {
			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)extent_addr_get(*extent), szind, slab);
			if (slab && extent_size_get(*extent) > PAGE) {
				rtree_szind_slab_update(tsdn, &extents_rtree,
				    rtree_ctx,
				    (uintptr_t)extent_past_get(*extent) -
				    (uintptr_t)PAGE, szind, slab);
			}
		}
	}

	return extent_split_interior_ok;
}
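/*
 * Worked example: recycling a 96K extent to satisfy size = 16K with
 * alignment = 64K may yield leadsize = 48K and trailsize = 32K (depending on
 * the extent's base address), i.e. a 48K lead, the 16K allocation now
 * 64K-aligned, and a 32K trail; both byproducts are returned to the source
 * extents_t by the caller.
 */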
/*
 * This fulfills the indicated allocation request out of the given extent (which
 * the caller should have ensured was big enough).  If there's any unused space
 * before or after the resulting allocation, that space is given its own extent
 * and put back into extents.
 */
static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, extent_t *extent, bool growing_retained) {
	extent_t *lead;
	extent_t *trail;
	extent_t *to_leak;
	extent_t *to_salvage;

	extent_split_interior_result_t result = extent_split_interior(
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
	    &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
	    growing_retained);

	if (!maps_coalesce && result != extent_split_interior_ok
	    && !opt_retain) {
		/*
		 * Split isn't supported (implies Windows w/o retain).  Avoid
		 * leaking the extents.
		 */
		assert(to_leak != NULL && lead == NULL && trail == NULL);
		extent_deactivate(tsdn, arena, extents, to_leak);
		return NULL;
	}

	if (result == extent_split_interior_ok) {
		if (lead != NULL) {
			extent_deactivate(tsdn, arena, extents, lead);
		}
		if (trail != NULL) {
			extent_deactivate(tsdn, arena, extents, trail);
		}
		return extent;
	} else {
		/*
		 * We should have picked an extent that was large enough to
		 * fulfill our allocation request.
		 */
		assert(result == extent_split_interior_error);
		if (to_salvage != NULL) {
			extent_deregister(tsdn, to_salvage);
		}
		if (to_leak != NULL) {
			void *leak = extent_base_get(to_leak);
			extent_deregister_no_gdump_sub(tsdn, to_leak);
			extents_abandon_vm(tsdn, arena, r_extent_hooks, extents,
			    to_leak, growing_retained);
			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
			    false) == NULL);
		}
		return NULL;
	}
	unreachable();
}
static bool
extent_need_manual_zero(arena_t *arena) {
	/*
	 * Need to manually zero the extent on repopulating if either; 1) non
	 * default extent hooks installed (in which case the purge semantics may
	 * change); or 2) transparent huge pages enabled.
	 */
	return (!arena_has_default_hooks(arena) ||
	    (opt_thp == thp_mode_always));
}
/*
 * Tries to satisfy the given allocation request by reusing one of the extents
 * in the given extents_t.
 */
static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(new_addr == NULL || !slab);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
	    rtree_ctx, extents, new_addr, size, pad, alignment, slab,
	    growing_retained);
	if (extent == NULL) {
		return NULL;
	}

	extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, new_addr, size, pad, alignment, slab, szind, extent,
	    growing_retained);
	if (extent == NULL) {
		return NULL;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
		    0, extent_size_get(extent), growing_retained)) {
			extent_record(tsdn, arena, r_extent_hooks, extents,
			    extent, growing_retained);
			return NULL;
		}
		if (!extent_need_manual_zero(arena)) {
			extent_zeroed_set(extent, true);
		}
	}

	if (extent_committed_get(extent)) {
		*commit = true;
	}
	if (extent_zeroed_get(extent)) {
		*zero = true;
	}

	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	assert(extent_state_get(extent) == extent_state_active);
	if (slab) {
		extent_slab_set(extent, slab);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	if (*zero) {
		void *addr = extent_base_get(extent);
		if (!extent_zeroed_get(extent)) {
			size_t size = extent_size_get(extent);
			if (extent_need_manual_zero(arena) ||
			    pages_purge_forced(addr, size)) {
				memset(addr, 0, size);
			}
		} else if (config_debug) {
			size_t *p = (size_t *)(uintptr_t)addr;
			/* Check the first page only. */
			for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
				assert(p[i] == 0);
			}
		}
	}
	return extent;
}
/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
 * advantage of this to avoid demanding zeroed extents, but taking advantage of
 * them if they are returned.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
	void *ret;

	assert(size != 0);
	assert(alignment != 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}
	/* mmap. */
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
	    != NULL) {
		return ret;
	}
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}

	/* All strategies for allocation failed. */
	return NULL;
}
static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit) {
	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
	    commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
	    ATOMIC_RELAXED));
	if (have_madvise_huge && ret) {
		pages_set_thp_state(ret, size);
	}
	return ret;
}

static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	tsdn_t *tsdn;
	arena_t *arena;

	tsdn = tsdn_fetch();
	arena = arena_get(tsdn, arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);

	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
	    ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
}
static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
	if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
		/*
		 * The only legitimate case of customized extent hooks for a0 is
		 * hooks with no allocation activities.  One such example is to
		 * place metadata on pre-allocated resources such as huge pages.
		 * In that case, rely on reentrancy_level checks to catch
		 * infinite recursions.
		 */
		pre_reentrancy(tsd, NULL);
	} else {
		pre_reentrancy(tsd, arena);
	}
}

static void
extent_hook_post_reentrancy(tsdn_t *tsdn) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
	post_reentrancy(tsd);
}
/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 */
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
    bool slab, szind_t szind, bool *zero, bool *commit) {
	malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	size_t esize = size + pad;
	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size_min < esize) {
		goto label_err;
	}
	/*
	 * Find the next extent size in the series that would be large enough to
	 * satisfy this request.
	 */
	pszind_t egn_skip = 0;
	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	while (alloc_size < alloc_size_min) {
		egn_skip++;
		if (arena->extent_grow_next + egn_skip >=
		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
			/* Outside legal range. */
			goto label_err;
		}
		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	}

	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		goto label_err;
	}
	bool zeroed = false;
	bool committed = false;

	void *ptr;
	if (*r_extent_hooks == &extent_hooks_default) {
		ptr = extent_alloc_default_impl(tsdn, arena, NULL,
		    alloc_size, PAGE, &zeroed, &committed);
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
		    alloc_size, PAGE, &zeroed, &committed,
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
	    arena_extent_sn_next(arena), extent_state_active, zeroed,
	    committed, true, EXTENT_IS_HEAD);
	if (ptr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		goto label_err;
	}

	if (extent_register_no_gdump_add(tsdn, extent)) {
		extent_dalloc(tsdn, arena, extent);
		goto label_err;
	}

	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
		*zero = true;
	}
	if (extent_committed_get(extent)) {
		*commit = true;
	}

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *lead;
	extent_t *trail;
	extent_t *to_leak;
	extent_t *to_salvage;
	extent_split_interior_result_t result = extent_split_interior(
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
	    &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
	    true);

	if (result == extent_split_interior_ok) {
		if (lead != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, lead, true);
		}
		if (trail != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, trail, true);
		}
	} else {
		/*
		 * We should have allocated a sufficiently large extent; the
		 * cant_alloc case should not occur.
		 */
		assert(result == extent_split_interior_error);
		if (to_salvage != NULL) {
			if (config_prof) {
				extent_gdump_add(tsdn, to_salvage);
			}
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_salvage, true);
		}
		if (to_leak != NULL) {
			extent_deregister_no_gdump_sub(tsdn, to_leak);
			extents_abandon_vm(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_leak, true);
		}
		goto label_err;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
		    extent_size_get(extent), true)) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, extent, true);
			goto label_err;
		}
		if (!extent_need_manual_zero(arena)) {
			extent_zeroed_set(extent, true);
		}
	}

	/*
	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
	 * range.
	 */
	if (arena->extent_grow_next + egn_skip + 1 <=
	    arena->retain_grow_limit) {
		arena->extent_grow_next += egn_skip + 1;
	} else {
		arena->extent_grow_next = arena->retain_grow_limit;
	}
	/* All opportunities for failure are past. */
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);

	if (config_prof) {
		/* Adjust gdump stats now that extent is final size. */
		extent_gdump_add(tsdn, extent);
	}
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (slab) {
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		extent_slab_set(extent, true);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}
	if (*zero && !extent_zeroed_get(extent)) {
		void *addr = extent_base_get(extent);
		size_t size = extent_size_get(extent);
		if (extent_need_manual_zero(arena) ||
		    pages_purge_forced(addr, size)) {
			memset(addr, 0, size);
		}
	}

	return extent;
label_err:
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	return NULL;
}
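/*
 * Because page-size classes are spaced four per size doubling, successive
 * growth steps above advance by roughly 1.25x per class; each successful grow
 * bumps extent_grow_next, so retained virtual memory expands geometrically
 * rather than one request at a time, bounding the number of disjoint retained
 * ranges per arena.
 */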
static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size != 0);
	assert(alignment != 0);

	malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
	    &arena->extents_retained, new_addr, size, pad, alignment, slab,
	    szind, zero, commit, true);
	if (extent != NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
		if (config_prof) {
			extent_gdump_add(tsdn, extent);
		}
	} else if (opt_retain && new_addr == NULL) {
		extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
		    pad, alignment, slab, szind, zero, commit);
		/* extent_grow_retained() always releases extent_grow_mtx. */
	} else {
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	}
	malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);

	return extent;
}
static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	size_t esize = size + pad;
	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		return NULL;
	}
	void *addr;
	size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
		    palignment, zero, commit);
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
		    esize, palignment, zero, commit, arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}
	if (addr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		return NULL;
	}
	extent_init(extent, arena, addr, esize, slab, szind,
	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
	    true, EXTENT_NOT_HEAD);
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (extent_register(tsdn, extent)) {
		extent_dalloc(tsdn, arena, extent);
		return NULL;
	}

	return extent;
}
extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
	    new_addr, size, pad, alignment, slab, szind, zero, commit);
	if (extent == NULL) {
		if (opt_retain && new_addr != NULL) {
			/*
			 * When retain is enabled and new_addr is set, we do not
			 * attempt extent_alloc_wrapper_hard which does mmap
			 * that is very unlikely to succeed (unless it happens
			 * to be at the end).
			 */
			return NULL;
		}
		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
		    new_addr, size, pad, alignment, slab, szind, zero, commit);
	}

	assert(extent == NULL || extent_dumpable_get(extent));
	return extent;
}
static bool
extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
    const extent_t *outer) {
	assert(extent_arena_get(inner) == arena);
	if (extent_arena_get(outer) != arena) {
		return false;
	}

	assert(extent_state_get(inner) == extent_state_active);
	if (extent_state_get(outer) != extents->state) {
		return false;
	}

	if (extent_committed_get(inner) != extent_committed_get(outer)) {
		return false;
	}

	return true;
}
static bool
extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
    bool growing_retained) {
	assert(extent_can_coalesce(arena, extents, inner, outer));

	extent_activate_locked(tsdn, arena, extents, outer);

	malloc_mutex_unlock(tsdn, &extents->mtx);
	bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
	    forward ? inner : outer, forward ? outer : inner, growing_retained);
	malloc_mutex_lock(tsdn, &extents->mtx);

	if (err) {
		extent_deactivate_locked(tsdn, arena, extents, outer);
	}

	return err;
}
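/*
 * Note that extents->mtx is dropped around extent_merge_impl() above: merging
 * takes the per-extent pool locks and updates the rtree, so holding the
 * extents lock across it would lengthen the critical section and risk
 * lock-order violations with threads acquiring locks in the opposite order.
 */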
static extent_t *
extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained,
    bool inactive_only) {
	/*
	 * We avoid checking / locking inactive neighbors for large size
	 * classes, since they are eagerly coalesced on deallocation which can
	 * cause lock contention.
	 */
	/*
	 * Continue attempting to coalesce until failure, to protect against
	 * races with other threads that are thwarted by this one.
	 */
	bool again;
	do {
		again = false;

		/* Try to coalesce forward. */
		extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
		    extent_past_get(extent), inactive_only);
		if (next != NULL) {
			/*
			 * extents->mtx only protects against races for
			 * like-state extents, so call extent_can_coalesce()
			 * before releasing next's pool lock.
			 */
			bool can_coalesce = extent_can_coalesce(arena, extents,
			    extent, next);

			extent_unlock(tsdn, next);

			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, next, true,
			    growing_retained)) {
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}

		/* Try to coalesce backward. */
		extent_t *prev = NULL;
		if (extent_before_get(extent) != NULL) {
			prev = extent_lock_from_addr(tsdn, rtree_ctx,
			    extent_before_get(extent), inactive_only);
		}
		if (prev != NULL) {
			bool can_coalesce = extent_can_coalesce(arena, extents,
			    extent, prev);
			extent_unlock(tsdn, prev);

			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, prev, false,
			    growing_retained)) {
				extent = prev;
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}
	} while (again);

	if (extents->delay_coalesce) {
		*coalesced = false;
	}
	return extent;
}
static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained) {
	return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, coalesced, growing_retained, false);
}

static extent_t *
extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained) {
	return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, coalesced, growing_retained, true);
}
/*
 * Does the metadata management portions of putting an unused extent into the
 * given extents_t (coalesces, deregisters slab interiors, the heap operations).
 */
static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	assert((extents_state_get(extents) != extent_state_dirty &&
	    extents_state_get(extents) != extent_state_muzzy) ||
	    !extent_zeroed_get(extent));

	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_szind_set(extent, SC_NSIZES);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), true) == extent);

	if (!extents->delay_coalesce) {
		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent, NULL, growing_retained);
	} else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
		assert(extents == &arena->extents_dirty);
		/* Always coalesce large extents eagerly. */
		bool coalesced;
		do {
			assert(extent_state_get(extent) == extent_state_active);
			extent = extent_try_coalesce_large(tsdn, arena,
			    r_extent_hooks, rtree_ctx, extents, extent,
			    &coalesced, growing_retained);
		} while (coalesced);
		if (extent_size_get(extent) >= oversize_threshold) {
			/* Shortcut to purge the oversize extent eagerly. */
			malloc_mutex_unlock(tsdn, &extents->mtx);
			arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
			return;
		}
	}
	extent_deactivate_locked(tsdn, arena, extents, extent);

	malloc_mutex_unlock(tsdn, &extents->mtx);
}
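/*
 * The oversize_threshold shortcut above (8 MiB by default, tunable via the
 * opt.oversize_threshold mallctl) hands very large freed extents straight to
 * decay instead of caching them, since they are unlikely to be reused soon and
 * would otherwise pin large amounts of dirty memory.
 */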
void
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (extent_register(tsdn, extent)) {
		extent_dalloc(tsdn, arena, extent);
		return;
	}
	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
}

static bool
extent_may_dalloc(void) {
	/* With retain enabled, the default dalloc always fails. */
	return !opt_retain;
}

static bool
extent_dalloc_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		return extent_dalloc_mmap(addr, size);
	}
	return true;
}

static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	return extent_dalloc_default_impl(addr, size);
}
static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	bool err;

	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to deallocate. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_dalloc_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = ((*r_extent_hooks)->dalloc == NULL ||
		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena)));
		extent_hook_post_reentrancy(tsdn);
	}

	if (!err) {
		extent_dalloc(tsdn, arena, extent);
	}

	return err;
}
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Avoid calling the default extent_dalloc unless have to. */
	if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
		/*
		 * Deregister first to avoid a race with other allocating
		 * threads, and reregister if deallocation fails.
		 */
		extent_deregister(tsdn, extent);
		if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
		    extent)) {
			return;
		}
		extent_reregister(tsdn, extent);
	}

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	/* Try to decommit; purge if that fails. */
	bool zeroed;
	if (!extent_committed_get(extent)) {
		zeroed = true;
	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
	    0, extent_size_get(extent))) {
		zeroed = true;
	} else if ((*r_extent_hooks)->purge_forced != NULL &&
	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena))) {
		zeroed = true;
	} else if (extent_state_get(extent) == extent_state_muzzy ||
	    ((*r_extent_hooks)->purge_lazy != NULL &&
	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena)))) {
		zeroed = false;
	} else {
		zeroed = false;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_zeroed_set(extent, zeroed);

	if (config_prof) {
		extent_gdump_sub(tsdn, extent);
	}

	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
	    extent, false);
}
static void
extent_destroy_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		pages_unmap(addr, size);
	}
}

static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	extent_destroy_default_impl(addr, size);
}

void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Deregister first to avoid a race with other allocating threads. */
	extent_deregister(tsdn, extent);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to destroy; silently fail otherwise. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		extent_destroy_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else if ((*r_extent_hooks)->destroy != NULL) {
		extent_hook_pre_reentrancy(tsdn, arena);
		(*r_extent_hooks)->destroy(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	extent_dalloc(tsdn, arena, extent);
}
static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = ((*r_extent_hooks)->commit == NULL ||
	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_committed_set(extent, extent_committed_get(extent) || !err);
	return err;
}

bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
	    length, false);
}
static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = ((*r_extent_hooks)->decommit == NULL ||
	    (*r_extent_hooks)->decommit(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_committed_set(extent, extent_committed_get(extent) && err);
	return err;
}
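/*
 * Note the committed-flag updates in the commit/decommit paths above: hooks
 * return true on failure, so "|| !err" marks the extent committed only after
 * a successful commit, while "&& err" leaves it committed only if decommit
 * failed.
 */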
1976 #ifdef PAGES_CAN_PURGE_LAZY
1978 extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1979 size_t offset, size_t length, unsigned arena_ind) {
1980 assert(addr != NULL);
1981 assert((offset & PAGE_MASK) == 0);
1982 assert(length != 0);
1983 assert((length & PAGE_MASK) == 0);
1985 return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->purge_lazy == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}

	return err;
}

bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

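/*
 * Forced purging discards page contents outright (e.g. MADV_DONTNEED on
 * Linux); purged pages read back as zero-filled.
 */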
#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length);
}
#endif

static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->purge_forced == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	return err;
}

bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

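/*
 * The default split performs no system call; when maps_coalesce, splitting
 * is pure bookkeeping and always succeeds.
 */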
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	if (!maps_coalesce) {
		/*
		 * Without retain, only whole regions can be purged (required by
		 * MEM_RELEASE on Windows) -- therefore disallow splitting.  See
		 * comments in extent_head_no_merge().
		 */
		return !opt_retain;
	}

	return false;
}

/*
 * Accepts the extent to split, and the characteristics of each side of the
 * split.  The 'a' parameters go with the 'lead' of the resulting pair of
 * extents (the lower addressed portion of the split), and the 'b' parameters go
 * with the trail (the higher addressed portion).  This makes 'extent' the lead,
 * and returns the trail (except in case of error).
 */
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained) {
	assert(extent_size_get(extent) == size_a + size_b);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->split == NULL) {
		return NULL;
	}

	extent_t *trail = extent_alloc(tsdn, arena);
	if (trail == NULL) {
		goto label_error_a;
	}

	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
	    extent_state_get(extent), extent_zeroed_get(extent),
	    extent_committed_get(extent), extent_dumpable_get(extent),
	    EXTENT_NOT_HEAD);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
	{
		extent_t lead;

		extent_init(&lead, arena, extent_addr_get(extent), size_a,
		    slab_a, szind_a, extent_sn_get(extent),
		    extent_state_get(extent), extent_zeroed_get(extent),
		    extent_committed_get(extent), extent_dumpable_get(extent),
		    EXTENT_NOT_HEAD);

		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
		    true, &lead_elm_a, &lead_elm_b);
	}
	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
	    &trail_elm_a, &trail_elm_b);

	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
	    || trail_elm_b == NULL) {
		goto label_error_b;
	}

	extent_lock2(tsdn, extent, trail);

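	/*
	 * Both extents stay locked across the split hook call and the rtree
	 * updates, so no thread can observe a half-split state.
	 */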
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
	    size_a + size_b, size_a, size_b, extent_committed_get(extent),
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	if (err) {
		goto label_error_c;
	}

	extent_size_set(extent, size_a);
	extent_szind_set(extent, szind_a);

	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
	    szind_a, slab_a);
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
	    szind_b, slab_b);

	extent_unlock2(tsdn, extent, trail);

	return trail;
label_error_c:
	extent_unlock2(tsdn, extent, trail);
label_error_b:
	extent_dalloc(tsdn, arena, trail);
label_error_a:
	return NULL;
}

extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
	    szind_a, slab_a, size_b, szind_b, slab_b, false);
}

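/*
 * Merging is refused when the mappings cannot coalesce (absent retain), and
 * across the dss/mmap boundary, since dss and mmap extents must remain
 * distinguishable.
 */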
static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
	if (!maps_coalesce && !opt_retain) {
		return true;
	}
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
		return true;
	}

	return false;
}

/*
 * Returns true if the given extents can't be merged because of their head bit
 * settings.  Assumes the second extent has the higher address.
 */
static bool
extent_head_no_merge(extent_t *a, extent_t *b) {
	assert(extent_base_get(a) < extent_base_get(b));
	/*
	 * When coalesce is not always allowed (Windows), only merge extents
	 * from the same VirtualAlloc region under opt.retain (in which case
	 * MEM_DECOMMIT is utilized for purging).
	 */
	if (maps_coalesce) {
		return false;
	}
	if (!opt_retain) {
		return true;
	}
	/* If b is a head extent, disallow the cross-region merge. */
	if (extent_is_head_get(b)) {
		/*
		 * Additionally, sn should not overflow with retain; sanity
		 * check that different regions have unique sn.
		 */
		assert(extent_sn_comp(a, b) != 0);
		return true;
	}
	assert(extent_sn_comp(a, b) == 0);

	return false;
}

static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	if (!maps_coalesce) {
		tsdn_t *tsdn = tsdn_fetch();
		extent_t *a = iealloc(tsdn, addr_a);
		extent_t *b = iealloc(tsdn, addr_b);
		if (extent_head_no_merge(a, b)) {
			return true;
		}
	}
	return extent_merge_default_impl(addr_a, addr_b);
}

static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(extent_base_get(a) < extent_base_get(b));

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
		return true;
	}

	bool err;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_merge_default_impl(extent_base_get(a),
		    extent_base_get(b));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = (*r_extent_hooks)->merge(*r_extent_hooks,
		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
		    extent_size_get(b), extent_committed_get(a),
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	if (err) {
		return true;
	}

	/*
	 * The rtree writes must happen while all the relevant elements are
	 * owned, so the following code uses decomposed helper functions rather
	 * than extent_{,de}register() to do things in the right order.
	 */
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
	    &a_elm_b);
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
	    &b_elm_b);

	extent_lock2(tsdn, a, b);

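	/*
	 * Clear the interior boundary elements: a's last-page elm and b's
	 * first-page elm no longer delimit separate extents once merged.
	 */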
	if (a_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
		    SC_NSIZES, false);
	}
	if (b_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
		    SC_NSIZES, false);
	} else {
		b_elm_b = b_elm_a;
	}

	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
	extent_szind_set(a, SC_NSIZES);
	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
	    extent_sn_get(a) : extent_sn_get(b));
	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));

	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
	    false);

	extent_unlock2(tsdn, a, b);

	extent_dalloc(tsdn, extent_arena_get(b), b);

	return false;
}

bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
}

bool
extent_boot(void) {
	if (rtree_new(&extents_rtree, true)) {
		return true;
	}

	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
	    WITNESS_RANK_EXTENT_POOL)) {
		return true;
	}

	if (have_dss) {
		extent_dss_boot();
	}

	return false;
}

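/*
 * Best-effort utilization introspection for the extent containing ptr; these
 * back the experimental utilization mallctls.
 */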
void
extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
    size_t *nfree, size_t *nregs, size_t *size) {
	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);

	const extent_t *extent = iealloc(tsdn, ptr);
	if (unlikely(extent == NULL)) {
		*nfree = *nregs = *size = 0;
		return;
	}

	*size = extent_size_get(extent);
	if (!extent_slab_get(extent)) {
		/* Non-slab extents hold exactly one allocated region. */
		*nfree = 0;
		*nregs = 1;
		return;
	}

	*nfree = extent_nfree_get(extent);
	*nregs = bin_infos[extent_szind_get(extent)].nregs;
	assert(*nfree <= *nregs);
	assert(*nfree * extent_usize_get(extent) <= *size);
}

void
extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
    size_t *nfree, size_t *nregs, size_t *size,
    size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
	    && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);

	const extent_t *extent = iealloc(tsdn, ptr);
	if (unlikely(extent == NULL)) {
		*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
		*slabcur_addr = NULL;
		return;
	}

	*size = extent_size_get(extent);
	if (!extent_slab_get(extent)) {
		*nfree = *bin_nfree = *bin_nregs = 0;
		*nregs = 1;
		*slabcur_addr = NULL;
		return;
	}

	*nfree = extent_nfree_get(extent);
	const szind_t szind = extent_szind_get(extent);
	*nregs = bin_infos[szind].nregs;
	assert(*nfree <= *nregs);
	assert(*nfree * extent_usize_get(extent) <= *size);

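	/*
	 * The bin-level counters are only maintained under config_stats;
	 * reading them consistently requires the owning bin shard's lock.
	 */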
	const arena_t *arena = extent_arena_get(extent);
	assert(arena != NULL);
	const unsigned binshard = extent_binshard_get(extent);
	bin_t *bin = &arena->bins[szind].bin_shards[binshard];

	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		*bin_nregs = *nregs * bin->stats.curslabs;
		assert(*bin_nregs >= bin->stats.curregs);
		*bin_nfree = *bin_nregs - bin->stats.curregs;
	} else {
		*bin_nfree = *bin_nregs = 0;
	}
	*slabcur_addr = extent_addr_get(bin->slabcur);
	assert(*slabcur_addr != NULL);
	malloc_mutex_unlock(tsdn, &bin->lock);
}