#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

/******************************************************************************/
/* Data. */

rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t extent_mutex_pool;

size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;

static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(NPSIZES+1);
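/*
 * Forward declarations of the default extent hooks and of the internal *_impl
 * variants, which additionally carry tsdn/arena state and the growing_retained
 * flag.
 */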
static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
#endif
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
#endif
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained);
const extent_hooks_t extent_hooks_default = {
    extent_alloc_default,
    extent_dalloc_default,
    extent_destroy_default,
    extent_commit_default,
    extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
    ,
    extent_purge_lazy_default
#else
    ,
    NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
    ,
    extent_purge_forced_default
#else
    ,
    NULL
#endif
#ifdef JEMALLOC_MAPS_COALESCE
    ,
    extent_split_default,
    extent_merge_default
#endif
};
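/*
 * Illustrative sketch (not part of this file): a custom hook table is normally
 * installed through the "arena.<i>.extent_hooks" mallctl.  The identifiers
 * my_alloc and my_hooks below are hypothetical.
 *
 *    static extent_hooks_t my_hooks = {
 *        my_alloc,   // alloc (required)
 *        NULL,       // dalloc: NULL opts out, so unused extents are retained
 *        NULL, NULL, NULL, NULL, NULL, NULL, NULL
 *    };
 *    extent_hooks_t *hooks = &my_hooks;
 *    mallctl("arena.0.extent_hooks", NULL, NULL, &hooks, sizeof(hooks));
 */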
/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;
/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained);

/******************************************************************************/
ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
    extent_esnead_comp)

typedef enum {
    lock_result_success,
    lock_result_failure,
    lock_result_no_extent
} lock_result_t;

static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result) {
    extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
        elm, true);

    if (extent1 == NULL) {
        return lock_result_no_extent;
    }
    /*
     * It's possible that the extent changed out from under us, and with it
     * the leaf->extent mapping.  We have to recheck while holding the lock.
     */
    extent_lock(tsdn, extent1);
    extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
        &extents_rtree, elm, true);

    if (extent1 == extent2) {
        *result = extent1;
        return lock_result_success;
    } else {
        extent_unlock(tsdn, extent1);
        return lock_result_failure;
    }
}
/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
 */
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
    extent_t *ret = NULL;
    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
        rtree_ctx, (uintptr_t)addr, false, false);
    if (elm == NULL) {
        return NULL;
    }
    lock_result_t lock_result;
    do {
        lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
    } while (lock_result == lock_result_failure);

    return ret;
}
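/*
 * extent_alloc()/extent_dalloc() manage the arena's cache of extent_t
 * structures (metadata only, not the pages they describe); allocation falls
 * back to the arena's base allocator when the cache is empty.
 */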
extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
    malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
    extent_t *extent = extent_avail_first(&arena->extent_avail);
    if (extent == NULL) {
        malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
        return base_alloc_extent(tsdn, arena->base);
    }
    extent_avail_remove(&arena->extent_avail, extent);
    malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
    return extent;
}

void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
    malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
    extent_avail_insert(&arena->extent_avail, extent);
    malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}
extent_hooks_t *
extent_hooks_get(arena_t *arena) {
    return base_extent_hooks_get(arena->base);
}

extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
    background_thread_info_t *info;
    if (have_background_thread) {
        info = arena_background_thread_info_get(arena);
        malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
    }
    extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
    if (have_background_thread) {
        malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
    }

    return ret;
}

static void
extent_hooks_assure_initialized(arena_t *arena,
    extent_hooks_t **r_extent_hooks) {
    if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
        *r_extent_hooks = extent_hooks_get(arena);
    }
}
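/*
 * extent_size_quantize_floor()/_ceil() round an extent size down/up to the
 * nearest page-size-class boundary (modulo sz_large_pad); the extents_t heaps
 * below are keyed by the floor-quantized size.
 */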
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
    size_t ret;
    pszind_t pind;

    assert(size > 0);
    assert((size & PAGE_MASK) == 0);

    pind = sz_psz2ind(size - sz_large_pad + 1);
    if (pind == 0) {
        /*
         * Avoid underflow.  This short-circuit would also do the right
         * thing for all sizes in the range for which there are
         * PAGE-spaced size classes, but it's simplest to just handle
         * the one case that would cause erroneous results.
         */
        return size;
    }
    ret = sz_pind2sz(pind - 1) + sz_large_pad;
    assert(ret <= size);
    return ret;
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
    size_t ret;

    assert(size > 0);
    assert(size - sz_large_pad <= LARGE_MAXCLASS);
    assert((size & PAGE_MASK) == 0);

    ret = extent_size_quantize_floor(size);
    if (ret < size) {
        /*
         * Skip a quantization that may have an adequately large extent,
         * because under-sized extents may be mixed in.  This only
         * happens when an unusual size is requested, i.e. for aligned
         * allocation, and is just one of several places where linear
         * search would potentially find sufficiently aligned available
         * memory somewhere lower.
         */
        ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
            sz_large_pad;
    }
    return ret;
}
/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce) {
    if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
        malloc_mutex_rank_exclusive)) {
        return true;
    }
    for (unsigned i = 0; i < NPSIZES+1; i++) {
        extent_heap_new(&extents->heaps[i]);
    }
    bitmap_init(extents->bitmap, &extents_bitmap_info, true);
    extent_list_init(&extents->lru);
    atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
    extents->state = state;
    extents->delay_coalesce = delay_coalesce;
    return false;
}

extent_state_t
extents_state_get(const extents_t *extents) {
    return extents->state;
}

size_t
extents_npages_get(extents_t *extents) {
    return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}
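/*
 * extents_insert_locked()/extents_remove_locked() keep the views of an
 * extents_t in sync: the size-keyed heaps, the heap-emptiness bitmap, the LRU
 * list, and the cached page count.  Callers must hold extents->mtx.
 */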
309 extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
310 malloc_mutex_assert_owner(tsdn, &extents->mtx);
311 assert(extent_state_get(extent) == extents->state);
313 size_t size = extent_size_get(extent);
314 size_t psz = extent_size_quantize_floor(size);
315 pszind_t pind = sz_psz2ind(psz);
316 if (extent_heap_empty(&extents->heaps[pind])) {
317 bitmap_unset(extents->bitmap, &extents_bitmap_info,
320 extent_heap_insert(&extents->heaps[pind], extent);
321 extent_list_append(&extents->lru, extent);
322 size_t npages = size >> LG_PAGE;
    /*
     * All modifications to npages hold the mutex (as asserted above), so we
     * don't need an atomic fetch-add; we can get by with a load followed by
     * a store.
     */
328 size_t cur_extents_npages =
329 atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
330 atomic_store_zu(&extents->npages, cur_extents_npages + npages,
335 extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
336 malloc_mutex_assert_owner(tsdn, &extents->mtx);
337 assert(extent_state_get(extent) == extents->state);
339 size_t size = extent_size_get(extent);
340 size_t psz = extent_size_quantize_floor(size);
341 pszind_t pind = sz_psz2ind(psz);
342 extent_heap_remove(&extents->heaps[pind], extent);
343 if (extent_heap_empty(&extents->heaps[pind])) {
344 bitmap_set(extents->bitmap, &extents_bitmap_info,
347 extent_list_remove(&extents->lru, extent);
348 size_t npages = size >> LG_PAGE;
350 * As in extents_insert_locked, we hold extents->mtx and so don't need
351 * atomic operations for updating extents->npages.
353 size_t cur_extents_npages =
354 atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
355 assert(cur_extents_npages >= npages);
356 atomic_store_zu(&extents->npages,
357 cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}

/*
 * Find an extent with size [min_size, max_size) to satisfy the alignment
 * requirement.  For each size, try only the first extent in the heap.
 */
static extent_t *
365 extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
367 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
368 pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
370 for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
371 &extents_bitmap_info, (size_t)pind); i < pind_max; i =
372 (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
375 assert(!extent_heap_empty(&extents->heaps[i]));
376 extent_t *extent = extent_heap_first(&extents->heaps[i]);
377 uintptr_t base = (uintptr_t)extent_base_get(extent);
378 size_t candidate_size = extent_size_get(extent);
379 assert(candidate_size >= min_size);
381 uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
382 PAGE_CEILING(alignment));
383 if (base > next_align || base + candidate_size <= next_align) {
384 /* Overflow or not crossing the next alignment. */
388 size_t leadsize = next_align - base;
389 if (candidate_size - leadsize >= min_size) {
397 /* Do any-best-fit extent selection, i.e. select any extent that best fits. */
399 extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
401 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
402 pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
406 * In order to reduce fragmentation, avoid reusing and splitting
407 * large extents for much smaller sizes.
409 if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
412 assert(!extent_heap_empty(&extents->heaps[i]));
413 extent_t *extent = extent_heap_first(&extents->heaps[i]);
414 assert(extent_size_get(extent) >= size);
        return extent;
    }

    return NULL;
}

/*
 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
 * large enough.
 */
static extent_t *
426 extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
428 extent_t *ret = NULL;
430 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
431 for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
432 &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
433 (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
435 assert(!extent_heap_empty(&extents->heaps[i]));
436 extent_t *extent = extent_heap_first(&extents->heaps[i]);
437 assert(extent_size_get(extent) >= size);
438 if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
            ret = extent;
        }
        if (i == NPSIZES) {
            break;
        }
        assert(i < NPSIZES);
    }

    return ret;
}

/*
 * Do {best,first}-fit extent selection, where the selection policy choice is
 * based on extents->delay_coalesce.  Best-fit selection requires less
 * searching, but its layout policy is less stable and may cause higher virtual
 * memory fragmentation as a side effect.
 */
static extent_t *
457 extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
458 size_t esize, size_t alignment) {
459 malloc_mutex_assert_owner(tsdn, &extents->mtx);
461 size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
462 /* Beware size_t wrap-around. */
463 if (max_size < esize) {
467 extent_t *extent = extents->delay_coalesce ?
468 extents_best_fit_locked(tsdn, arena, extents, max_size) :
469 extents_first_fit_locked(tsdn, arena, extents, max_size);
471 if (alignment > PAGE && extent == NULL) {
473 * max_size guarantees the alignment requirement but is rather
474 * pessimistic. Next we try to satisfy the aligned allocation
475 * with sizes in [esize, max_size).
477 extent = extents_fit_alignment(extents, esize, max_size,
485 extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
486 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
488 extent_state_set(extent, extent_state_active);
490 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
491 extents, extent, &coalesced, false);
492 extent_state_set(extent, extents_state_get(extents));
497 extents_insert_locked(tsdn, extents, extent);
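/*
 * extents_alloc()/extents_dalloc()/extents_evict() are the arena-facing entry
 * points: they recycle extents out of an extents_t, return unused extents to
 * one, and evict from the LRU until only npages_min pages remain.
 */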
502 extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
503 extents_t *extents, void *new_addr, size_t size, size_t pad,
504 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
505 assert(size + pad != 0);
506 assert(alignment != 0);
507 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
508 WITNESS_RANK_CORE, 0);
510 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
511 new_addr, size, pad, alignment, slab, szind, zero, commit, false);
512 assert(extent == NULL || extent_dumpable_get(extent));
517 extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
518 extents_t *extents, extent_t *extent) {
519 assert(extent_base_get(extent) != NULL);
520 assert(extent_size_get(extent) != 0);
521 assert(extent_dumpable_get(extent));
522 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
523 WITNESS_RANK_CORE, 0);
525 extent_addr_set(extent, extent_base_get(extent));
526 extent_zeroed_set(extent, false);
528 extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
532 extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
533 extents_t *extents, size_t npages_min) {
534 rtree_ctx_t rtree_ctx_fallback;
535 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
537 malloc_mutex_lock(tsdn, &extents->mtx);
540 * Get the LRU coalesced extent, if any. If coalescing was delayed,
541 * the loop will iterate until the LRU extent is fully coalesced.
545 /* Get the LRU extent, if any. */
546 extent = extent_list_first(&extents->lru);
547 if (extent == NULL) {
550 /* Check the eviction limit. */
551 size_t extents_npages = atomic_load_zu(&extents->npages,
553 if (extents_npages <= npages_min) {
557 extents_remove_locked(tsdn, extents, extent);
558 if (!extents->delay_coalesce) {
561 /* Try to coalesce. */
562 if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
563 rtree_ctx, extents, extent)) {
567 * The LRU extent was just coalesced and the result placed in
568 * the LRU at its neighbor's position. Start over.
573 * Either mark the extent active or deregister it to protect against
574 * concurrent operations.
576 switch (extents_state_get(extents)) {
577 case extent_state_active:
579 case extent_state_dirty:
580 case extent_state_muzzy:
581 extent_state_set(extent, extent_state_active);
583 case extent_state_retained:
584 extent_deregister(tsdn, extent);
591 malloc_mutex_unlock(tsdn, &extents->mtx);
596 extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
597 extents_t *extents, extent_t *extent, bool growing_retained) {
599 * Leak extent after making sure its pages have already been purged, so
600 * that this is only a virtual memory leak.
602 if (extents_state_get(extents) == extent_state_dirty) {
603 if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
604 extent, 0, extent_size_get(extent), growing_retained)) {
605 extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
606 extent, 0, extent_size_get(extent),
610 extent_dalloc(tsdn, arena, extent);
614 extents_prefork(tsdn_t *tsdn, extents_t *extents) {
615 malloc_mutex_prefork(tsdn, &extents->mtx);
619 extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
620 malloc_mutex_postfork_parent(tsdn, &extents->mtx);
624 extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
625 malloc_mutex_postfork_child(tsdn, &extents->mtx);
629 extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
631 assert(extent_arena_get(extent) == arena);
632 assert(extent_state_get(extent) == extent_state_active);
634 extent_state_set(extent, extents_state_get(extents));
635 extents_insert_locked(tsdn, extents, extent);
639 extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
641 malloc_mutex_lock(tsdn, &extents->mtx);
642 extent_deactivate_locked(tsdn, arena, extents, extent);
643 malloc_mutex_unlock(tsdn, &extents->mtx);
647 extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
649 assert(extent_arena_get(extent) == arena);
650 assert(extent_state_get(extent) == extents_state_get(extents));
652 extents_remove_locked(tsdn, extents, extent);
653 extent_state_set(extent, extent_state_active);
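/*
 * Look up the rtree leaf elements mapping the first and last pages of an
 * extent; writers update both slots so lookups from either end of the extent
 * find the same extent_t.
 */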
657 extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
658 const extent_t *extent, bool dependent, bool init_missing,
659 rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
660 *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
661 (uintptr_t)extent_base_get(extent), dependent, init_missing);
662 if (!dependent && *r_elm_a == NULL) {
665 assert(*r_elm_a != NULL);
667 *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
668 (uintptr_t)extent_last_get(extent), dependent, init_missing);
669 if (!dependent && *r_elm_b == NULL) {
672 assert(*r_elm_b != NULL);
678 extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
679 rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
680 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
682 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
688 extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
690 assert(extent_slab_get(extent));
692 /* Register interior. */
693 for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
694 rtree_write(tsdn, &extents_rtree, rtree_ctx,
695 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
696 LG_PAGE), extent, szind, true);
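/*
 * extent_gdump_add()/extent_gdump_sub() maintain the global page counters
 * (curpages/highpages) used to trigger a prof gdump whenever a new high-water
 * mark of active pages is reached.
 */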
701 extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
702 cassert(config_prof);
703 /* prof_gdump() requirement. */
704 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
705 WITNESS_RANK_CORE, 0);
707 if (opt_prof && extent_state_get(extent) == extent_state_active) {
708 size_t nadd = extent_size_get(extent) >> LG_PAGE;
709 size_t cur = atomic_fetch_add_zu(&curpages, nadd,
710 ATOMIC_RELAXED) + nadd;
711 size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
712 while (cur > high && !atomic_compare_exchange_weak_zu(
713 &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
715 * Don't refresh cur, because it may have decreased
716 * since this thread lost the highpages update race.
717 * Note that high is updated in case of CAS failure.
720 if (cur > high && prof_gdump_get_unlocked()) {
727 extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
728 cassert(config_prof);
730 if (opt_prof && extent_state_get(extent) == extent_state_active) {
731 size_t nsub = extent_size_get(extent) >> LG_PAGE;
732 assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
733 atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
738 extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
739 rtree_ctx_t rtree_ctx_fallback;
740 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
741 rtree_leaf_elm_t *elm_a, *elm_b;
744 * We need to hold the lock to protect against a concurrent coalesce
745 * operation that sees us in a partial state.
747 extent_lock(tsdn, extent);
749 if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
754 szind_t szind = extent_szind_get_maybe_invalid(extent);
755 bool slab = extent_slab_get(extent);
756 extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
758 extent_interior_register(tsdn, rtree_ctx, extent, szind);
761 extent_unlock(tsdn, extent);
763 if (config_prof && gdump_add) {
764 extent_gdump_add(tsdn, extent);
771 extent_register(tsdn_t *tsdn, extent_t *extent) {
772 return extent_register_impl(tsdn, extent, true);
776 extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
777 return extent_register_impl(tsdn, extent, false);
781 extent_reregister(tsdn_t *tsdn, extent_t *extent) {
782 bool err = extent_register(tsdn, extent);
    assert(!err);
}

/*
 * Removes all pointers to the given extent from the global rtree indices for
 * its interior.  This is relevant for slab extents, for which we need to do
 * metadata lookups at places other than the head of the extent.  We deregister
 * on the interior, then, when an extent moves from being an active slab to an
 * inactive state.
 */
static void
794 extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
798 assert(extent_slab_get(extent));
800 for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
801 rtree_clear(tsdn, &extents_rtree, rtree_ctx,
802 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
            LG_PAGE));
    }
}

/*
 * Removes all pointers to the given extent from the global rtree.
 */
static void
811 extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
812 rtree_ctx_t rtree_ctx_fallback;
813 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
814 rtree_leaf_elm_t *elm_a, *elm_b;
815 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
818 extent_lock(tsdn, extent);
820 extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
821 if (extent_slab_get(extent)) {
822 extent_interior_deregister(tsdn, rtree_ctx, extent);
823 extent_slab_set(extent, false);
826 extent_unlock(tsdn, extent);
828 if (config_prof && gdump) {
829 extent_gdump_sub(tsdn, extent);
834 extent_deregister(tsdn_t *tsdn, extent_t *extent) {
835 extent_deregister_impl(tsdn, extent, true);
839 extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
840 extent_deregister_impl(tsdn, extent, false);
}

/*
 * Tries to find and remove an extent from extents that can be used for the
 * given allocation request.
 */
static extent_t *
848 extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
849 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
850 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
851 bool growing_retained) {
852 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
853 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
854 assert(alignment > 0);
855 if (config_debug && new_addr != NULL) {
857 * Non-NULL new_addr has two use cases:
859 * 1) Recycle a known-extant extent, e.g. during purging.
860 * 2) Perform in-place expanding reallocation.
862 * Regardless of use case, new_addr must either refer to a
863 * non-existing extent, or to the base of an extant extent,
864 * since only active slabs support interior lookups (which of
865 * course cannot be recycled).
867 assert(PAGE_ADDR2BASE(new_addr) == new_addr);
869 assert(alignment <= PAGE);
872 size_t esize = size + pad;
873 malloc_mutex_lock(tsdn, &extents->mtx);
874 extent_hooks_assure_initialized(arena, r_extent_hooks);
876 if (new_addr != NULL) {
877 extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
878 if (extent != NULL) {
880 * We might null-out extent to report an error, but we
881 * still need to unlock the associated mutex after.
883 extent_t *unlock_extent = extent;
884 assert(extent_base_get(extent) == new_addr);
885 if (extent_arena_get(extent) != arena ||
886 extent_size_get(extent) < esize ||
887 extent_state_get(extent) !=
888 extents_state_get(extents)) {
891 extent_unlock(tsdn, unlock_extent);
894 extent = extents_fit_locked(tsdn, arena, extents, esize,
897 if (extent == NULL) {
898 malloc_mutex_unlock(tsdn, &extents->mtx);
902 extent_activate_locked(tsdn, arena, extents, extent);
903 malloc_mutex_unlock(tsdn, &extents->mtx);
/*
 * Given an allocation request and an extent guaranteed to be able to satisfy
 * it, this splits off lead and trail extents, leaving extent pointing to an
 * extent satisfying the allocation.
 * This function doesn't put lead or trail into any extents_t; it's the caller's
 * job to ensure that they can be reused.
 */
typedef enum {
    /*
     * Split successfully.  lead, extent, and trail, are modified to extents
     * describing the ranges before, in, and after the given allocation.
     */
    extent_split_interior_ok,
    /*
     * The extent can't satisfy the given allocation request.  None of the
     * input extent_t *s are touched.
     */
    extent_split_interior_cant_alloc,
    /*
     * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
     * and salvage what's still salvageable (if *to_salvage is non-NULL).
     * None of lead, extent, or trail are valid.
     */
    extent_split_interior_error
} extent_split_interior_result_t;
934 static extent_split_interior_result_t
935 extent_split_interior(tsdn_t *tsdn, arena_t *arena,
936 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
937 /* The result of splitting, in case of success. */
938 extent_t **extent, extent_t **lead, extent_t **trail,
939 /* The mess to clean up, in case of error. */
940 extent_t **to_leak, extent_t **to_salvage,
941 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
942 szind_t szind, bool growing_retained) {
943 size_t esize = size + pad;
944 size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
945 PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
946 assert(new_addr == NULL || leadsize == 0);
947 if (extent_size_get(*extent) < leadsize + esize) {
948 return extent_split_interior_cant_alloc;
950 size_t trailsize = extent_size_get(*extent) - leadsize - esize;
957 /* Split the lead. */
960 *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
961 *lead, leadsize, NSIZES, false, esize + trailsize, szind,
962 slab, growing_retained);
963 if (*extent == NULL) {
966 return extent_split_interior_error;
970 /* Split the trail. */
971 if (trailsize != 0) {
972 *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
973 esize, szind, slab, trailsize, NSIZES, false,
975 if (*trail == NULL) {
980 return extent_split_interior_error;
984 if (leadsize == 0 && trailsize == 0) {
986 * Splitting causes szind to be set as a side effect, but no
987 * splitting occurred.
989 extent_szind_set(*extent, szind);
990 if (szind != NSIZES) {
991 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
992 (uintptr_t)extent_addr_get(*extent), szind, slab);
993 if (slab && extent_size_get(*extent) > PAGE) {
994 rtree_szind_slab_update(tsdn, &extents_rtree,
996 (uintptr_t)extent_past_get(*extent) -
997 (uintptr_t)PAGE, szind, slab);
1002 return extent_split_interior_ok;
}

/*
 * This fulfills the indicated allocation request out of the given extent (which
 * the caller should have ensured was big enough).  If there's any unused space
 * before or after the resulting allocation, that space is given its own extent
 * and put back into extents.
 */
static extent_t *
1012 extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
1013 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1014 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
1015 szind_t szind, extent_t *extent, bool growing_retained) {
1019 extent_t *to_salvage;
1021 extent_split_interior_result_t result = extent_split_interior(
1022 tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1023 &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
1026 if (result == extent_split_interior_ok) {
1028 extent_deactivate(tsdn, arena, extents, lead);
1030 if (trail != NULL) {
1031 extent_deactivate(tsdn, arena, extents, trail);
1036 * We should have picked an extent that was large enough to
1037 * fulfill our allocation request.
1039 assert(result == extent_split_interior_error);
1040 if (to_salvage != NULL) {
1041 extent_deregister(tsdn, to_salvage);
1043 if (to_leak != NULL) {
1044 void *leak = extent_base_get(to_leak);
1045 extent_deregister_no_gdump_sub(tsdn, to_leak);
1046 extents_leak(tsdn, arena, r_extent_hooks, extents,
1047 to_leak, growing_retained);
1048 assert(extent_lock_from_addr(tsdn, rtree_ctx, leak)
            == NULL);
        }
        return NULL;
    }
    unreachable();
}

/*
 * Tries to satisfy the given allocation request by reusing one of the extents
 * in the given extents_t.
 */
static extent_t *
1061 extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1062 extents_t *extents, void *new_addr, size_t size, size_t pad,
1063 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
1064 bool growing_retained) {
1065 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1066 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1067 assert(new_addr == NULL || !slab);
1068 assert(pad == 0 || !slab);
1069 assert(!*zero || !slab);
1071 rtree_ctx_t rtree_ctx_fallback;
1072 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1074 extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
1075 rtree_ctx, extents, new_addr, size, pad, alignment, slab,
1077 if (extent == NULL) {
1081 extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
1082 extents, new_addr, size, pad, alignment, slab, szind, extent,
1084 if (extent == NULL) {
1088 if (*commit && !extent_committed_get(extent)) {
1089 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
1090 0, extent_size_get(extent), growing_retained)) {
1091 extent_record(tsdn, arena, r_extent_hooks, extents,
1092 extent, growing_retained);
1095 extent_zeroed_set(extent, true);
1098 if (extent_committed_get(extent)) {
1101 if (extent_zeroed_get(extent)) {
1106 extent_addr_randomize(tsdn, extent, alignment);
1108 assert(extent_state_get(extent) == extent_state_active);
1110 extent_slab_set(extent, slab);
1111 extent_interior_register(tsdn, rtree_ctx, extent, szind);
1115 void *addr = extent_base_get(extent);
1116 if (!extent_zeroed_get(extent)) {
1117 size_t size = extent_size_get(extent);
1118 if (pages_purge_forced(addr, size)) {
1119 memset(addr, 0, size);
1121 } else if (config_debug) {
1122 size_t *p = (size_t *)(uintptr_t)addr;
1123 /* Check the first page only. */
1124 for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
                assert(p[i] == 0);
            }
        }
    }
    return extent;
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
 * advantage of this to avoid demanding zeroed extents, but taking advantage of
 * them if they are returned.
 */
static void *
1139 extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
1140 size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
1144 assert(alignment != 0);
1146 /* "primary" dss. */
1147 if (have_dss && dss_prec == dss_prec_primary && (ret =
1148 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1153 if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
1157 /* "secondary" dss. */
1158 if (have_dss && dss_prec == dss_prec_secondary && (ret =
1159 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1164 /* All strategies for allocation failed. */
1169 extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
1170 size_t size, size_t alignment, bool *zero, bool *commit) {
1171 void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
1172 commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
1174 if (have_madvise_huge && ret) {
1175 pages_set_thp_state(ret, size);
1181 extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
1182 size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
1186 tsdn = tsdn_fetch();
1187 arena = arena_get(tsdn, arena_ind, false);
1189 * The arena we're allocating on behalf of must have been initialized
1192 assert(arena != NULL);
1194 return extent_alloc_default_impl(tsdn, arena, new_addr, size,
1195 alignment, zero, commit);
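/*
 * Wrap calls into user-provided extent hooks with reentrancy guards so that
 * any allocation the hooks themselves perform takes a safe path instead of
 * recursing back into this arena.
 */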
1199 extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
1200 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1201 if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
1203 * The only legitimate case of customized extent hooks for a0 is
1204 * hooks with no allocation activities. One such example is to
1205 * place metadata on pre-allocated resources such as huge pages.
1206 * In that case, rely on reentrancy_level checks to catch
1207 * infinite recursions.
1209 pre_reentrancy(tsd, NULL);
1211 pre_reentrancy(tsd, arena);
1216 extent_hook_post_reentrancy(tsdn_t *tsdn) {
1217 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1218 post_reentrancy(tsd);
}

/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 */
static extent_t *
1227 extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
1228 extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
1229 bool slab, szind_t szind, bool *zero, bool *commit) {
1230 malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
1231 assert(pad == 0 || !slab);
1232 assert(!*zero || !slab);
1234 size_t esize = size + pad;
1235 size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
1236 /* Beware size_t wrap-around. */
1237 if (alloc_size_min < esize) {
1241 * Find the next extent size in the series that would be large enough to
1242 * satisfy this request.
1244 pszind_t egn_skip = 0;
1245 size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1246 while (alloc_size < alloc_size_min) {
1248 if (arena->extent_grow_next + egn_skip == NPSIZES) {
1249 /* Outside legal range. */
1252 assert(arena->extent_grow_next + egn_skip < NPSIZES);
1253 alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1256 extent_t *extent = extent_alloc(tsdn, arena);
1257 if (extent == NULL) {
1260 bool zeroed = false;
1261 bool committed = false;
1264 if (*r_extent_hooks == &extent_hooks_default) {
1265 ptr = extent_alloc_default_impl(tsdn, arena, NULL,
1266 alloc_size, PAGE, &zeroed, &committed);
1268 extent_hook_pre_reentrancy(tsdn, arena);
1269 ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
1270 alloc_size, PAGE, &zeroed, &committed,
1271 arena_ind_get(arena));
1272 extent_hook_post_reentrancy(tsdn);
1275 extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
1276 arena_extent_sn_next(arena), extent_state_active, zeroed,
1279 extent_dalloc(tsdn, arena, extent);
1283 if (extent_register_no_gdump_add(tsdn, extent)) {
1284 extents_leak(tsdn, arena, r_extent_hooks,
1285 &arena->extents_retained, extent, true);
1289 if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
1292 if (extent_committed_get(extent)) {
1296 rtree_ctx_t rtree_ctx_fallback;
1297 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1302 extent_t *to_salvage;
1303 extent_split_interior_result_t result = extent_split_interior(
1304 tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1305 &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
1308 if (result == extent_split_interior_ok) {
1310 extent_record(tsdn, arena, r_extent_hooks,
1311 &arena->extents_retained, lead, true);
1313 if (trail != NULL) {
1314 extent_record(tsdn, arena, r_extent_hooks,
1315 &arena->extents_retained, trail, true);
1319 * We should have allocated a sufficiently large extent; the
1320 * cant_alloc case should not occur.
1322 assert(result == extent_split_interior_error);
1323 if (to_salvage != NULL) {
1325 extent_gdump_add(tsdn, to_salvage);
1327 extent_record(tsdn, arena, r_extent_hooks,
1328 &arena->extents_retained, to_salvage, true);
1330 if (to_leak != NULL) {
1331 extent_deregister_no_gdump_sub(tsdn, to_leak);
1332 extents_leak(tsdn, arena, r_extent_hooks,
1333 &arena->extents_retained, to_leak, true);
1338 if (*commit && !extent_committed_get(extent)) {
1339 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
1340 extent_size_get(extent), true)) {
1341 extent_record(tsdn, arena, r_extent_hooks,
1342 &arena->extents_retained, extent, true);
1345 extent_zeroed_set(extent, true);
1349 * Increment extent_grow_next if doing so wouldn't exceed the allowed
1352 if (arena->extent_grow_next + egn_skip + 1 <=
1353 arena->retain_grow_limit) {
1354 arena->extent_grow_next += egn_skip + 1;
1356 arena->extent_grow_next = arena->retain_grow_limit;
1358 /* All opportunities for failure are past. */
1359 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1362 /* Adjust gdump stats now that extent is final size. */
1363 extent_gdump_add(tsdn, extent);
1366 extent_addr_randomize(tsdn, extent, alignment);
1369 rtree_ctx_t rtree_ctx_fallback;
1370 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
1371 &rtree_ctx_fallback);
1373 extent_slab_set(extent, true);
1374 extent_interior_register(tsdn, rtree_ctx, extent, szind);
1376 if (*zero && !extent_zeroed_get(extent)) {
1377 void *addr = extent_base_get(extent);
1378 size_t size = extent_size_get(extent);
1379 if (pages_purge_forced(addr, size)) {
1380 memset(addr, 0, size);
1386 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
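/*
 * Satisfy an allocation from the retained extents when possible, growing the
 * retained set (under extent_grow_mtx) when opt_retain is enabled and no
 * suitable retained extent exists.
 */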
1391 extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
1392 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1393 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1395 assert(alignment != 0);
1397 malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
1399 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
1400 &arena->extents_retained, new_addr, size, pad, alignment, slab,
1401 szind, zero, commit, true);
1402 if (extent != NULL) {
1403 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1405 extent_gdump_add(tsdn, extent);
1407 } else if (opt_retain && new_addr == NULL) {
1408 extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
1409 pad, alignment, slab, szind, zero, commit);
1410 /* extent_grow_retained() always releases extent_grow_mtx. */
1412 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1414 malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
1420 extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
1421 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1422 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1423 size_t esize = size + pad;
1424 extent_t *extent = extent_alloc(tsdn, arena);
1425 if (extent == NULL) {
1429 if (*r_extent_hooks == &extent_hooks_default) {
1430 /* Call directly to propagate tsdn. */
1431 addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
1432 alignment, zero, commit);
1434 extent_hook_pre_reentrancy(tsdn, arena);
1435 addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
1436 esize, alignment, zero, commit, arena_ind_get(arena));
1437 extent_hook_post_reentrancy(tsdn);
1440 extent_dalloc(tsdn, arena, extent);
1443 extent_init(extent, arena, addr, esize, slab, szind,
1444 arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
1447 extent_addr_randomize(tsdn, extent, alignment);
1449 if (extent_register(tsdn, extent)) {
1450 extents_leak(tsdn, arena, r_extent_hooks,
1451 &arena->extents_retained, extent, false);
1459 extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1460 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1461 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1462 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1463 WITNESS_RANK_CORE, 0);
1465 extent_hooks_assure_initialized(arena, r_extent_hooks);
1467 extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
1468 new_addr, size, pad, alignment, slab, szind, zero, commit);
1469 if (extent == NULL) {
1470 if (opt_retain && new_addr != NULL) {
1472 * When retain is enabled and new_addr is set, we do not
1473 * attempt extent_alloc_wrapper_hard which does mmap
1474 * that is very unlikely to succeed (unless it happens
1475 * to be at the end).
1479 extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
1480 new_addr, size, pad, alignment, slab, szind, zero, commit);
1483 assert(extent == NULL || extent_dumpable_get(extent));
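/*
 * Two extents may coalesce only if they belong to the same arena, the outer
 * extent is in the same state as the containing extents_t, and both agree on
 * commit status.
 */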
1488 extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
1489 const extent_t *outer) {
1490 assert(extent_arena_get(inner) == arena);
1491 if (extent_arena_get(outer) != arena) {
1495 assert(extent_state_get(inner) == extent_state_active);
1496 if (extent_state_get(outer) != extents->state) {
1500 if (extent_committed_get(inner) != extent_committed_get(outer)) {
1508 extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1509 extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
1510 bool growing_retained) {
1511 assert(extent_can_coalesce(arena, extents, inner, outer));
1513 extent_activate_locked(tsdn, arena, extents, outer);
1515 malloc_mutex_unlock(tsdn, &extents->mtx);
1516 bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
1517 forward ? inner : outer, forward ? outer : inner, growing_retained);
1518 malloc_mutex_lock(tsdn, &extents->mtx);
1521 extent_deactivate_locked(tsdn, arena, extents, outer);
1528 extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
1529 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1530 extent_t *extent, bool *coalesced, bool growing_retained) {
1532 * Continue attempting to coalesce until failure, to protect against
1533 * races with other threads that are thwarted by this one.
1539 /* Try to coalesce forward. */
1540 extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
1541 extent_past_get(extent));
1544 * extents->mtx only protects against races for
1545 * like-state extents, so call extent_can_coalesce()
1546 * before releasing next's pool lock.
1548 bool can_coalesce = extent_can_coalesce(arena, extents,
1551 extent_unlock(tsdn, next);
1553 if (can_coalesce && !extent_coalesce(tsdn, arena,
1554 r_extent_hooks, extents, extent, next, true,
1555 growing_retained)) {
1556 if (extents->delay_coalesce) {
1557 /* Do minimal coalescing. */
1565 /* Try to coalesce backward. */
1566 extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
1567 extent_before_get(extent));
1569 bool can_coalesce = extent_can_coalesce(arena, extents,
1571 extent_unlock(tsdn, prev);
1573 if (can_coalesce && !extent_coalesce(tsdn, arena,
1574 r_extent_hooks, extents, extent, prev, false,
1575 growing_retained)) {
1577 if (extents->delay_coalesce) {
1578 /* Do minimal coalescing. */
1587 if (extents->delay_coalesce) {
        *coalesced = false;
    }
    return extent;
}

/*
 * Does the metadata management portions of putting an unused extent into the
 * given extents_t (coalesces, deregisters slab interiors, the heap operations).
 */
static void
1598 extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1599 extents_t *extents, extent_t *extent, bool growing_retained) {
1600 rtree_ctx_t rtree_ctx_fallback;
1601 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1603 assert((extents_state_get(extents) != extent_state_dirty &&
1604 extents_state_get(extents) != extent_state_muzzy) ||
1605 !extent_zeroed_get(extent));
1607 malloc_mutex_lock(tsdn, &extents->mtx);
1608 extent_hooks_assure_initialized(arena, r_extent_hooks);
1610 extent_szind_set(extent, NSIZES);
1611 if (extent_slab_get(extent)) {
1612 extent_interior_deregister(tsdn, rtree_ctx, extent);
1613 extent_slab_set(extent, false);
1616 assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1617 (uintptr_t)extent_base_get(extent), true) == extent);
1619 if (!extents->delay_coalesce) {
1620 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
1621 rtree_ctx, extents, extent, NULL, growing_retained);
1622 } else if (extent_size_get(extent) >= LARGE_MINCLASS) {
1623 /* Always coalesce large extents eagerly. */
1627 prev_size = extent_size_get(extent);
1628 assert(extent_state_get(extent) == extent_state_active);
1629 extent = extent_try_coalesce(tsdn, arena,
1630 r_extent_hooks, rtree_ctx, extents, extent,
1631 &coalesced, growing_retained);
1632 } while (coalesced &&
1633 extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
1635 extent_deactivate_locked(tsdn, arena, extents, extent);
1637 malloc_mutex_unlock(tsdn, &extents->mtx);
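/*
 * Dispose of a gap extent created during allocation: register it and hand it
 * to extent_dalloc_wrapper() (or leak it if registration fails).
 */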
1641 extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
1642 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1644 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1645 WITNESS_RANK_CORE, 0);
1647 if (extent_register(tsdn, extent)) {
1648 extents_leak(tsdn, arena, &extent_hooks,
1649 &arena->extents_retained, extent, false);
1652 extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
}

static bool
extent_dalloc_default_impl(void *addr, size_t size) {
    if (!have_dss || !extent_in_dss(addr)) {
        return extent_dalloc_mmap(addr, size);
    }
    return true;
}

static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
    return extent_dalloc_default_impl(addr, size);
}

static bool
1670 extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
1671 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1674 assert(extent_base_get(extent) != NULL);
1675 assert(extent_size_get(extent) != 0);
1676 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1677 WITNESS_RANK_CORE, 0);
1679 extent_addr_set(extent, extent_base_get(extent));
1681 extent_hooks_assure_initialized(arena, r_extent_hooks);
1682 /* Try to deallocate. */
1683 if (*r_extent_hooks == &extent_hooks_default) {
1684 /* Call directly to propagate tsdn. */
1685 err = extent_dalloc_default_impl(extent_base_get(extent),
1686 extent_size_get(extent));
1688 extent_hook_pre_reentrancy(tsdn, arena);
1689 err = ((*r_extent_hooks)->dalloc == NULL ||
1690 (*r_extent_hooks)->dalloc(*r_extent_hooks,
1691 extent_base_get(extent), extent_size_get(extent),
1692 extent_committed_get(extent), arena_ind_get(arena)));
1693 extent_hook_post_reentrancy(tsdn);
1697 extent_dalloc(tsdn, arena, extent);
1704 extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1705 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1706 assert(extent_dumpable_get(extent));
1707 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1708 WITNESS_RANK_CORE, 0);
1711 * Deregister first to avoid a race with other allocating threads, and
1712 * reregister if deallocation fails.
1714 extent_deregister(tsdn, extent);
1715 if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
1719 extent_reregister(tsdn, extent);
1720 if (*r_extent_hooks != &extent_hooks_default) {
1721 extent_hook_pre_reentrancy(tsdn, arena);
1723 /* Try to decommit; purge if that fails. */
1725 if (!extent_committed_get(extent)) {
1727 } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
1728 0, extent_size_get(extent))) {
1730 } else if ((*r_extent_hooks)->purge_forced != NULL &&
1731 !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
1732 extent_base_get(extent), extent_size_get(extent), 0,
1733 extent_size_get(extent), arena_ind_get(arena))) {
1735 } else if (extent_state_get(extent) == extent_state_muzzy ||
1736 ((*r_extent_hooks)->purge_lazy != NULL &&
1737 !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1738 extent_base_get(extent), extent_size_get(extent), 0,
1739 extent_size_get(extent), arena_ind_get(arena)))) {
1744 if (*r_extent_hooks != &extent_hooks_default) {
1745 extent_hook_post_reentrancy(tsdn);
1747 extent_zeroed_set(extent, zeroed);
1750 extent_gdump_sub(tsdn, extent);
1753 extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
        extent, false);
}

static void
extent_destroy_default_impl(void *addr, size_t size) {
    if (!have_dss || !extent_in_dss(addr)) {
        pages_unmap(addr, size);
    }
}

static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
    extent_destroy_default_impl(addr, size);
}

void
1771 extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
1772 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1773 assert(extent_base_get(extent) != NULL);
1774 assert(extent_size_get(extent) != 0);
1775 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1776 WITNESS_RANK_CORE, 0);
1778 /* Deregister first to avoid a race with other allocating threads. */
1779 extent_deregister(tsdn, extent);
1781 extent_addr_set(extent, extent_base_get(extent));
1783 extent_hooks_assure_initialized(arena, r_extent_hooks);
1784 /* Try to destroy; silently fail otherwise. */
1785 if (*r_extent_hooks == &extent_hooks_default) {
1786 /* Call directly to propagate tsdn. */
1787 extent_destroy_default_impl(extent_base_get(extent),
1788 extent_size_get(extent));
1789 } else if ((*r_extent_hooks)->destroy != NULL) {
1790 extent_hook_pre_reentrancy(tsdn, arena);
1791 (*r_extent_hooks)->destroy(*r_extent_hooks,
1792 extent_base_get(extent), extent_size_get(extent),
1793 extent_committed_get(extent), arena_ind_get(arena));
1794 extent_hook_post_reentrancy(tsdn);
1797 extent_dalloc(tsdn, arena, extent);
}

static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
    return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
        length);
}

static bool
1808 extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
1809 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1810 size_t length, bool growing_retained) {
1811 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1812 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1814 extent_hooks_assure_initialized(arena, r_extent_hooks);
1815 if (*r_extent_hooks != &extent_hooks_default) {
1816 extent_hook_pre_reentrancy(tsdn, arena);
1818 bool err = ((*r_extent_hooks)->commit == NULL ||
1819 (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
1820 extent_size_get(extent), offset, length, arena_ind_get(arena)));
1821 if (*r_extent_hooks != &extent_hooks_default) {
1822 extent_hook_post_reentrancy(tsdn);
1824 extent_committed_set(extent, extent_committed_get(extent) || !err);
1829 extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
1830 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1832 return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
        length, false);
}

static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
    return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
        length);
}

bool
1844 extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
1845 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1847 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1848 WITNESS_RANK_CORE, 0);
1850 extent_hooks_assure_initialized(arena, r_extent_hooks);
1852 if (*r_extent_hooks != &extent_hooks_default) {
1853 extent_hook_pre_reentrancy(tsdn, arena);
1855 bool err = ((*r_extent_hooks)->decommit == NULL ||
1856 (*r_extent_hooks)->decommit(*r_extent_hooks,
1857 extent_base_get(extent), extent_size_get(extent), offset, length,
1858 arena_ind_get(arena)));
1859 if (*r_extent_hooks != &extent_hooks_default) {
1860 extent_hook_post_reentrancy(tsdn);
1862 extent_committed_set(extent, extent_committed_get(extent) && err);
    return err;
}

#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
    assert(addr != NULL);
    assert((offset & PAGE_MASK) == 0);
    assert(length != 0);
    assert((length & PAGE_MASK) == 0);

    return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
        length);
}
#endif

static bool
1881 extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
1882 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1883 size_t length, bool growing_retained) {
1884 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1885 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1887 extent_hooks_assure_initialized(arena, r_extent_hooks);
1889 if ((*r_extent_hooks)->purge_lazy == NULL) {
1892 if (*r_extent_hooks != &extent_hooks_default) {
1893 extent_hook_pre_reentrancy(tsdn, arena);
1895 bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1896 extent_base_get(extent), extent_size_get(extent), offset, length,
1897 arena_ind_get(arena));
1898 if (*r_extent_hooks != &extent_hooks_default) {
1899 extent_hook_post_reentrancy(tsdn);
1906 extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
1907 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1909 return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
1910 offset, length, false);
}

#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
    assert(addr != NULL);
    assert((offset & PAGE_MASK) == 0);
    assert(length != 0);
    assert((length & PAGE_MASK) == 0);

    return pages_purge_forced((void *)((uintptr_t)addr +
        (uintptr_t)offset), length);
}
#endif

static bool
1928 extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
1929 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1930 size_t length, bool growing_retained) {
1931 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1932 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1934 extent_hooks_assure_initialized(arena, r_extent_hooks);
1936 if ((*r_extent_hooks)->purge_forced == NULL) {
1939 if (*r_extent_hooks != &extent_hooks_default) {
1940 extent_hook_pre_reentrancy(tsdn, arena);
1942 bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
1943 extent_base_get(extent), extent_size_get(extent), offset, length,
1944 arena_ind_get(arena));
1945 if (*r_extent_hooks != &extent_hooks_default) {
1946 extent_hook_post_reentrancy(tsdn);
1952 extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
1953 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1955 return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
1956 offset, length, false);
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
    return !maps_coalesce;
}
#endif

/*
 * Accepts the extent to split, and the characteristics of each side of the
 * split.  The 'a' parameters go with the 'lead' of the resulting pair of
 * extents (the lower addressed portion of the split), and the 'b' parameters go
 * with the trail (the higher addressed portion).  This makes 'extent' the lead,
 * and returns the trail (except in case of error).
 */
static extent_t *
1975 extent_split_impl(tsdn_t *tsdn, arena_t *arena,
1976 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
1977 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
1978 bool growing_retained) {
1979 assert(extent_size_get(extent) == size_a + size_b);
1980 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1981 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1983 extent_hooks_assure_initialized(arena, r_extent_hooks);
1985 if ((*r_extent_hooks)->split == NULL) {
1989 extent_t *trail = extent_alloc(tsdn, arena);
1990 if (trail == NULL) {
1994 extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
1995 size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
1996 extent_state_get(extent), extent_zeroed_get(extent),
1997 extent_committed_get(extent), extent_dumpable_get(extent));
1999 rtree_ctx_t rtree_ctx_fallback;
2000 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2001 rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
2005 extent_init(&lead, arena, extent_addr_get(extent), size_a,
2006 slab_a, szind_a, extent_sn_get(extent),
2007 extent_state_get(extent), extent_zeroed_get(extent),
2008 extent_committed_get(extent), extent_dumpable_get(extent));
2010 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
2011 true, &lead_elm_a, &lead_elm_b);
2013 rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
2014 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
2015 &trail_elm_a, &trail_elm_b);
2017 if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
2018 || trail_elm_b == NULL) {
2022 extent_lock2(tsdn, extent, trail);
2024 if (*r_extent_hooks != &extent_hooks_default) {
2025 extent_hook_pre_reentrancy(tsdn, arena);
2027 bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
2028 size_a + size_b, size_a, size_b, extent_committed_get(extent),
2029 arena_ind_get(arena));
2030 if (*r_extent_hooks != &extent_hooks_default) {
2031 extent_hook_post_reentrancy(tsdn);
2037 extent_size_set(extent, size_a);
2038 extent_szind_set(extent, szind_a);
2040 extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
2042 extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
2045 extent_unlock2(tsdn, extent, trail);
2049 extent_unlock2(tsdn, extent, trail);
2051 extent_dalloc(tsdn, arena, trail);
2057 extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
2058 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2059 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
2060 return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
2061 szind_a, slab_a, size_b, szind_b, slab_b, false);
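/*
 * Merging is refused when the underlying maps cannot coalesce, or when the two
 * ranges are not dss-mergeable (e.g. one came from dss and the other from
 * mmap).
 */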
static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
    if (!maps_coalesce) {
        return true;
    }
    if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
        return true;
    }

    return false;
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
    return extent_merge_default_impl(addr_a, addr_b);
}
#endif

static bool
2085 extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
2086 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
2087 bool growing_retained) {
2088 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2089 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2091 extent_hooks_assure_initialized(arena, r_extent_hooks);
2093 if ((*r_extent_hooks)->merge == NULL) {
2098 if (*r_extent_hooks == &extent_hooks_default) {
2099 /* Call directly to propagate tsdn. */
2100 err = extent_merge_default_impl(extent_base_get(a),
2101 extent_base_get(b));
2103 extent_hook_pre_reentrancy(tsdn, arena);
2104 err = (*r_extent_hooks)->merge(*r_extent_hooks,
2105 extent_base_get(a), extent_size_get(a), extent_base_get(b),
2106 extent_size_get(b), extent_committed_get(a),
2107 arena_ind_get(arena));
2108 extent_hook_post_reentrancy(tsdn);
2116 * The rtree writes must happen while all the relevant elements are
2117 * owned, so the following code uses decomposed helper functions rather
2118 * than extent_{,de}register() to do things in the right order.
2120 rtree_ctx_t rtree_ctx_fallback;
2121 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2122 rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
2123 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
2125 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
2128 extent_lock2(tsdn, a, b);
2130 if (a_elm_b != NULL) {
2131 rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
2134 if (b_elm_b != NULL) {
2135 rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
2141 extent_size_set(a, extent_size_get(a) + extent_size_get(b));
2142 extent_szind_set(a, NSIZES);
2143 extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
2144 extent_sn_get(a) : extent_sn_get(b));
2145 extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
2147 extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
2149 extent_unlock2(tsdn, a, b);
2151 extent_dalloc(tsdn, extent_arena_get(b), b);
2157 extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
2158 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
2159 return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
}

bool
extent_boot(void) {
    if (rtree_new(&extents_rtree, true)) {
        return true;
    }

    if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
        WITNESS_RANK_EXTENT_POOL)) {
        return true;
    }

    if (have_dss) {
        extent_dss_boot();
    }

    return false;
}