1 #define JEMALLOC_EXTENT_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/extent_dss.h"
7 #include "jemalloc/internal/extent_mmap.h"
8 #include "jemalloc/internal/ph.h"
9 #include "jemalloc/internal/rtree.h"
10 #include "jemalloc/internal/mutex.h"
11 #include "jemalloc/internal/mutex_pool.h"
13 /******************************************************************************/
/*
 * Global extent bookkeeping state.
 * NOTE(review): this dump embeds original line numbers in each line and is
 * missing interleaved lines (numbering gaps); code is kept verbatim.
 */
16 rtree_t extents_rtree;
17 /* Keyed by the address of the extent_t being protected. */
18 mutex_pool_t extent_mutex_pool;
/* Runtime option consumed by extents_best_fit_locked() below. */
20 size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
/* One bit per page-size class (plus one), used by the extents_t heaps. */
22 static const bitmap_info_t extents_bitmap_info =
23     BITMAP_INFO_INITIALIZER(NPSIZES+1);
/*
 * Forward declarations for the default extent hooks (the OS-facing
 * alloc/dalloc/commit/purge/split/merge callbacks installed in
 * extent_hooks_default) and their tsdn-aware *_impl counterparts.
 * NOTE(review): the #ifdef/#endif structure is incomplete in this dump
 * (closing #endif lines are among the omitted lines); kept verbatim.
 */
25 static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
26     size_t size, size_t alignment, bool *zero, bool *commit,
28 static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
29     size_t size, bool committed, unsigned arena_ind);
30 static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
31     size_t size, bool committed, unsigned arena_ind);
32 static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
33     size_t size, size_t offset, size_t length, unsigned arena_ind);
34 static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
35     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
36     size_t length, bool growing_retained);
37 static bool extent_decommit_default(extent_hooks_t *extent_hooks,
38     void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
39 #ifdef PAGES_CAN_PURGE_LAZY
40 static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
41     size_t size, size_t offset, size_t length, unsigned arena_ind);
43 static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
44     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
45     size_t length, bool growing_retained);
46 #ifdef PAGES_CAN_PURGE_FORCED
47 static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
48     void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
50 static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
51     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
52     size_t length, bool growing_retained);
53 #ifdef JEMALLOC_MAPS_COALESCE
54 static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
55     size_t size, size_t size_a, size_t size_b, bool committed,
58 static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
59     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
60     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
61     bool growing_retained);
62 #ifdef JEMALLOC_MAPS_COALESCE
63 static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
64     size_t size_a, void *addr_b, size_t size_b, bool committed,
67 static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
68     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
69     bool growing_retained);
/*
 * Table of default extent hooks; slots for optional purge/split/merge hooks
 * are conditionally present depending on platform capabilities.
 * NOTE(review): initializer entries and matching #else/#endif lines are
 * omitted from this dump (numbering gaps); kept verbatim.
 */
71 const extent_hooks_t extent_hooks_default = {
73     extent_dalloc_default,
74     extent_destroy_default,
75     extent_commit_default,
76     extent_decommit_default
77 #ifdef PAGES_CAN_PURGE_LAZY
79     extent_purge_lazy_default
84 #ifdef PAGES_CAN_PURGE_FORCED
86     extent_purge_forced_default
91 #ifdef JEMALLOC_MAPS_COALESCE
98 /* Used exclusively for gdump triggering. */
99 static atomic_zu_t curpages;
/* High-water mark of curpages; raced via CAS in extent_gdump_add(). */
100 static atomic_zu_t highpages;
102 /******************************************************************************/
104  * Function prototypes for static functions that are referenced prior to
/* (comment continuation lines omitted in this dump) */
108 static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
109 static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
110     extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
111     size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
112     bool *zero, bool *commit, bool growing_retained);
113 static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
114     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
115     extent_t *extent, bool *coalesced, bool growing_retained);
116 static void extent_record(tsdn_t *tsdn, arena_t *arena,
117     extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
118     bool growing_retained);
120 /******************************************************************************/
/* Pairing-heap functions for the per-arena extent_avail tree (macro-generated;
 * the comparator argument is on an omitted line). */
122 ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
/* Fragment of the lock_result_t enum; sibling enumerators
 * (lock_result_success / lock_result_failure, used below) are on omitted
 * lines. */
128     lock_result_no_extent
/*
 * Try to lock the extent recorded in rtree leaf elm.  Returns
 * lock_result_no_extent when the elm maps no extent, lock_result_success when
 * the mapping was stable across acquiring the lock, lock_result_failure when
 * it changed and the caller must retry.
 * NOTE(review): return type, some braces, and the result out-parameter store
 * are on omitted lines; code kept verbatim.
 */
132 extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
134     extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
137     if (extent1 == NULL) {
138         return lock_result_no_extent;
141      * It's possible that the extent changed out from under us, and with it
142      * the leaf->extent mapping. We have to recheck while holding the lock.
144     extent_lock(tsdn, extent1);
145     extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
146         &extents_rtree, elm, true);
148     if (extent1 == extent2) {
150         return lock_result_success;
152     extent_unlock(tsdn, extent1);
153     return lock_result_failure;
158  * Returns a pool-locked extent_t * if there's one associated with the given
159  * address, and NULL otherwise.
/* Retries extent_rtree_leaf_elm_try_lock() until the leaf->extent mapping is
 * stable (i.e. until the result is no longer lock_result_failure).
 * NOTE(review): return type, the do-loop opener, and the trailing
 * "return ret;" are on omitted lines. */
162 extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
163     extent_t *ret = NULL;
164     rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
165         rtree_ctx, (uintptr_t)addr, false, false);
169     lock_result_t lock_result;
171         lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
172     } while (lock_result == lock_result_failure);
/*
 * Pop a cached extent_t from the arena's extent_avail tree under
 * extent_avail_mtx, falling back to base_alloc_extent() when empty.
 * NOTE(review): return type, closing braces, and the final "return extent;"
 * are on omitted lines; code kept verbatim.
 */
177 extent_alloc(tsdn_t *tsdn, arena_t *arena) {
178     malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
179     extent_t *extent = extent_avail_first(&arena->extent_avail);
180     if (extent == NULL) {
181         malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
182         return base_alloc_extent(tsdn, arena->base);
184     extent_avail_remove(&arena->extent_avail, extent);
185     malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
/* Return a no-longer-used extent_t to the arena's extent_avail tree. */
190 extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
191     malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
192     extent_avail_insert(&arena->extent_avail, extent);
193     malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
/* Read the arena's current extent hooks from its base allocator. */
197 extent_hooks_get(arena_t *arena) {
198     return base_extent_hooks_get(arena->base);
/*
 * Install new extent hooks, returning the previous ones.  The background
 * thread info mutex is held across the swap (when background threads are
 * enabled) — presumably to keep the background thread from running purge
 * callbacks mid-swap; TODO confirm against background_thread.c.
 */
202 extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
203     background_thread_info_t *info;
204     if (have_background_thread) {
205         info = arena_background_thread_info_get(arena);
206         malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
208     extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
209     if (have_background_thread) {
210         malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
/* Lazily resolve EXTENT_HOOKS_INITIALIZER to the arena's real hooks. */
217 extent_hooks_assure_initialized(arena_t *arena,
218     extent_hooks_t **r_extent_hooks) {
219     if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
220         *r_extent_hooks = extent_hooks_get(arena);
/*
 * Round a page-multiple size down to the nearest page-size-class quantum
 * (accounting for sz_large_pad).  NOTE(review): return type, locals, and
 * several statements are on omitted lines; code kept verbatim.
 */
228 extent_size_quantize_floor(size_t size) {
233     assert((size & PAGE_MASK) == 0);
235     pind = sz_psz2ind(size - sz_large_pad + 1);
238      * Avoid underflow. This short-circuit would also do the right
239      * thing for all sizes in the range for which there are
240      * PAGE-spaced size classes, but it's simplest to just handle
241      * the one case that would cause erroneous results.
245     ret = sz_pind2sz(pind - 1) + sz_large_pad;
/* Round a page-multiple size up to the nearest page-size-class quantum. */
254 extent_size_quantize_ceil(size_t size) {
258     assert(size - sz_large_pad <= LARGE_MAXCLASS);
259     assert((size & PAGE_MASK) == 0);
261     ret = extent_size_quantize_floor(size);
264          * Skip a quantization that may have an adequately large extent,
265          * because under-sized extents may be mixed in. This only
266          * happens when an unusual size is requested, i.e. for aligned
267          * allocation, and is just one of several places where linear
268          * search would potentially find sufficiently aligned available
269          * memory somewhere lower.
271         ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
277 /* Generate pairing heap functions. */
278 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
/*
 * Initialize an extents_t: its mutex, one heap per page-size class, the
 * fullness bitmap, the LRU list, the page counter, and the coalescing policy.
 * NOTE(review): the error-return and closing braces are on omitted lines.
 */
281 extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
282     bool delay_coalesce) {
283     if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
284         malloc_mutex_rank_exclusive)) {
287     for (unsigned i = 0; i < NPSIZES+1; i++) {
288         extent_heap_new(&extents->heaps[i]);
290     bitmap_init(extents->bitmap, &extents_bitmap_info, true);
291     extent_list_init(&extents->lru);
292     atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
293     extents->state = state;
294     extents->delay_coalesce = delay_coalesce;
/* Accessor: the state (dirty/muzzy/retained/...) all contained extents share. */
299 extents_state_get(const extents_t *extents) {
300     return extents->state;
/* Accessor: current page count, relaxed load (monitoring only). */
304 extents_npages_get(extents_t *extents) {
305     return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
/*
 * Insert an extent into the size-class heap, the fullness bitmap, the LRU,
 * and the page counter.  Caller holds extents->mtx (asserted).
 * NOTE(review): closing braces and the bitmap index argument continuation
 * are on omitted lines; code kept verbatim.
 */
309 extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
310     malloc_mutex_assert_owner(tsdn, &extents->mtx);
311     assert(extent_state_get(extent) == extents->state);
313     size_t size = extent_size_get(extent);
314     size_t psz = extent_size_quantize_floor(size);
315     pszind_t pind = sz_psz2ind(psz);
316     if (extent_heap_empty(&extents->heaps[pind])) {
317         bitmap_unset(extents->bitmap, &extents_bitmap_info,
320     extent_heap_insert(&extents->heaps[pind], extent);
321     extent_list_append(&extents->lru, extent);
322     size_t npages = size >> LG_PAGE;
324      * All modifications to npages hold the mutex (as asserted above), so we
325      * don't need an atomic fetch-add; we can get by with a load followed by
328     size_t cur_extents_npages =
329         atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
330     atomic_store_zu(&extents->npages, cur_extents_npages + npages,
/* Exact inverse of extents_insert_locked(); same locking contract. */
335 extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
336     malloc_mutex_assert_owner(tsdn, &extents->mtx);
337     assert(extent_state_get(extent) == extents->state);
339     size_t size = extent_size_get(extent);
340     size_t psz = extent_size_quantize_floor(size);
341     pszind_t pind = sz_psz2ind(psz);
342     extent_heap_remove(&extents->heaps[pind], extent);
343     if (extent_heap_empty(&extents->heaps[pind])) {
344         bitmap_set(extents->bitmap, &extents_bitmap_info,
347     extent_list_remove(&extents->lru, extent);
348     size_t npages = size >> LG_PAGE;
350      * As in extents_insert_locked, we hold extents->mtx and so don't need
351      * atomic operations for updating extents->npages.
353     size_t cur_extents_npages =
354         atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
355     assert(cur_extents_npages >= npages);
356     atomic_store_zu(&extents->npages,
357         cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
361  * Find an extent with size [min_size, max_size) to satisfy the alignment
362  * requirement. For each size, try only the first extent in the heap.
/* Scans the non-empty size classes via bitmap_ffu; returns the first extent
 * whose front can be trimmed to an aligned region of >= min_size.
 * NOTE(review): return type, the extent-return statement, and the
 * loop-advance expression are on omitted lines; code kept verbatim. */
365 extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
367     pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
368     pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
370     for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
371         &extents_bitmap_info, (size_t)pind); i < pind_max; i =
372         (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
375         assert(!extent_heap_empty(&extents->heaps[i]));
376         extent_t *extent = extent_heap_first(&extents->heaps[i]);
377         uintptr_t base = (uintptr_t)extent_base_get(extent);
378         size_t candidate_size = extent_size_get(extent);
379         assert(candidate_size >= min_size);
381         uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
382             PAGE_CEILING(alignment));
383         if (base > next_align || base + candidate_size <= next_align) {
384             /* Overflow or not crossing the next alignment. */
388         size_t leadsize = next_align - base;
389         if (candidate_size - leadsize >= min_size) {
397 /* Do any-best-fit extent selection, i.e. select any extent that best fits. */
/* NOTE(review): return type and the return statements are on omitted lines. */
399 extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
401     pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
402     pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
406          * In order to reduce fragmentation, avoid reusing and splitting
407          * large extents for much smaller sizes.
409         if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
412         assert(!extent_heap_empty(&extents->heaps[i]));
413         extent_t *extent = extent_heap_first(&extents->heaps[i]);
414         assert(extent_size_get(extent) >= size);
422  * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
/* Walks every non-empty size class >= the request and keeps the extent that
 * compares lowest under extent_snad_comp (serial number / address order). */
426 extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
428     extent_t *ret = NULL;
430     pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
431     for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
432         &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
433         (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
435         assert(!extent_heap_empty(&extents->heaps[i]));
436         extent_t *extent = extent_heap_first(&extents->heaps[i]);
437         assert(extent_size_get(extent) >= size);
438         if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
451  * Do {best,first}-fit extent selection, where the selection policy choice is
452  * based on extents->delay_coalesce. Best-fit selection requires less
453  * searching, but its layout policy is less stable and may cause higher virtual
454  * memory fragmentation as a side effect.
/* NOTE(review): return type, the wrap-around early return, and the final
 * "return extent;" are on omitted lines; code kept verbatim. */
457 extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
458     size_t esize, size_t alignment) {
459     malloc_mutex_assert_owner(tsdn, &extents->mtx);
461     size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
462     /* Beware size_t wrap-around. */
463     if (max_size < esize) {
467     extent_t *extent = extents->delay_coalesce ?
468         extents_best_fit_locked(tsdn, arena, extents, max_size) :
469         extents_first_fit_locked(tsdn, arena, extents, max_size);
471     if (alignment > PAGE && extent == NULL) {
473          * max_size guarantees the alignment requirement but is rather
474          * pessimistic. Next we try to satisfy the aligned allocation
475          * with sizes in [esize, max_size).
477         extent = extents_fit_alignment(extents, esize, max_size,
/*
 * Attempt coalescing that was deferred by the delay_coalesce policy: mark the
 * extent active, try to merge with neighbors, then restore the container's
 * state.  NOTE(review): return type, the "coalesced" local declaration, and
 * the success/failure returns are on omitted lines; code kept verbatim.
 */
485 extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
486     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
488     extent_state_set(extent, extent_state_active);
490     extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
491         extents, extent, &coalesced, false);
492     extent_state_set(extent, extents_state_get(extents));
497     extents_insert_locked(tsdn, extents, extent);
/*
 * Public entry: allocate from a given extents_t by recycling a cached extent.
 * Thin wrapper over extent_recycle() with growing_retained=false.
 */
502 extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
503     extents_t *extents, void *new_addr, size_t size, size_t pad,
504     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
505     assert(size + pad != 0);
506     assert(alignment != 0);
507     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
508         WITNESS_RANK_CORE, 0);
510     extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
511         new_addr, size, pad, alignment, slab, szind, zero, commit, false);
512     assert(extent == NULL || extent_dumpable_get(extent));
/*
 * Public entry: return an extent to a given extents_t.  Resets addr to base
 * and clears the zeroed flag before recording.
 */
517 extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
518     extents_t *extents, extent_t *extent) {
519     assert(extent_base_get(extent) != NULL);
520     assert(extent_size_get(extent) != 0);
521     assert(extent_dumpable_get(extent));
522     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
523         WITNESS_RANK_CORE, 0);
525     extent_addr_set(extent, extent_base_get(extent));
526     extent_zeroed_set(extent, false);
528     extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
/*
 * Evict the LRU extent from an extents_t (down to npages_min pages), fully
 * coalescing it first when delay_coalesce is set, and either activating or
 * deregistering it depending on the container state.
 * NOTE(review): the loop construct, break/goto statements, and the final
 * return are on omitted lines, so control flow here is partial; kept verbatim.
 */
532 extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
533     extents_t *extents, size_t npages_min) {
534     rtree_ctx_t rtree_ctx_fallback;
535     rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
537     malloc_mutex_lock(tsdn, &extents->mtx);
540      * Get the LRU coalesced extent, if any. If coalescing was delayed,
541      * the loop will iterate until the LRU extent is fully coalesced.
545         /* Get the LRU extent, if any. */
546         extent = extent_list_first(&extents->lru);
547         if (extent == NULL) {
550         /* Check the eviction limit. */
551         size_t extents_npages = atomic_load_zu(&extents->npages,
553         if (extents_npages <= npages_min) {
557         extents_remove_locked(tsdn, extents, extent);
558         if (!extents->delay_coalesce) {
561         /* Try to coalesce. */
562         if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
563             rtree_ctx, extents, extent)) {
567          * The LRU extent was just coalesced and the result placed in
568          * the LRU at its neighbor's position. Start over.
573      * Either mark the extent active or deregister it to protect against
574      * concurrent operations.
576     switch (extents_state_get(extents)) {
577     case extent_state_active:
579     case extent_state_dirty:
580     case extent_state_muzzy:
581         extent_state_set(extent, extent_state_active);
583     case extent_state_retained:
584         extent_deregister(tsdn, extent);
591     malloc_mutex_unlock(tsdn, &extents->mtx);
/*
 * Abandon an extent's metadata (virtual-memory leak only): purge dirty pages
 * first so physical memory is not leaked, then free the extent_t itself.
 */
596 extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
597     extents_t *extents, extent_t *extent, bool growing_retained) {
599      * Leak extent after making sure its pages have already been purged, so
600      * that this is only a virtual memory leak.
602     if (extents_state_get(extents) == extent_state_dirty) {
603         if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
604             extent, 0, extent_size_get(extent), growing_retained)) {
605             extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
606                 extent, 0, extent_size_get(extent),
610     extent_dalloc(tsdn, arena, extent);
/* fork(2) protocol for the extents mutex: prefork / postfork pair. */
614 extents_prefork(tsdn_t *tsdn, extents_t *extents) {
615     malloc_mutex_prefork(tsdn, &extents->mtx);
619 extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
620     malloc_mutex_postfork_parent(tsdn, &extents->mtx);
624 extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
625     malloc_mutex_postfork_child(tsdn, &extents->mtx);
/* Transition active -> container state and insert; extents->mtx held. */
629 extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
631     assert(extent_arena_get(extent) == arena);
632     assert(extent_state_get(extent) == extent_state_active);
634     extent_state_set(extent, extents_state_get(extents));
635     extents_insert_locked(tsdn, extents, extent);
/* Locking wrapper around extent_deactivate_locked(). */
639 extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
641     malloc_mutex_lock(tsdn, &extents->mtx);
642     extent_deactivate_locked(tsdn, arena, extents, extent);
643     malloc_mutex_unlock(tsdn, &extents->mtx);
/* Inverse transition: remove from container and mark active. */
647 extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
649     assert(extent_arena_get(extent) == arena);
650     assert(extent_state_get(extent) == extents_state_get(extents));
652     extents_remove_locked(tsdn, extents, extent);
653     extent_state_set(extent, extent_state_active);
/*
 * Look up the rtree leaf elements covering an extent's first and last pages.
 * With !dependent, a NULL lookup is reported as failure (the early returns
 * are on omitted lines); with dependent, the elements must exist.
 */
657 extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
658     const extent_t *extent, bool dependent, bool init_missing,
659     rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
660     *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
661         (uintptr_t)extent_base_get(extent), dependent, init_missing);
662     if (!dependent && *r_elm_a == NULL) {
665     assert(*r_elm_a != NULL);
667     *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
668         (uintptr_t)extent_last_get(extent), dependent, init_missing);
669     if (!dependent && *r_elm_b == NULL) {
672     assert(*r_elm_b != NULL);
/* Write extent/szind/slab into both boundary leaf elements (elm_b guarded by
 * an omitted NULL check — it can equal elm_a for single-page extents;
 * TODO confirm). */
678 extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
679     rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
680     rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
682         rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
/* Register the interior pages (1 .. npages-2) of a slab in the rtree so that
 * interior pointers can be mapped back to the extent. */
688 extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
690     assert(extent_slab_get(extent));
692     /* Register interior. */
693     for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
694         rtree_write(tsdn, &extents_rtree, rtree_ctx,
695             (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
696             LG_PAGE), extent, szind, true);
/*
 * Profiling support: account an active extent's pages in curpages, maintain
 * the highpages high-water mark via CAS, and trigger a gdump when a new high
 * is reached (the prof_gdump() call is on an omitted line).
 */
701 extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
702     cassert(config_prof);
703     /* prof_gdump() requirement. */
704     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
705         WITNESS_RANK_CORE, 0);
707     if (opt_prof && extent_state_get(extent) == extent_state_active) {
708         size_t nadd = extent_size_get(extent) >> LG_PAGE;
709         size_t cur = atomic_fetch_add_zu(&curpages, nadd,
710             ATOMIC_RELAXED) + nadd;
711         size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
712         while (cur > high && !atomic_compare_exchange_weak_zu(
713             &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
715              * Don't refresh cur, because it may have decreased
716              * since this thread lost the highpages update race.
717              * Note that high is updated in case of CAS failure.
720         if (cur > high && prof_gdump_get_unlocked()) {
/* Inverse of extent_gdump_add(): subtract the extent's pages from curpages. */
727 extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
728     cassert(config_prof);
730     if (opt_prof && extent_state_get(extent) == extent_state_active) {
731         size_t nsub = extent_size_get(extent) >> LG_PAGE;
732         assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
733         atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
/*
 * Record an extent in the global rtree (boundary elements, plus interior
 * pages for slabs), optionally updating gdump accounting.  The extent's pool
 * lock is held so concurrent coalescers never observe a partial registration.
 * NOTE(review): the error path after the failed lookup and the returns are on
 * omitted lines; code kept verbatim.
 */
738 extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
739     rtree_ctx_t rtree_ctx_fallback;
740     rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
741     rtree_leaf_elm_t *elm_a, *elm_b;
744      * We need to hold the lock to protect against a concurrent coalesce
745      * operation that sees us in a partial state.
747     extent_lock(tsdn, extent);
749     if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
754     szind_t szind = extent_szind_get_maybe_invalid(extent);
755     bool slab = extent_slab_get(extent);
756     extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
758         extent_interior_register(tsdn, rtree_ctx, extent, szind);
761     extent_unlock(tsdn, extent);
763     if (config_prof && gdump_add) {
764         extent_gdump_add(tsdn, extent);
/* Convenience wrappers selecting gdump accounting on/off. */
771 extent_register(tsdn_t *tsdn, extent_t *extent) {
772     return extent_register_impl(tsdn, extent, true);
776 extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
777     return extent_register_impl(tsdn, extent, false);
/* Re-register an extent; failure is not expected (assert on omitted line). */
781 extent_reregister(tsdn_t *tsdn, extent_t *extent) {
782     bool err = extent_register(tsdn, extent);
787  * Removes all pointers to the given extent from the global rtree indices for
788  * its interior. This is relevant for slab extents, for which we need to do
789  * metadata lookups at places other than the head of the extent. We deregister
790  * on the interior, then, when an extent moves from being an active slab to an
/* (end of comment is on an omitted line) */
794 extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
798     assert(extent_slab_get(extent));
800     for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
801         rtree_clear(tsdn, &extents_rtree, rtree_ctx,
802             (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
808  * Removes all pointers to the given extent from the global rtree.
/* Clears both boundary leaf elements (written as NULL/NSIZES), drops interior
 * slab mappings, and optionally reverses gdump accounting. */
811 extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
812     rtree_ctx_t rtree_ctx_fallback;
813     rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
814     rtree_leaf_elm_t *elm_a, *elm_b;
815     extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
818     extent_lock(tsdn, extent);
820     extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
821     if (extent_slab_get(extent)) {
822         extent_interior_deregister(tsdn, rtree_ctx, extent);
823         extent_slab_set(extent, false);
826     extent_unlock(tsdn, extent);
828     if (config_prof && gdump) {
829         extent_gdump_sub(tsdn, extent);
/* Convenience wrappers selecting gdump accounting on/off. */
834 extent_deregister(tsdn_t *tsdn, extent_t *extent) {
835     extent_deregister_impl(tsdn, extent, true);
839 extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
840     extent_deregister_impl(tsdn, extent, false);
844  * Tries to find and remove an extent from extents that can be used for the
845  * given allocation request.
/* Two paths: exact-address recycle (new_addr != NULL, looked up and validated
 * under the extent's pool lock) or policy-based fit via extents_fit_locked().
 * NOTE(review): the extent local declaration, the NULL-out on validation
 * failure, the alignment argument, and the final return are on omitted
 * lines; code kept verbatim. */
848 extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
849     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
850     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
851     bool growing_retained) {
852     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
853         WITNESS_RANK_CORE, growing_retained ? 1 : 0);
854     assert(alignment > 0);
855     if (config_debug && new_addr != NULL) {
857          * Non-NULL new_addr has two use cases:
859          * 1) Recycle a known-extant extent, e.g. during purging.
860          * 2) Perform in-place expanding reallocation.
862          * Regardless of use case, new_addr must either refer to a
863          * non-existing extent, or to the base of an extant extent,
864          * since only active slabs support interior lookups (which of
865          * course cannot be recycled).
867         assert(PAGE_ADDR2BASE(new_addr) == new_addr);
869         assert(alignment <= PAGE);
872     size_t esize = size + pad;
873     malloc_mutex_lock(tsdn, &extents->mtx);
874     extent_hooks_assure_initialized(arena, r_extent_hooks);
876     if (new_addr != NULL) {
877         extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
878         if (extent != NULL) {
880              * We might null-out extent to report an error, but we
881              * still need to unlock the associated mutex after.
883             extent_t *unlock_extent = extent;
884             assert(extent_base_get(extent) == new_addr);
885             if (extent_arena_get(extent) != arena ||
886                 extent_size_get(extent) < esize ||
887                 extent_state_get(extent) !=
888                 extents_state_get(extents)) {
891             extent_unlock(tsdn, unlock_extent);
894         extent = extents_fit_locked(tsdn, arena, extents, esize,
897     if (extent == NULL) {
898         malloc_mutex_unlock(tsdn, &extents->mtx);
902     extent_activate_locked(tsdn, arena, extents, extent);
903     malloc_mutex_unlock(tsdn, &extents->mtx);
909  * Given an allocation request and an extent guaranteed to be able to satisfy
910  * it, this splits off lead and trail extents, leaving extent pointing to an
911  * extent satisfying the allocation.
912  * This function doesn't put lead or trail into any extents_t; it's the caller's
913  * job to ensure that they can be reused.
/* Result codes for extent_split_interior() (enum opener on an omitted line). */
917  * Split successfully. lead, extent, and trail, are modified to extents
918  * describing the ranges before, in, and after the given allocation.
920     extent_split_interior_ok,
922  * The extent can't satisfy the given allocation request. None of the
923  * input extent_t *s are touched.
925     extent_split_interior_cant_alloc,
927  * In a potentially invalid state. Must leak (if *to_leak is non-NULL),
928  * and salvage what's still salvageable (if *to_salvage is non-NULL).
929  * None of lead, extent, or trail are valid.
931     extent_split_interior_error
932 } extent_split_interior_result_t;
/*
 * Trim *extent down to the requested [addr, addr+esize) range by splitting
 * off an alignment lead and a size trail, reporting leftovers and failure
 * salvage/leak candidates through the out-parameters.
 * NOTE(review): the to_leak/to_salvage initializations, the lead-split setup,
 * and several error-path assignments are on omitted lines, so the visible
 * error handling is partial; code kept verbatim.
 */
934 static extent_split_interior_result_t
935 extent_split_interior(tsdn_t *tsdn, arena_t *arena,
936     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
937     /* The result of splitting, in case of success. */
938     extent_t **extent, extent_t **lead, extent_t **trail,
939     /* The mess to clean up, in case of error. */
940     extent_t **to_leak, extent_t **to_salvage,
941     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
942     szind_t szind, bool growing_retained) {
943     size_t esize = size + pad;
944     size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
945         PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
946     assert(new_addr == NULL || leadsize == 0);
947     if (extent_size_get(*extent) < leadsize + esize) {
948         return extent_split_interior_cant_alloc;
950     size_t trailsize = extent_size_get(*extent) - leadsize - esize;
957     /* Split the lead. */
960         *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
961             *lead, leadsize, NSIZES, false, esize + trailsize, szind,
962             slab, growing_retained);
963         if (*extent == NULL) {
966             return extent_split_interior_error;
970     /* Split the trail. */
971     if (trailsize != 0) {
972         *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
973             esize, szind, slab, trailsize, NSIZES, false,
975         if (*trail == NULL) {
980             return extent_split_interior_error;
984     if (leadsize == 0 && trailsize == 0) {
986          * Splitting causes szind to be set as a side effect, but no
987          * splitting occurred.
989         extent_szind_set(*extent, szind);
990         if (szind != NSIZES) {
991             rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
992                 (uintptr_t)extent_addr_get(*extent), szind, slab);
993             if (slab && extent_size_get(*extent) > PAGE) {
994                 rtree_szind_slab_update(tsdn, &extents_rtree,
996                     (uintptr_t)extent_past_get(*extent) -
997                     (uintptr_t)PAGE, szind, slab);
1002     return extent_split_interior_ok;
1006  * This fulfills the indicated allocation request out of the given extent (which
1007  * the caller should have ensured was big enough). If there's any unused space
1008  * before or after the resulting allocation, that space is given its own extent
1009  * and put back into extents.
/* On success, lead/trail leftovers are deactivated back into extents; on
 * error, to_salvage is deregistered and to_leak is leaked via extents_leak().
 * NOTE(review): the lead/trail/to_leak declarations and return statements are
 * on omitted lines; code kept verbatim. */
1012 extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
1013     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1014     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
1015     szind_t szind, extent_t *extent, bool growing_retained) {
1019     extent_t *to_salvage;
1021     extent_split_interior_result_t result = extent_split_interior(
1022         tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1023         &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
1026     if (result == extent_split_interior_ok) {
1028             extent_deactivate(tsdn, arena, extents, lead);
1030         if (trail != NULL) {
1031             extent_deactivate(tsdn, arena, extents, trail);
1036          * We should have picked an extent that was large enough to
1037          * fulfill our allocation request.
1039         assert(result == extent_split_interior_error);
1040         if (to_salvage != NULL) {
1041             extent_deregister(tsdn, to_salvage);
1043         if (to_leak != NULL) {
1044             void *leak = extent_base_get(to_leak);
1045             extent_deregister_no_gdump_sub(tsdn, to_leak);
1046             extents_leak(tsdn, arena, r_extent_hooks, extents,
1047                 to_leak, growing_retained);
1048             assert(extent_lock_from_addr(tsdn, rtree_ctx, leak)
1057  * Tries to satisfy the given allocation request by reusing one of the extents
1058  * in the given extents_t.
/* Pipeline: extract -> split -> (commit if requested) -> slab interior
 * registration -> zero-fill verification.  NOTE(review): several returns,
 * the *zero/*commit updates, and block delimiters are on omitted lines, so
 * the visible control flow is partial; code kept verbatim. */
1061 extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1062     extents_t *extents, void *new_addr, size_t size, size_t pad,
1063     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
1064     bool growing_retained) {
1065     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1066         WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1067     assert(new_addr == NULL || !slab);
1068     assert(pad == 0 || !slab);
1069     assert(!*zero || !slab);
1071     rtree_ctx_t rtree_ctx_fallback;
1072     rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1074     extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
1075         rtree_ctx, extents, new_addr, size, pad, alignment, slab,
1077     if (extent == NULL) {
1081     extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
1082         extents, new_addr, size, pad, alignment, slab, szind, extent,
1084     if (extent == NULL) {
1088     if (*commit && !extent_committed_get(extent)) {
1089         if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
1090             0, extent_size_get(extent), growing_retained)) {
1091             extent_record(tsdn, arena, r_extent_hooks, extents,
1092                 extent, growing_retained);
1095         extent_zeroed_set(extent, true);
1098     if (extent_committed_get(extent)) {
1101     if (extent_zeroed_get(extent)) {
1106         extent_addr_randomize(tsdn, extent, alignment);
1108     assert(extent_state_get(extent) == extent_state_active);
1110         extent_slab_set(extent, slab);
1111         extent_interior_register(tsdn, rtree_ctx, extent, szind);
1115         void *addr = extent_base_get(extent);
1116         size_t size = extent_size_get(extent);
1117         if (!extent_zeroed_get(extent)) {
1118             if (pages_purge_forced(addr, size)) {
1119                 memset(addr, 0, size);
1121         } else if (config_debug) {
1122             size_t *p = (size_t *)(uintptr_t)addr;
1123             for (size_t i = 0; i < size / sizeof(size_t); i++) {
1132  * If the caller specifies (!*zero), it is still possible to receive zeroed
1133  * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
1134  * advantage of this to avoid demanding zeroed extents, but taking advantage of
1135  * them if they are returned.
/* OS-level allocation: try dss before/after mmap according to dss_prec.
 * NOTE(review): the return statements after each successful strategy and the
 * final NULL return are on omitted lines; code kept verbatim. */
1138 extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
1139     size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
1143     assert(alignment != 0);
1145     /* "primary" dss. */
1146     if (have_dss && dss_prec == dss_prec_primary && (ret =
1147         extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1152     if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
1156     /* "secondary" dss. */
1157     if (have_dss && dss_prec == dss_prec_secondary && (ret =
1158         extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1163     /* All strategies for allocation failed. */
/* Wrapper adding the arena's dss_prec and THP state maintenance. */
1168 extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
1169     size_t size, size_t alignment, bool *zero, bool *commit) {
1170     void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
1171         commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
1173     if (have_madvise_huge && ret) {
1174         pages_set_thp_state(ret, size);
/* Default extent_hooks_t alloc callback: resolve the arena by index. */
1180 extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
1181     size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
1185     tsdn = tsdn_fetch();
1186     arena = arena_get(tsdn, arena_ind, false);
1188      * The arena we're allocating on behalf of must have been initialized
1191     assert(arena != NULL);
1193     return extent_alloc_default_impl(tsdn, arena, new_addr, size,
1194         alignment, zero, commit);
1198 extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
1199 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1200 if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
1202 * The only legitimate case of customized extent hooks for a0 is
1203 * hooks with no allocation activities. One such example is to
1204 * place metadata on pre-allocated resources such as huge pages.
1205 * In that case, rely on reentrancy_level checks to catch
1206 * infinite recursions.
1208 pre_reentrancy(tsd, NULL);
1210 pre_reentrancy(tsd, arena);
1215 extent_hook_post_reentrancy(tsdn_t *tsdn) {
1216 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1217 post_reentrancy(tsd);
1221 * If virtual memory is retained, create increasingly larger extents from which
1222 * to split requested extents in order to limit the total number of disjoint
1223 * virtual memory ranges retained by each arena.
1226 extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
1227 extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
1228 bool slab, szind_t szind, bool *zero, bool *commit) {
1229 malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
1230 assert(pad == 0 || !slab);
1231 assert(!*zero || !slab);
1233 size_t esize = size + pad;
1234 size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
1235 /* Beware size_t wrap-around. */
1236 if (alloc_size_min < esize) {
1240 * Find the next extent size in the series that would be large enough to
1241 * satisfy this request.
1243 pszind_t egn_skip = 0;
1244 size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1245 while (alloc_size < alloc_size_min) {
1247 if (arena->extent_grow_next + egn_skip == NPSIZES) {
1248 /* Outside legal range. */
1251 assert(arena->extent_grow_next + egn_skip < NPSIZES);
1252 alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1255 extent_t *extent = extent_alloc(tsdn, arena);
1256 if (extent == NULL) {
1259 bool zeroed = false;
1260 bool committed = false;
1263 if (*r_extent_hooks == &extent_hooks_default) {
1264 ptr = extent_alloc_default_impl(tsdn, arena, NULL,
1265 alloc_size, PAGE, &zeroed, &committed);
1267 extent_hook_pre_reentrancy(tsdn, arena);
1268 ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
1269 alloc_size, PAGE, &zeroed, &committed,
1270 arena_ind_get(arena));
1271 extent_hook_post_reentrancy(tsdn);
1274 extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
1275 arena_extent_sn_next(arena), extent_state_active, zeroed,
1278 extent_dalloc(tsdn, arena, extent);
1282 if (extent_register_no_gdump_add(tsdn, extent)) {
1283 extents_leak(tsdn, arena, r_extent_hooks,
1284 &arena->extents_retained, extent, true);
1288 if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
1291 if (extent_committed_get(extent)) {
1295 rtree_ctx_t rtree_ctx_fallback;
1296 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1301 extent_t *to_salvage;
1302 extent_split_interior_result_t result = extent_split_interior(
1303 tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1304 &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
1307 if (result == extent_split_interior_ok) {
1309 extent_record(tsdn, arena, r_extent_hooks,
1310 &arena->extents_retained, lead, true);
1312 if (trail != NULL) {
1313 extent_record(tsdn, arena, r_extent_hooks,
1314 &arena->extents_retained, trail, true);
1318 * We should have allocated a sufficiently large extent; the
1319 * cant_alloc case should not occur.
1321 assert(result == extent_split_interior_error);
1322 if (to_salvage != NULL) {
1324 extent_gdump_add(tsdn, to_salvage);
1326 extent_record(tsdn, arena, r_extent_hooks,
1327 &arena->extents_retained, to_salvage, true);
1329 if (to_leak != NULL) {
1330 extent_deregister_no_gdump_sub(tsdn, to_leak);
1331 extents_leak(tsdn, arena, r_extent_hooks,
1332 &arena->extents_retained, to_leak, true);
1337 if (*commit && !extent_committed_get(extent)) {
1338 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
1339 extent_size_get(extent), true)) {
1340 extent_record(tsdn, arena, r_extent_hooks,
1341 &arena->extents_retained, extent, true);
1344 extent_zeroed_set(extent, true);
1348 * Increment extent_grow_next if doing so wouldn't exceed the allowed
1351 if (arena->extent_grow_next + egn_skip + 1 <=
1352 arena->retain_grow_limit) {
1353 arena->extent_grow_next += egn_skip + 1;
1355 arena->extent_grow_next = arena->retain_grow_limit;
1357 /* All opportunities for failure are past. */
1358 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1361 /* Adjust gdump stats now that extent is final size. */
1362 extent_gdump_add(tsdn, extent);
1365 extent_addr_randomize(tsdn, extent, alignment);
1368 rtree_ctx_t rtree_ctx_fallback;
1369 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
1370 &rtree_ctx_fallback);
1372 extent_slab_set(extent, true);
1373 extent_interior_register(tsdn, rtree_ctx, extent, szind);
1375 if (*zero && !extent_zeroed_get(extent)) {
1376 void *addr = extent_base_get(extent);
1377 size_t size = extent_size_get(extent);
1378 if (pages_purge_forced(addr, size)) {
1379 memset(addr, 0, size);
1385 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1390 extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
1391 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1392 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1394 assert(alignment != 0);
1396 malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
1398 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
1399 &arena->extents_retained, new_addr, size, pad, alignment, slab,
1400 szind, zero, commit, true);
1401 if (extent != NULL) {
1402 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1404 extent_gdump_add(tsdn, extent);
1406 } else if (opt_retain && new_addr == NULL) {
1407 extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
1408 pad, alignment, slab, szind, zero, commit);
1409 /* extent_grow_retained() always releases extent_grow_mtx. */
1411 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1413 malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
1419 extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
1420 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1421 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1422 size_t esize = size + pad;
1423 extent_t *extent = extent_alloc(tsdn, arena);
1424 if (extent == NULL) {
1428 if (*r_extent_hooks == &extent_hooks_default) {
1429 /* Call directly to propagate tsdn. */
1430 addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
1431 alignment, zero, commit);
1433 extent_hook_pre_reentrancy(tsdn, arena);
1434 addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
1435 esize, alignment, zero, commit, arena_ind_get(arena));
1436 extent_hook_post_reentrancy(tsdn);
1439 extent_dalloc(tsdn, arena, extent);
1442 extent_init(extent, arena, addr, esize, slab, szind,
1443 arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
1446 extent_addr_randomize(tsdn, extent, alignment);
1448 if (extent_register(tsdn, extent)) {
1449 extents_leak(tsdn, arena, r_extent_hooks,
1450 &arena->extents_retained, extent, false);
1458 extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1459 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1460 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1461 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1462 WITNESS_RANK_CORE, 0);
1464 extent_hooks_assure_initialized(arena, r_extent_hooks);
1466 extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
1467 new_addr, size, pad, alignment, slab, szind, zero, commit);
1468 if (extent == NULL) {
1469 if (opt_retain && new_addr != NULL) {
1471 * When retain is enabled and new_addr is set, we do not
1472 * attempt extent_alloc_wrapper_hard which does mmap
1473 * that is very unlikely to succeed (unless it happens
1474 * to be at the end).
1478 extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
1479 new_addr, size, pad, alignment, slab, szind, zero, commit);
1482 assert(extent == NULL || extent_dumpable_get(extent));
1487 extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
1488 const extent_t *outer) {
1489 assert(extent_arena_get(inner) == arena);
1490 if (extent_arena_get(outer) != arena) {
1494 assert(extent_state_get(inner) == extent_state_active);
1495 if (extent_state_get(outer) != extents->state) {
1499 if (extent_committed_get(inner) != extent_committed_get(outer)) {
1507 extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1508 extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
1509 bool growing_retained) {
1510 assert(extent_can_coalesce(arena, extents, inner, outer));
1512 extent_activate_locked(tsdn, arena, extents, outer);
1514 malloc_mutex_unlock(tsdn, &extents->mtx);
1515 bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
1516 forward ? inner : outer, forward ? outer : inner, growing_retained);
1517 malloc_mutex_lock(tsdn, &extents->mtx);
1520 extent_deactivate_locked(tsdn, arena, extents, outer);
1527 extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
1528 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1529 extent_t *extent, bool *coalesced, bool growing_retained) {
1531 * Continue attempting to coalesce until failure, to protect against
1532 * races with other threads that are thwarted by this one.
1538 /* Try to coalesce forward. */
1539 extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
1540 extent_past_get(extent));
1543 * extents->mtx only protects against races for
1544 * like-state extents, so call extent_can_coalesce()
1545 * before releasing next's pool lock.
1547 bool can_coalesce = extent_can_coalesce(arena, extents,
1550 extent_unlock(tsdn, next);
1552 if (can_coalesce && !extent_coalesce(tsdn, arena,
1553 r_extent_hooks, extents, extent, next, true,
1554 growing_retained)) {
1555 if (extents->delay_coalesce) {
1556 /* Do minimal coalescing. */
1564 /* Try to coalesce backward. */
1565 extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
1566 extent_before_get(extent));
1568 bool can_coalesce = extent_can_coalesce(arena, extents,
1570 extent_unlock(tsdn, prev);
1572 if (can_coalesce && !extent_coalesce(tsdn, arena,
1573 r_extent_hooks, extents, extent, prev, false,
1574 growing_retained)) {
1576 if (extents->delay_coalesce) {
1577 /* Do minimal coalescing. */
1586 if (extents->delay_coalesce) {
1593 * Does the metadata management portions of putting an unused extent into the
1594 * given extents_t (coalesces, deregisters slab interiors, the heap operations).
1597 extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1598 extents_t *extents, extent_t *extent, bool growing_retained) {
1599 rtree_ctx_t rtree_ctx_fallback;
1600 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1602 assert((extents_state_get(extents) != extent_state_dirty &&
1603 extents_state_get(extents) != extent_state_muzzy) ||
1604 !extent_zeroed_get(extent));
1606 malloc_mutex_lock(tsdn, &extents->mtx);
1607 extent_hooks_assure_initialized(arena, r_extent_hooks);
1609 extent_szind_set(extent, NSIZES);
1610 if (extent_slab_get(extent)) {
1611 extent_interior_deregister(tsdn, rtree_ctx, extent);
1612 extent_slab_set(extent, false);
1615 assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1616 (uintptr_t)extent_base_get(extent), true) == extent);
1618 if (!extents->delay_coalesce) {
1619 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
1620 rtree_ctx, extents, extent, NULL, growing_retained);
1621 } else if (extent_size_get(extent) >= LARGE_MINCLASS) {
1622 /* Always coalesce large extents eagerly. */
1626 prev_size = extent_size_get(extent);
1627 assert(extent_state_get(extent) == extent_state_active);
1628 extent = extent_try_coalesce(tsdn, arena,
1629 r_extent_hooks, rtree_ctx, extents, extent,
1630 &coalesced, growing_retained);
1631 } while (coalesced &&
1632 extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
1634 extent_deactivate_locked(tsdn, arena, extents, extent);
1636 malloc_mutex_unlock(tsdn, &extents->mtx);
1640 extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
1641 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1643 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1644 WITNESS_RANK_CORE, 0);
1646 if (extent_register(tsdn, extent)) {
1647 extents_leak(tsdn, arena, &extent_hooks,
1648 &arena->extents_retained, extent, false);
1651 extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
1655 extent_dalloc_default_impl(void *addr, size_t size) {
1656 if (!have_dss || !extent_in_dss(addr)) {
1657 return extent_dalloc_mmap(addr, size);
1663 extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1664 bool committed, unsigned arena_ind) {
1665 return extent_dalloc_default_impl(addr, size);
1669 extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
1670 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1673 assert(extent_base_get(extent) != NULL);
1674 assert(extent_size_get(extent) != 0);
1675 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1676 WITNESS_RANK_CORE, 0);
1678 extent_addr_set(extent, extent_base_get(extent));
1680 extent_hooks_assure_initialized(arena, r_extent_hooks);
1681 /* Try to deallocate. */
1682 if (*r_extent_hooks == &extent_hooks_default) {
1683 /* Call directly to propagate tsdn. */
1684 err = extent_dalloc_default_impl(extent_base_get(extent),
1685 extent_size_get(extent));
1687 extent_hook_pre_reentrancy(tsdn, arena);
1688 err = ((*r_extent_hooks)->dalloc == NULL ||
1689 (*r_extent_hooks)->dalloc(*r_extent_hooks,
1690 extent_base_get(extent), extent_size_get(extent),
1691 extent_committed_get(extent), arena_ind_get(arena)));
1692 extent_hook_post_reentrancy(tsdn);
1696 extent_dalloc(tsdn, arena, extent);
1703 extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1704 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1705 assert(extent_dumpable_get(extent));
1706 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1707 WITNESS_RANK_CORE, 0);
1710 * Deregister first to avoid a race with other allocating threads, and
1711 * reregister if deallocation fails.
1713 extent_deregister(tsdn, extent);
1714 if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
1718 extent_reregister(tsdn, extent);
1719 if (*r_extent_hooks != &extent_hooks_default) {
1720 extent_hook_pre_reentrancy(tsdn, arena);
1722 /* Try to decommit; purge if that fails. */
1724 if (!extent_committed_get(extent)) {
1726 } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
1727 0, extent_size_get(extent))) {
1729 } else if ((*r_extent_hooks)->purge_forced != NULL &&
1730 !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
1731 extent_base_get(extent), extent_size_get(extent), 0,
1732 extent_size_get(extent), arena_ind_get(arena))) {
1734 } else if (extent_state_get(extent) == extent_state_muzzy ||
1735 ((*r_extent_hooks)->purge_lazy != NULL &&
1736 !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1737 extent_base_get(extent), extent_size_get(extent), 0,
1738 extent_size_get(extent), arena_ind_get(arena)))) {
1743 if (*r_extent_hooks != &extent_hooks_default) {
1744 extent_hook_post_reentrancy(tsdn);
1746 extent_zeroed_set(extent, zeroed);
1749 extent_gdump_sub(tsdn, extent);
1752 extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
1757 extent_destroy_default_impl(void *addr, size_t size) {
1758 if (!have_dss || !extent_in_dss(addr)) {
1759 pages_unmap(addr, size);
1764 extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1765 bool committed, unsigned arena_ind) {
1766 extent_destroy_default_impl(addr, size);
1770 extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
1771 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1772 assert(extent_base_get(extent) != NULL);
1773 assert(extent_size_get(extent) != 0);
1774 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1775 WITNESS_RANK_CORE, 0);
1777 /* Deregister first to avoid a race with other allocating threads. */
1778 extent_deregister(tsdn, extent);
1780 extent_addr_set(extent, extent_base_get(extent));
1782 extent_hooks_assure_initialized(arena, r_extent_hooks);
1783 /* Try to destroy; silently fail otherwise. */
1784 if (*r_extent_hooks == &extent_hooks_default) {
1785 /* Call directly to propagate tsdn. */
1786 extent_destroy_default_impl(extent_base_get(extent),
1787 extent_size_get(extent));
1788 } else if ((*r_extent_hooks)->destroy != NULL) {
1789 extent_hook_pre_reentrancy(tsdn, arena);
1790 (*r_extent_hooks)->destroy(*r_extent_hooks,
1791 extent_base_get(extent), extent_size_get(extent),
1792 extent_committed_get(extent), arena_ind_get(arena));
1793 extent_hook_post_reentrancy(tsdn);
1796 extent_dalloc(tsdn, arena, extent);
1800 extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1801 size_t offset, size_t length, unsigned arena_ind) {
1802 return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
1807 extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
1808 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1809 size_t length, bool growing_retained) {
1810 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1811 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1813 extent_hooks_assure_initialized(arena, r_extent_hooks);
1814 if (*r_extent_hooks != &extent_hooks_default) {
1815 extent_hook_pre_reentrancy(tsdn, arena);
1817 bool err = ((*r_extent_hooks)->commit == NULL ||
1818 (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
1819 extent_size_get(extent), offset, length, arena_ind_get(arena)));
1820 if (*r_extent_hooks != &extent_hooks_default) {
1821 extent_hook_post_reentrancy(tsdn);
1823 extent_committed_set(extent, extent_committed_get(extent) || !err);
1828 extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
1829 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1831 return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
1836 extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1837 size_t offset, size_t length, unsigned arena_ind) {
1838 return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
1843 extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
1844 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1846 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1847 WITNESS_RANK_CORE, 0);
1849 extent_hooks_assure_initialized(arena, r_extent_hooks);
1851 if (*r_extent_hooks != &extent_hooks_default) {
1852 extent_hook_pre_reentrancy(tsdn, arena);
1854 bool err = ((*r_extent_hooks)->decommit == NULL ||
1855 (*r_extent_hooks)->decommit(*r_extent_hooks,
1856 extent_base_get(extent), extent_size_get(extent), offset, length,
1857 arena_ind_get(arena)));
1858 if (*r_extent_hooks != &extent_hooks_default) {
1859 extent_hook_post_reentrancy(tsdn);
1861 extent_committed_set(extent, extent_committed_get(extent) && err);
#ifdef PAGES_CAN_PURGE_LAZY
/* Default purge_lazy hook; compiled only where lazy purging is supported. */
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}
#endif
1880 extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
1881 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1882 size_t length, bool growing_retained) {
1883 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1884 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1886 extent_hooks_assure_initialized(arena, r_extent_hooks);
1888 if ((*r_extent_hooks)->purge_lazy == NULL) {
1891 if (*r_extent_hooks != &extent_hooks_default) {
1892 extent_hook_pre_reentrancy(tsdn, arena);
1894 bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1895 extent_base_get(extent), extent_size_get(extent), offset, length,
1896 arena_ind_get(arena));
1897 if (*r_extent_hooks != &extent_hooks_default) {
1898 extent_hook_post_reentrancy(tsdn);
1905 extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
1906 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1908 return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
1909 offset, length, false);
#ifdef PAGES_CAN_PURGE_FORCED
/* Default purge_forced hook; compiled only where forced purging exists. */
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length);
}
#endif
1927 extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
1928 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1929 size_t length, bool growing_retained) {
1930 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1931 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1933 extent_hooks_assure_initialized(arena, r_extent_hooks);
1935 if ((*r_extent_hooks)->purge_forced == NULL) {
1938 if (*r_extent_hooks != &extent_hooks_default) {
1939 extent_hook_pre_reentrancy(tsdn, arena);
1941 bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
1942 extent_base_get(extent), extent_size_get(extent), offset, length,
1943 arena_ind_get(arena));
1944 if (*r_extent_hooks != &extent_hooks_default) {
1945 extent_hook_post_reentrancy(tsdn);
1951 extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
1952 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1954 return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
1955 offset, length, false);
#ifdef JEMALLOC_MAPS_COALESCE
/*
 * Default split hook: splitting is a pure metadata operation when mappings
 * coalesce, so it only "fails" when they do not.
 */
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	return !maps_coalesce;
}
#endif
1967 * Accepts the extent to split, and the characteristics of each side of the
1968 * split. The 'a' parameters go with the 'lead' of the resulting pair of
1969 * extents (the lower addressed portion of the split), and the 'b' parameters go
1970 * with the trail (the higher addressed portion). This makes 'extent' the lead,
1971 * and returns the trail (except in case of error).
1974 extent_split_impl(tsdn_t *tsdn, arena_t *arena,
1975 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
1976 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
1977 bool growing_retained) {
1978 assert(extent_size_get(extent) == size_a + size_b);
1979 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1980 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1982 extent_hooks_assure_initialized(arena, r_extent_hooks);
1984 if ((*r_extent_hooks)->split == NULL) {
1988 extent_t *trail = extent_alloc(tsdn, arena);
1989 if (trail == NULL) {
1993 extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
1994 size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
1995 extent_state_get(extent), extent_zeroed_get(extent),
1996 extent_committed_get(extent), extent_dumpable_get(extent));
1998 rtree_ctx_t rtree_ctx_fallback;
1999 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2000 rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
2004 extent_init(&lead, arena, extent_addr_get(extent), size_a,
2005 slab_a, szind_a, extent_sn_get(extent),
2006 extent_state_get(extent), extent_zeroed_get(extent),
2007 extent_committed_get(extent), extent_dumpable_get(extent));
2009 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
2010 true, &lead_elm_a, &lead_elm_b);
2012 rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
2013 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
2014 &trail_elm_a, &trail_elm_b);
2016 if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
2017 || trail_elm_b == NULL) {
2021 extent_lock2(tsdn, extent, trail);
2023 if (*r_extent_hooks != &extent_hooks_default) {
2024 extent_hook_pre_reentrancy(tsdn, arena);
2026 bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
2027 size_a + size_b, size_a, size_b, extent_committed_get(extent),
2028 arena_ind_get(arena));
2029 if (*r_extent_hooks != &extent_hooks_default) {
2030 extent_hook_post_reentrancy(tsdn);
2036 extent_size_set(extent, size_a);
2037 extent_szind_set(extent, szind_a);
2039 extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
2041 extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
2044 extent_unlock2(tsdn, extent, trail);
2048 extent_unlock2(tsdn, extent, trail);
2050 extent_dalloc(tsdn, arena, trail);
2056 extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
2057 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2058 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
2059 return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
2060 szind_a, slab_a, size_b, szind_b, slab_b, false);
2064 extent_merge_default_impl(void *addr_a, void *addr_b) {
2065 if (!maps_coalesce) {
2068 if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
#ifdef JEMALLOC_MAPS_COALESCE
/* Default merge hook (extent_hooks_t.merge). */
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	return extent_merge_default_impl(addr_a, addr_b);
}
#endif
2084 extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
2085 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
2086 bool growing_retained) {
2087 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2088 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2090 extent_hooks_assure_initialized(arena, r_extent_hooks);
2092 if ((*r_extent_hooks)->merge == NULL) {
2097 if (*r_extent_hooks == &extent_hooks_default) {
2098 /* Call directly to propagate tsdn. */
2099 err = extent_merge_default_impl(extent_base_get(a),
2100 extent_base_get(b));
2102 extent_hook_pre_reentrancy(tsdn, arena);
2103 err = (*r_extent_hooks)->merge(*r_extent_hooks,
2104 extent_base_get(a), extent_size_get(a), extent_base_get(b),
2105 extent_size_get(b), extent_committed_get(a),
2106 arena_ind_get(arena));
2107 extent_hook_post_reentrancy(tsdn);
2115 * The rtree writes must happen while all the relevant elements are
2116 * owned, so the following code uses decomposed helper functions rather
2117 * than extent_{,de}register() to do things in the right order.
2119 rtree_ctx_t rtree_ctx_fallback;
2120 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2121 rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
2122 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
2124 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
2127 extent_lock2(tsdn, a, b);
2129 if (a_elm_b != NULL) {
2130 rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
2133 if (b_elm_b != NULL) {
2134 rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
2140 extent_size_set(a, extent_size_get(a) + extent_size_get(b));
2141 extent_szind_set(a, NSIZES);
2142 extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
2143 extent_sn_get(a) : extent_sn_get(b));
2144 extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
2146 extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
2148 extent_unlock2(tsdn, a, b);
2150 extent_dalloc(tsdn, extent_arena_get(b), b);
2156 extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
2157 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
2158 return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
2163 if (rtree_new(&extents_rtree, true)) {
2167 if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
2168 WITNESS_RANK_EXTENT_POOL)) {