#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

/******************************************************************************/
/* Data. */
rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t extent_mutex_pool;
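/*
 * Illustrative sketch (the actual definitions live in extent_inlines.h):
 * extent_lock()/extent_unlock() hash the extent_t's address into this pool,
 * roughly
 *
 *     mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
 *
 * so unrelated extents may share a mutex, but a given extent always maps to
 * the same one, which is all the locking protocol here requires.
 */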
static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(NPSIZES+1);
static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
#endif
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
#endif
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained);
const extent_hooks_t extent_hooks_default = {
    extent_alloc_default,
    extent_dalloc_default,
    extent_destroy_default,
    extent_commit_default,
    extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
    ,
    extent_purge_lazy_default
#else
    ,
    NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
    ,
    extent_purge_forced_default
#else
    ,
    NULL
#endif
#ifdef JEMALLOC_MAPS_COALESCE
    ,
    extent_split_default,
    extent_merge_default
#endif
};
/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;
/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained);
/******************************************************************************/

rb_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, rb_link,
    extent_esnead_comp)

typedef enum {
    lock_result_success,
    lock_result_failure,
    lock_result_no_extent
} lock_result_t;
static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result) {
    extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
        elm, true);

    if (extent1 == NULL) {
        return lock_result_no_extent;
    }
    /*
     * It's possible that the extent changed out from under us, and with it
     * the leaf->extent mapping.  We have to recheck while holding the lock.
     */
    extent_lock(tsdn, extent1);
    extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
        &extents_rtree, elm, true);

    if (extent1 == extent2) {
        *result = extent1;
        return lock_result_success;
    } else {
        extent_unlock(tsdn, extent1);
        return lock_result_failure;
    }
}
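/*
 * Illustrative timeline for the recheck above:
 *
 *   thread A: reads extent1 from elm
 *   thread B: locks extent1, repoints the leaf at another extent, unlocks
 *   thread A: locks extent1, rereads the leaf, sees extent2 != extent1,
 *             unlocks and reports lock_result_failure so the caller retries
 *
 * Retrying in extent_lock_from_addr() below makes lookup-and-lock effectively
 * atomic with respect to such replacements.
 */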
/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
 */
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
    extent_t *ret = NULL;
    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
        rtree_ctx, (uintptr_t)addr, false, false);
    if (elm == NULL) {
        return NULL;
    }

    lock_result_t lock_result;
    do {
        lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
    } while (lock_result == lock_result_failure);
    return ret;
}
extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
    malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
    extent_t *extent = extent_avail_first(&arena->extent_avail);
    if (extent == NULL) {
        malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
        return base_alloc_extent(tsdn, arena->base);
    }
    extent_avail_remove(&arena->extent_avail, extent);
    malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
    return extent;
}
void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
    malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
    extent_avail_insert(&arena->extent_avail, extent);
    malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}
extent_hooks_t *
extent_hooks_get(arena_t *arena) {
    return base_extent_hooks_get(arena->base);
}
extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
    background_thread_info_t *info;
    if (have_background_thread) {
        info = arena_background_thread_info_get(arena);
        malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
    }
    extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
    if (have_background_thread) {
        malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
    }

    return ret;
}
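/*
 * Illustrative sketch of how user code reaches extent_hooks_set() above,
 * via the public "arena.<i>.extent_hooks" mallctl (my_hooks is hypothetical):
 *
 *     extent_hooks_t *new_hooks = &my_hooks;
 *     extent_hooks_t *old_hooks;
 *     size_t sz = sizeof(old_hooks);
 *     mallctl("arena.0.extent_hooks", (void *)&old_hooks, &sz,
 *         (void *)&new_hooks, sizeof(new_hooks));
 *
 * Holding the background thread mutex here keeps a hooks swap from racing
 * with background purging that invokes the hooks.
 */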
static void
extent_hooks_assure_initialized(arena_t *arena,
    extent_hooks_t **r_extent_hooks) {
    if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
        *r_extent_hooks = extent_hooks_get(arena);
    }
}
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
    size_t ret;
    pszind_t pind;

    assert(size > 0);
    assert((size & PAGE_MASK) == 0);

    pind = sz_psz2ind(size - sz_large_pad + 1);
    if (pind == 0) {
        /*
         * Avoid underflow.  This short-circuit would also do the right
         * thing for all sizes in the range for which there are
         * PAGE-spaced size classes, but it's simplest to just handle
         * the one case that would cause erroneous results.
         */
        return size;
    }
    ret = sz_pind2sz(pind - 1) + sz_large_pad;
    assert(ret <= size);
    return ret;
}
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
    size_t ret;

    assert(size > 0);
    assert(size - sz_large_pad <= LARGE_MAXCLASS);
    assert((size & PAGE_MASK) == 0);

    ret = extent_size_quantize_floor(size);
    if (ret < size) {
        /*
         * Skip a quantization that may have an adequately large extent,
         * because under-sized extents may be mixed in.  This only
         * happens when an unusual size is requested, i.e. for aligned
         * allocation, and is just one of several places where linear
         * search would potentially find sufficiently aligned available
         * memory somewhere lower.
         */
        ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
            sz_large_pad;
    }
    return ret;
}
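/*
 * Worked example (assuming 4 KiB pages and sz_large_pad == 0): 84 KiB is not
 * a page-size class, and its neighboring classes are 80 KiB and 96 KiB, so
 * extent_size_quantize_floor(84 KiB) == 80 KiB and
 * extent_size_quantize_ceil(84 KiB) == 96 KiB.  A size that already is a
 * class (e.g. 80 KiB) is returned unchanged by both functions.
 */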
/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce) {
    if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
        malloc_mutex_rank_exclusive)) {
        return true;
    }
    for (unsigned i = 0; i < NPSIZES+1; i++) {
        extent_heap_new(&extents->heaps[i]);
    }
    bitmap_init(extents->bitmap, &extents_bitmap_info, true);
    extent_list_init(&extents->lru);
    atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
    extents->state = state;
    extents->delay_coalesce = delay_coalesce;
    return false;
}
extent_state_t
extents_state_get(const extents_t *extents) {
    return extents->state;
}
size_t
extents_npages_get(extents_t *extents) {
    return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}
static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
    bool preserve_lru) {
    malloc_mutex_assert_owner(tsdn, &extents->mtx);
    assert(extent_state_get(extent) == extents->state);

    size_t size = extent_size_get(extent);
    size_t psz = extent_size_quantize_floor(size);
    pszind_t pind = sz_psz2ind(psz);
    if (extent_heap_empty(&extents->heaps[pind])) {
        bitmap_unset(extents->bitmap, &extents_bitmap_info,
            (size_t)pind);
    }
    extent_heap_insert(&extents->heaps[pind], extent);
    if (!preserve_lru) {
        extent_list_append(&extents->lru, extent);
    }
    size_t npages = size >> LG_PAGE;
    /*
     * All modifications to npages hold the mutex (as asserted above), so we
     * don't need an atomic fetch-add; we can get by with a load followed by
     * a store.
     */
    size_t cur_extents_npages =
        atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
    atomic_store_zu(&extents->npages, cur_extents_npages + npages,
        ATOMIC_RELAXED);
}
static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
    bool preserve_lru) {
    malloc_mutex_assert_owner(tsdn, &extents->mtx);
    assert(extent_state_get(extent) == extents->state);

    size_t size = extent_size_get(extent);
    size_t psz = extent_size_quantize_floor(size);
    pszind_t pind = sz_psz2ind(psz);
    extent_heap_remove(&extents->heaps[pind], extent);
    if (extent_heap_empty(&extents->heaps[pind])) {
        bitmap_set(extents->bitmap, &extents_bitmap_info,
            (size_t)pind);
    }
    if (!preserve_lru) {
        extent_list_remove(&extents->lru, extent);
    }
    size_t npages = size >> LG_PAGE;
    /*
     * As in extents_insert_locked, we hold extents->mtx and so don't need
     * atomic operations for updating extents->npages.
     */
    size_t cur_extents_npages =
        atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
    assert(cur_extents_npages >= npages);
    atomic_store_zu(&extents->npages,
        cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}
/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
    pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
    pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
        (size_t)pind);
    if (i < NPSIZES+1) {
        assert(!extent_heap_empty(&extents->heaps[i]));
        extent_t *extent = extent_heap_any(&extents->heaps[i]);
        assert(extent_size_get(extent) >= size);
        return extent;
    }

    return NULL;
}
/*
 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
 * large enough.
 */
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
    extent_t *ret = NULL;

    pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
    for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
        &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
        (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
        (size_t)i+1)) {
        assert(!extent_heap_empty(&extents->heaps[i]));
        extent_t *extent = extent_heap_first(&extents->heaps[i]);
        assert(extent_size_get(extent) >= size);
        if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
            ret = extent;
        }
        if (i == NPSIZES) {
            break;
        }
        assert(i < NPSIZES);
    }

    return ret;
}
/*
 * Do {best,first}-fit extent selection, where the selection policy choice is
 * based on extents->delay_coalesce.  Best-fit selection requires less
 * searching, but its layout policy is less stable and may cause higher virtual
 * memory fragmentation as a side effect.
 */
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
    malloc_mutex_assert_owner(tsdn, &extents->mtx);

    return extents->delay_coalesce ? extents_best_fit_locked(tsdn, arena,
        extents, size) : extents_first_fit_locked(tsdn, arena, extents,
        size);
}
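/*
 * Concrete illustration of the policy difference: for an 84 KiB request with
 * a 96 KiB extent (serial number 5) and a 128 KiB extent (serial number 1)
 * available, best-fit returns the 96 KiB extent (any member of the smallest
 * heap that fits), while first-fit scans every fitting heap and returns the
 * 128 KiB extent because its serial number is older.
 */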
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent) {
    extent_state_set(extent, extent_state_active);
    bool coalesced;
    extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
        extents, extent, &coalesced, false);
    extent_state_set(extent, extents_state_get(extents));

    if (!coalesced) {
        return true;
    }
    extents_insert_locked(tsdn, extents, extent, true);
    return false;
}
extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
    assert(size + pad != 0);
    assert(alignment != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr,
        size, pad, alignment, slab, szind, zero, commit, false);
}
void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
    assert(extent_base_get(extent) != NULL);
    assert(extent_size_get(extent) != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    extent_addr_set(extent, extent_base_get(extent));
    extent_zeroed_set(extent, false);

    extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
}
extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, size_t npages_min) {
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

    malloc_mutex_lock(tsdn, &extents->mtx);

    /*
     * Get the LRU coalesced extent, if any.  If coalescing was delayed,
     * the loop will iterate until the LRU extent is fully coalesced.
     */
    extent_t *extent;
    while (true) {
        /* Get the LRU extent, if any. */
        extent = extent_list_first(&extents->lru);
        if (extent == NULL) {
            goto label_return;
        }
        /* Check the eviction limit. */
        size_t npages = extent_size_get(extent) >> LG_PAGE;
        size_t extents_npages = atomic_load_zu(&extents->npages,
            ATOMIC_RELAXED);
        if (extents_npages - npages < npages_min) {
            extent = NULL;
            goto label_return;
        }
        extents_remove_locked(tsdn, extents, extent, false);
        if (!extents->delay_coalesce) {
            break;
        }
        /* Try to coalesce. */
        if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
            rtree_ctx, extents, extent)) {
            break;
        }
        /*
         * The LRU extent was just coalesced and the result placed in
         * the LRU at its neighbor's position.  Start over.
         */
    }

    /*
     * Either mark the extent active or deregister it to protect against
     * concurrent operations.
     */
    switch (extents_state_get(extents)) {
    case extent_state_active:
        not_reached();
    case extent_state_dirty:
    case extent_state_muzzy:
        extent_state_set(extent, extent_state_active);
        break;
    case extent_state_retained:
        extent_deregister(tsdn, extent);
        break;
    default:
        not_reached();
    }

label_return:
    malloc_mutex_unlock(tsdn, &extents->mtx);
    return extent;
}
void
extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
    /*
     * Leak extent after making sure its pages have already been purged, so
     * that this is only a virtual memory leak.
     */
    if (extents_state_get(extents) == extent_state_dirty) {
        if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
            extent, 0, extent_size_get(extent), growing_retained)) {
            extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
                extent, 0, extent_size_get(extent),
                growing_retained);
        }
    }
    extent_dalloc(tsdn, arena, extent);
}
void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
    malloc_mutex_prefork(tsdn, &extents->mtx);
}

void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
    malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}

void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
    malloc_mutex_postfork_child(tsdn, &extents->mtx);
}
static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent, bool preserve_lru) {
    assert(extent_arena_get(extent) == arena);
    assert(extent_state_get(extent) == extent_state_active);

    extent_state_set(extent, extents_state_get(extents));
    extents_insert_locked(tsdn, extents, extent, preserve_lru);
}
static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent, bool preserve_lru) {
    malloc_mutex_lock(tsdn, &extents->mtx);
    extent_deactivate_locked(tsdn, arena, extents, extent, preserve_lru);
    malloc_mutex_unlock(tsdn, &extents->mtx);
}
static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent, bool preserve_lru) {
    assert(extent_arena_get(extent) == arena);
    assert(extent_state_get(extent) == extents_state_get(extents));

    extents_remove_locked(tsdn, extents, extent, preserve_lru);
    extent_state_set(extent, extent_state_active);
}
static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent, bool dependent, bool init_missing,
    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
    *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
        (uintptr_t)extent_base_get(extent), dependent, init_missing);
    if (!dependent && *r_elm_a == NULL) {
        return true;
    }
    assert(*r_elm_a != NULL);

    *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
        (uintptr_t)extent_last_get(extent), dependent, init_missing);
    if (!dependent && *r_elm_b == NULL) {
        return true;
    }
    assert(*r_elm_b != NULL);

    return false;
}
static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
    rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
    rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
    if (elm_b != NULL) {
        rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
            slab);
    }
}
static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
    szind_t szind) {
    assert(extent_slab_get(extent));

    /* Register interior. */
    for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
        rtree_write(tsdn, &extents_rtree, rtree_ctx,
            (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
            LG_PAGE), extent, szind, true);
    }
}
static void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
    cassert(config_prof);
    /* prof_gdump() requirement. */
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    if (opt_prof && extent_state_get(extent) == extent_state_active) {
        size_t nadd = extent_size_get(extent) >> LG_PAGE;
        size_t cur = atomic_fetch_add_zu(&curpages, nadd,
            ATOMIC_RELAXED) + nadd;
        size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
        while (cur > high && !atomic_compare_exchange_weak_zu(
            &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
            /*
             * Don't refresh cur, because it may have decreased
             * since this thread lost the highpages update race.
             * Note that high is updated in case of CAS failure.
             */
        }
        if (cur > high && prof_gdump_get_unlocked()) {
            prof_gdump(tsdn);
        }
    }
}
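/*
 * Illustrative note: the CAS loop above is the standard lock-free
 * "atomic maximum" idiom.  atomic_compare_exchange_weak_zu() reloads the
 * currently published maximum into high on failure, and highpages only ever
 * grows, so the loop terminates as soon as either the CAS succeeds or some
 * other thread has already published a value >= cur.
 */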
static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
    cassert(config_prof);

    if (opt_prof && extent_state_get(extent) == extent_state_active) {
        size_t nsub = extent_size_get(extent) >> LG_PAGE;
        assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
        atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
    }
}
static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    rtree_leaf_elm_t *elm_a, *elm_b;

    /*
     * We need to hold the lock to protect against a concurrent coalesce
     * operation that sees us in a partial state.
     */
    extent_lock(tsdn, extent);

    if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
        &elm_a, &elm_b)) {
        extent_unlock(tsdn, extent);
        return true;
    }

    szind_t szind = extent_szind_get_maybe_invalid(extent);
    bool slab = extent_slab_get(extent);
    extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
    if (slab) {
        extent_interior_register(tsdn, rtree_ctx, extent, szind);
    }

    extent_unlock(tsdn, extent);

    if (config_prof && gdump_add) {
        extent_gdump_add(tsdn, extent);
    }

    return false;
}
bool
extent_register(tsdn_t *tsdn, extent_t *extent) {
    return extent_register_impl(tsdn, extent, true);
}

bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
    return extent_register_impl(tsdn, extent, false);
}
static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
    bool err = extent_register(tsdn, extent);
    assert(!err);
}
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    extent_t *extent) {
    size_t i;

    assert(extent_slab_get(extent));

    for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
        rtree_clear(tsdn, &extents_rtree, rtree_ctx,
            (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
            LG_PAGE));
    }
}
static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    rtree_leaf_elm_t *elm_a, *elm_b;
    extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
        &elm_a, &elm_b);

    extent_lock(tsdn, extent);

    extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
    if (extent_slab_get(extent)) {
        extent_interior_deregister(tsdn, rtree_ctx, extent);
        extent_slab_set(extent, false);
    }

    extent_unlock(tsdn, extent);

    if (config_prof) {
        extent_gdump_sub(tsdn, extent);
    }
}
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    bool *zero, bool *commit, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);
    assert(alignment > 0);
    if (config_debug && new_addr != NULL) {
        /*
         * Non-NULL new_addr has two use cases:
         *
         *   1) Recycle a known-extant extent, e.g. during purging.
         *   2) Perform in-place expanding reallocation.
         *
         * Regardless of use case, new_addr must either refer to a
         * non-existing extent, or to the base of an extant extent,
         * since only active slabs support interior lookups (which of
         * course cannot be recycled).
         */
        assert(PAGE_ADDR2BASE(new_addr) == new_addr);
        assert(pad == 0);
        assert(alignment <= PAGE);
    }

    size_t esize = size + pad;
    size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size < esize) {
        return NULL;
    }
    malloc_mutex_lock(tsdn, &extents->mtx);
    extent_hooks_assure_initialized(arena, r_extent_hooks);
    extent_t *extent;
    if (new_addr != NULL) {
        extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
        if (extent != NULL) {
            /*
             * We might null-out extent to report an error, but we
             * still need to unlock the associated mutex after.
             */
            extent_t *unlock_extent = extent;
            assert(extent_base_get(extent) == new_addr);
            if (extent_arena_get(extent) != arena ||
                extent_size_get(extent) < esize ||
                extent_state_get(extent) !=
                extents_state_get(extents)) {
                extent = NULL;
            }
            extent_unlock(tsdn, unlock_extent);
        }
    } else {
        extent = extents_fit_locked(tsdn, arena, extents, alloc_size);
    }
    if (extent == NULL) {
        malloc_mutex_unlock(tsdn, &extents->mtx);
        return NULL;
    }

    extent_activate_locked(tsdn, arena, extents, extent, false);
    malloc_mutex_unlock(tsdn, &extents->mtx);

    if (extent_zeroed_get(extent)) {
        *zero = true;
    }
    if (extent_committed_get(extent)) {
        *commit = true;
    }

    return extent;
}
static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, extent_t *extent, bool growing_retained) {
    size_t esize = size + pad;
    size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
        PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
    assert(new_addr == NULL || leadsize == 0);
    assert(extent_size_get(extent) >= leadsize + esize);
    size_t trailsize = extent_size_get(extent) - leadsize - esize;

    /* Split the lead. */
    if (leadsize != 0) {
        extent_t *lead = extent;
        extent = extent_split_impl(tsdn, arena, r_extent_hooks,
            lead, leadsize, NSIZES, false, esize + trailsize, szind,
            slab, growing_retained);
        if (extent == NULL) {
            extent_deregister(tsdn, lead);
            extents_leak(tsdn, arena, r_extent_hooks, extents,
                lead, growing_retained);
            return NULL;
        }
        extent_deactivate(tsdn, arena, extents, lead, false);
    }

    /* Split the trail. */
    if (trailsize != 0) {
        extent_t *trail = extent_split_impl(tsdn, arena,
            r_extent_hooks, extent, esize, szind, slab, trailsize,
            NSIZES, false, growing_retained);
        if (trail == NULL) {
            extent_deregister(tsdn, extent);
            extents_leak(tsdn, arena, r_extent_hooks, extents,
                extent, growing_retained);
            return NULL;
        }
        extent_deactivate(tsdn, arena, extents, trail, false);
    } else if (leadsize == 0) {
        /*
         * Splitting causes szind to be set as a side effect, but no
         * splitting occurred.
         */
        extent_szind_set(extent, szind);
        if (szind != NSIZES) {
            rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
                (uintptr_t)extent_addr_get(extent), szind, slab);
            if (slab && extent_size_get(extent) > PAGE) {
                rtree_szind_slab_update(tsdn, &extents_rtree,
                    rtree_ctx,
                    (uintptr_t)extent_past_get(extent) -
                    (uintptr_t)PAGE, szind, slab);
            }
        }
    }

    return extent;
}
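/*
 * Shape of a successful recycle-split (illustrative sketch):
 *
 *     | lead (leadsize) | extent (size + pad) | trail (trailsize) |
 *       deactivated back   returned to caller    deactivated back
 *       into extents                             into extents
 *
 * leadsize is nonzero only when alignment demands it, and trailsize is
 * nonzero only when the recycled extent was larger than the request.
 */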
static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
    bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);
    assert(new_addr == NULL || !slab);
    assert(pad == 0 || !slab);
    assert(!*zero || !slab);

    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

    bool committed = false;
    extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
        rtree_ctx, extents, new_addr, size, pad, alignment, slab, zero,
        &committed, growing_retained);
    if (extent == NULL) {
        return NULL;
    }
    if (committed) {
        *commit = true;
    }

    extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
        extents, new_addr, size, pad, alignment, slab, szind, extent,
        growing_retained);
    if (extent == NULL) {
        return NULL;
    }

    if (*commit && !extent_committed_get(extent)) {
        if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
            0, extent_size_get(extent), growing_retained)) {
            extent_record(tsdn, arena, r_extent_hooks, extents,
                extent, growing_retained);
            return NULL;
        }
        extent_zeroed_set(extent, true);
    }

    if (pad != 0) {
        extent_addr_randomize(tsdn, extent, alignment);
    }
    assert(extent_state_get(extent) == extent_state_active);
    if (slab) {
        extent_slab_set(extent, slab);
        extent_interior_register(tsdn, rtree_ctx, extent, szind);
    }

    if (*zero) {
        void *addr = extent_base_get(extent);
        size_t size = extent_size_get(extent);
        if (!extent_zeroed_get(extent)) {
            if (pages_purge_forced(addr, size)) {
                memset(addr, 0, size);
            }
        } else if (config_debug) {
            size_t *p = (size_t *)(uintptr_t)addr;
            for (size_t i = 0; i < size / sizeof(size_t); i++) {
                assert(p[i] == 0);
            }
        }
    }
    return extent;
}
/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
 * advantage of this to avoid demanding zeroed extents, but taking advantage of
 * them if they are returned.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
    void *ret;

    assert(size != 0);
    assert(alignment != 0);

    /* "primary" dss. */
    if (have_dss && dss_prec == dss_prec_primary && (ret =
        extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
        commit)) != NULL) {
        return ret;
    }
    /* mmap. */
    if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
        != NULL) {
        return ret;
    }
    /* "secondary" dss. */
    if (have_dss && dss_prec == dss_prec_secondary && (ret =
        extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
        commit)) != NULL) {
        return ret;
    }

    /* All strategies for allocation failed. */
    return NULL;
}
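/*
 * Hedged usage sketch for the in/out parameters described above (names are
 * illustrative, not from the original source):
 *
 *     bool zero = false, commit = true;
 *     void *ptr = extent_alloc_core(tsdn, arena, NULL, size, PAGE, &zero,
 *         &commit, dss_prec_disabled);
 *     // On success, zero may have been toggled to true if the OS handed
 *     // back already-zeroed (e.g. freshly mmap'ed) pages, letting the
 *     // caller skip its own memset().
 */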
static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit) {
    void *ret;

    ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
        commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
        ATOMIC_RELAXED));
    return ret;
}
static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
    tsdn_t *tsdn;
    arena_t *arena;

    tsdn = tsdn_fetch();
    arena = arena_get(tsdn, arena_ind, false);
    /*
     * The arena we're allocating on behalf of must have been initialized
     * already.
     */
    assert(arena != NULL);

    return extent_alloc_default_impl(tsdn, arena, new_addr, size,
        alignment, zero, commit);
}
static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
    tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
    pre_reentrancy(tsd, arena);
}

static void
extent_hook_post_reentrancy(tsdn_t *tsdn) {
    tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
    post_reentrancy(tsd);
}
/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 */
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
    bool slab, szind_t szind, bool *zero, bool *commit) {
    malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
    assert(pad == 0 || !slab);
    assert(!*zero || !slab);

    size_t esize = size + pad;
    size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size_min < esize) {
        goto label_err;
    }
    /*
     * Find the next extent size in the series that would be large enough to
     * satisfy this request.
     */
    pszind_t egn_skip = 0;
    size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
    while (alloc_size < alloc_size_min) {
        egn_skip++;
        if (arena->extent_grow_next + egn_skip == NPSIZES) {
            /* Outside legal range. */
            goto label_err;
        }
        assert(arena->extent_grow_next + egn_skip < NPSIZES);
        alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
    }
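    /*
     * Illustration (assuming default 4 KiB pages): extent_grow_next walks
     * the page-size-class ladder (4K, 8K, 12K, 16K, 20K, ..., each step
     * growing by roughly 1.25x-2x), so successive grows retain
     * geometrically larger ranges and the number of disjoint mappings
     * stays roughly logarithmic in the total retained size.  egn_skip only
     * fast-forwards past classes too small for the current request.
     */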

    extent_t *extent = extent_alloc(tsdn, arena);
    if (extent == NULL) {
        goto label_err;
    }
    bool zeroed = false;
    bool committed = false;

    void *ptr;
    if (*r_extent_hooks == &extent_hooks_default) {
        ptr = extent_alloc_core(tsdn, arena, NULL, alloc_size, PAGE,
            &zeroed, &committed, (dss_prec_t)atomic_load_u(
            &arena->dss_prec, ATOMIC_RELAXED));
    } else {
        extent_hook_pre_reentrancy(tsdn, arena);
        ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
            alloc_size, PAGE, &zeroed, &committed,
            arena_ind_get(arena));
        extent_hook_post_reentrancy(tsdn);
    }

    extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
        arena_extent_sn_next(arena), extent_state_active, zeroed,
        committed);
    if (ptr == NULL) {
        extent_dalloc(tsdn, arena, extent);
        goto label_err;
    }
    if (extent_register_no_gdump_add(tsdn, extent)) {
        extents_leak(tsdn, arena, r_extent_hooks,
            &arena->extents_retained, extent, true);
        goto label_err;
    }

    size_t leadsize = ALIGNMENT_CEILING((uintptr_t)ptr,
        PAGE_CEILING(alignment)) - (uintptr_t)ptr;
    assert(alloc_size >= leadsize + esize);
    size_t trailsize = alloc_size - leadsize - esize;
    if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
        *zero = true;
    }
    if (extent_committed_get(extent)) {
        *commit = true;
    }

    /* Split the lead. */
    if (leadsize != 0) {
        extent_t *lead = extent;
        extent = extent_split_impl(tsdn, arena, r_extent_hooks, lead,
            leadsize, NSIZES, false, esize + trailsize, szind, slab,
            true);
        if (extent == NULL) {
            extent_deregister(tsdn, lead);
            extents_leak(tsdn, arena, r_extent_hooks,
                &arena->extents_retained, lead, true);
            goto label_err;
        }
        extent_record(tsdn, arena, r_extent_hooks,
            &arena->extents_retained, lead, true);
    }

    /* Split the trail. */
    if (trailsize != 0) {
        extent_t *trail = extent_split_impl(tsdn, arena, r_extent_hooks,
            extent, esize, szind, slab, trailsize, NSIZES, false, true);
        if (trail == NULL) {
            extent_deregister(tsdn, extent);
            extents_leak(tsdn, arena, r_extent_hooks,
                &arena->extents_retained, extent, true);
            goto label_err;
        }
        extent_record(tsdn, arena, r_extent_hooks,
            &arena->extents_retained, trail, true);
    } else if (leadsize == 0) {
        /*
         * Splitting causes szind to be set as a side effect, but no
         * splitting occurred.
         */
        rtree_ctx_t rtree_ctx_fallback;
        rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
            &rtree_ctx_fallback);

        extent_szind_set(extent, szind);
        if (szind != NSIZES) {
            rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
                (uintptr_t)extent_addr_get(extent), szind, slab);
            if (slab && extent_size_get(extent) > PAGE) {
                rtree_szind_slab_update(tsdn, &extents_rtree,
                    rtree_ctx,
                    (uintptr_t)extent_past_get(extent) -
                    (uintptr_t)PAGE, szind, slab);
            }
        }
    }

    if (*commit && !extent_committed_get(extent)) {
        if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
            extent_size_get(extent), true)) {
            extent_record(tsdn, arena, r_extent_hooks,
                &arena->extents_retained, extent, true);
            goto label_err;
        }
        extent_zeroed_set(extent, true);
    }

    /*
     * Increment extent_grow_next if doing so wouldn't exceed the legal
     * range.
     */
    if (arena->extent_grow_next + egn_skip + 1 < NPSIZES) {
        arena->extent_grow_next += egn_skip + 1;
    } else {
        arena->extent_grow_next = NPSIZES - 1;
    }
    /* All opportunities for failure are past. */
    malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);

    if (config_prof) {
        /* Adjust gdump stats now that extent is final size. */
        extent_gdump_add(tsdn, extent);
    }
    if (pad != 0) {
        extent_addr_randomize(tsdn, extent, alignment);
    }
    if (slab) {
        rtree_ctx_t rtree_ctx_fallback;
        rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
            &rtree_ctx_fallback);

        extent_slab_set(extent, true);
        extent_interior_register(tsdn, rtree_ctx, extent, szind);
    }
    if (*zero && !extent_zeroed_get(extent)) {
        void *addr = extent_base_get(extent);
        size_t size = extent_size_get(extent);
        if (pages_purge_forced(addr, size)) {
            memset(addr, 0, size);
        }
    }

    return extent;
label_err:
    malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
    return NULL;
}
static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
    assert(size != 0);
    assert(alignment != 0);

    malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);

    extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
        &arena->extents_retained, new_addr, size, pad, alignment, slab,
        szind, zero, commit, true);
    if (extent != NULL) {
        malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
        if (config_prof) {
            extent_gdump_add(tsdn, extent);
        }
    } else if (opt_retain && new_addr == NULL) {
        extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
            pad, alignment, slab, szind, zero, commit);
        /* extent_grow_retained() always releases extent_grow_mtx. */
    } else {
        malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
    }
    malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);

    return extent;
}
static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
    size_t esize = size + pad;
    extent_t *extent = extent_alloc(tsdn, arena);
    if (extent == NULL) {
        return NULL;
    }
    void *addr;
    if (*r_extent_hooks == &extent_hooks_default) {
        /* Call directly to propagate tsdn. */
        addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
            alignment, zero, commit);
    } else {
        extent_hook_pre_reentrancy(tsdn, arena);
        addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
            esize, alignment, zero, commit, arena_ind_get(arena));
        extent_hook_post_reentrancy(tsdn);
    }
    if (addr == NULL) {
        extent_dalloc(tsdn, arena, extent);
        return NULL;
    }
    extent_init(extent, arena, addr, esize, slab, szind,
        arena_extent_sn_next(arena), extent_state_active, *zero, *commit);
    if (pad != 0) {
        extent_addr_randomize(tsdn, extent, alignment);
    }
    if (extent_register(tsdn, extent)) {
        extents_leak(tsdn, arena, r_extent_hooks,
            &arena->extents_retained, extent, false);
        return NULL;
    }

    return extent;
}
extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    extent_hooks_assure_initialized(arena, r_extent_hooks);

    extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
        new_addr, size, pad, alignment, slab, szind, zero, commit);
    if (extent == NULL) {
        extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
            new_addr, size, pad, alignment, slab, szind, zero, commit);
    }

    return extent;
}
static bool
extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
    const extent_t *outer) {
    assert(extent_arena_get(inner) == arena);
    if (extent_arena_get(outer) != arena) {
        return false;
    }

    assert(extent_state_get(inner) == extent_state_active);
    if (extent_state_get(outer) != extents->state) {
        return false;
    }

    if (extent_committed_get(inner) != extent_committed_get(outer)) {
        return false;
    }

    return true;
}
static bool
extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
    bool growing_retained) {
    assert(extent_can_coalesce(arena, extents, inner, outer));

    if (forward && extents->delay_coalesce) {
        /*
         * The extent that remains after coalescing must occupy the
         * outer extent's position in the LRU.  For forward coalescing,
         * swap the inner extent into the LRU.
         */
        extent_list_replace(&extents->lru, outer, inner);
    }
    extent_activate_locked(tsdn, arena, extents, outer,
        extents->delay_coalesce);

    malloc_mutex_unlock(tsdn, &extents->mtx);
    bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
        forward ? inner : outer, forward ? outer : inner, growing_retained);
    malloc_mutex_lock(tsdn, &extents->mtx);

    if (err) {
        if (forward && extents->delay_coalesce) {
            extent_list_replace(&extents->lru, inner, outer);
        }
        extent_deactivate_locked(tsdn, arena, extents, outer,
            extents->delay_coalesce);
    }

    return err;
}
static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained) {
    /*
     * Continue attempting to coalesce until failure, to protect against
     * races with other threads that are thwarted by this one.
     */
    bool again;
    do {
        again = false;

        /* Try to coalesce forward. */
        extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
            extent_past_get(extent));
        if (next != NULL) {
            /*
             * extents->mtx only protects against races for
             * like-state extents, so call extent_can_coalesce()
             * before releasing next's pool lock.
             */
            bool can_coalesce = extent_can_coalesce(arena, extents,
                extent, next);

            extent_unlock(tsdn, next);

            if (can_coalesce && !extent_coalesce(tsdn, arena,
                r_extent_hooks, extents, extent, next, true,
                growing_retained)) {
                if (extents->delay_coalesce) {
                    /* Do minimal coalescing. */
                    *coalesced = true;
                    return extent;
                }
                again = true;
            }
        }

        /* Try to coalesce backward. */
        extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
            extent_before_get(extent));
        if (prev != NULL) {
            bool can_coalesce = extent_can_coalesce(arena, extents,
                extent, prev);
            extent_unlock(tsdn, prev);

            if (can_coalesce && !extent_coalesce(tsdn, arena,
                r_extent_hooks, extents, extent, prev, false,
                growing_retained)) {
                extent = prev;
                if (extents->delay_coalesce) {
                    /* Do minimal coalescing. */
                    *coalesced = true;
                    return extent;
                }
                again = true;
            }
        }
    } while (again);

    if (extents->delay_coalesce) {
        *coalesced = false;
    }
    return extent;
}
static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

    assert((extents_state_get(extents) != extent_state_dirty &&
        extents_state_get(extents) != extent_state_muzzy) ||
        !extent_zeroed_get(extent));

    malloc_mutex_lock(tsdn, &extents->mtx);
    extent_hooks_assure_initialized(arena, r_extent_hooks);

    extent_szind_set(extent, NSIZES);
    if (extent_slab_get(extent)) {
        extent_interior_deregister(tsdn, rtree_ctx, extent);
        extent_slab_set(extent, false);
    }

    assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
        (uintptr_t)extent_base_get(extent), true) == extent);

    if (!extents->delay_coalesce) {
        extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
            rtree_ctx, extents, extent, NULL, growing_retained);
    }

    extent_deactivate_locked(tsdn, arena, extents, extent, false);

    malloc_mutex_unlock(tsdn, &extents->mtx);
}
void
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
    extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    if (extent_register(tsdn, extent)) {
        extents_leak(tsdn, arena, &extent_hooks,
            &arena->extents_retained, extent, false);
        return;
    }
    extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
}
static bool
extent_dalloc_default_impl(void *addr, size_t size) {
    if (!have_dss || !extent_in_dss(addr)) {
        return extent_dalloc_mmap(addr, size);
    }
    return true;
}
static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
    return extent_dalloc_default_impl(addr, size);
}
static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
    bool err;

    assert(extent_base_get(extent) != NULL);
    assert(extent_size_get(extent) != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    extent_addr_set(extent, extent_base_get(extent));

    extent_hooks_assure_initialized(arena, r_extent_hooks);
    /* Try to deallocate. */
    if (*r_extent_hooks == &extent_hooks_default) {
        /* Call directly to propagate tsdn. */
        err = extent_dalloc_default_impl(extent_base_get(extent),
            extent_size_get(extent));
    } else {
        extent_hook_pre_reentrancy(tsdn, arena);
        err = ((*r_extent_hooks)->dalloc == NULL ||
            (*r_extent_hooks)->dalloc(*r_extent_hooks,
            extent_base_get(extent), extent_size_get(extent),
            extent_committed_get(extent), arena_ind_get(arena)));
        extent_hook_post_reentrancy(tsdn);
    }

    if (!err) {
        extent_dalloc(tsdn, arena, extent);
    }

    return err;
}
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    /*
     * Deregister first to avoid a race with other allocating threads, and
     * reregister if deallocation fails.
     */
    extent_deregister(tsdn, extent);
    if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
        return;
    }

    extent_reregister(tsdn, extent);
    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    /* Try to decommit; purge if that fails. */
    bool zeroed;
    if (!extent_committed_get(extent)) {
        zeroed = true;
    } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
        0, extent_size_get(extent))) {
        zeroed = true;
    } else if ((*r_extent_hooks)->purge_forced != NULL &&
        !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
        extent_base_get(extent), extent_size_get(extent), 0,
        extent_size_get(extent), arena_ind_get(arena))) {
        zeroed = true;
    } else if (extent_state_get(extent) == extent_state_muzzy ||
        ((*r_extent_hooks)->purge_lazy != NULL &&
        !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
        extent_base_get(extent), extent_size_get(extent), 0,
        extent_size_get(extent), arena_ind_get(arena)))) {
        zeroed = false;
    } else {
        zeroed = false;
    }
    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }
    extent_zeroed_set(extent, zeroed);

    if (config_prof) {
        extent_gdump_sub(tsdn, extent);
    }

    extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
        extent, false);
}
static void
extent_destroy_default_impl(void *addr, size_t size) {
    if (!have_dss || !extent_in_dss(addr)) {
        pages_unmap(addr, size);
    }
}
static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
    extent_destroy_default_impl(addr, size);
}
void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
    assert(extent_base_get(extent) != NULL);
    assert(extent_size_get(extent) != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    /* Deregister first to avoid a race with other allocating threads. */
    extent_deregister(tsdn, extent);

    extent_addr_set(extent, extent_base_get(extent));

    extent_hooks_assure_initialized(arena, r_extent_hooks);
    /* Try to destroy; silently fail otherwise. */
    if (*r_extent_hooks == &extent_hooks_default) {
        /* Call directly to propagate tsdn. */
        extent_destroy_default_impl(extent_base_get(extent),
            extent_size_get(extent));
    } else if ((*r_extent_hooks)->destroy != NULL) {
        extent_hook_pre_reentrancy(tsdn, arena);
        (*r_extent_hooks)->destroy(*r_extent_hooks,
            extent_base_get(extent), extent_size_get(extent),
            extent_committed_get(extent), arena_ind_get(arena));
        extent_hook_post_reentrancy(tsdn);
    }

    extent_dalloc(tsdn, arena, extent);
}
static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
    return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
        length);
}
static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    extent_hooks_assure_initialized(arena, r_extent_hooks);
    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    bool err = ((*r_extent_hooks)->commit == NULL ||
        (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
        extent_size_get(extent), offset, length, arena_ind_get(arena)));
    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }
    extent_committed_set(extent, extent_committed_get(extent) || !err);
    return err;
}
bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
    return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
        length, false);
}
static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
    return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
        length);
}
bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    extent_hooks_assure_initialized(arena, r_extent_hooks);

    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    bool err = ((*r_extent_hooks)->decommit == NULL ||
        (*r_extent_hooks)->decommit(*r_extent_hooks,
        extent_base_get(extent), extent_size_get(extent), offset, length,
        arena_ind_get(arena)));
    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }
    extent_committed_set(extent, extent_committed_get(extent) && err);
    return err;
}
#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
    assert(addr != NULL);
    assert((offset & PAGE_MASK) == 0);
    assert(length != 0);
    assert((length & PAGE_MASK) == 0);

    return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
        length);
}
#endif
static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    extent_hooks_assure_initialized(arena, r_extent_hooks);

    if ((*r_extent_hooks)->purge_lazy == NULL) {
        return true;
    }
    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
        extent_base_get(extent), extent_size_get(extent), offset, length,
        arena_ind_get(arena));
    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }

    return err;
}
bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
    return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
        offset, length, false);
}
#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
    assert(addr != NULL);
    assert((offset & PAGE_MASK) == 0);
    assert(length != 0);
    assert((length & PAGE_MASK) == 0);

    return pages_purge_forced((void *)((uintptr_t)addr +
        (uintptr_t)offset), length);
}
#endif
static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    extent_hooks_assure_initialized(arena, r_extent_hooks);

    if ((*r_extent_hooks)->purge_forced == NULL) {
        return true;
    }
    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
        extent_base_get(extent), extent_size_get(extent), offset, length,
        arena_ind_get(arena));
    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }

    return err;
}
bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
    return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
        offset, length, false);
}
#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
    return !maps_coalesce;
}
#endif
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained) {
    assert(extent_size_get(extent) == size_a + size_b);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    extent_hooks_assure_initialized(arena, r_extent_hooks);

    if ((*r_extent_hooks)->split == NULL) {
        return NULL;
    }

    extent_t *trail = extent_alloc(tsdn, arena);
    if (trail == NULL) {
        goto label_error_a;
    }

    extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
        size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
        extent_state_get(extent), extent_zeroed_get(extent),
        extent_committed_get(extent));

    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
    {
        extent_t lead;

        extent_init(&lead, arena, extent_addr_get(extent), size_a,
            slab_a, szind_a, extent_sn_get(extent),
            extent_state_get(extent), extent_zeroed_get(extent),
            extent_committed_get(extent));

        extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
            true, &lead_elm_a, &lead_elm_b);
    }
    rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
    extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
        &trail_elm_a, &trail_elm_b);

    if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
        || trail_elm_b == NULL) {
        goto label_error_b;
    }

    extent_lock2(tsdn, extent, trail);

    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
        size_a + size_b, size_a, size_b, extent_committed_get(extent),
        arena_ind_get(arena));
    if (*r_extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }
    if (err) {
        goto label_error_c;
    }

    extent_size_set(extent, size_a);
    extent_szind_set(extent, szind_a);

    extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
        szind_a, slab_a);
    extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
        szind_b, slab_b);

    extent_unlock2(tsdn, extent, trail);

    return trail;
label_error_c:
    extent_unlock2(tsdn, extent, trail);
label_error_b:
    extent_dalloc(tsdn, arena, trail);
label_error_a:
    return NULL;
}
extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
    return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
        szind_a, slab_a, size_b, szind_b, slab_b, false);
}
static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
    if (!maps_coalesce) {
        return true;
    }
    if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
        return true;
    }

    return false;
}
#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
    return extent_merge_default_impl(addr_a, addr_b);
}
#endif
static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    extent_hooks_assure_initialized(arena, r_extent_hooks);

    if ((*r_extent_hooks)->merge == NULL) {
        return true;
    }

    bool err;
    if (*r_extent_hooks == &extent_hooks_default) {
        /* Call directly to propagate tsdn. */
        err = extent_merge_default_impl(extent_base_get(a),
            extent_base_get(b));
    } else {
        extent_hook_pre_reentrancy(tsdn, arena);
        err = (*r_extent_hooks)->merge(*r_extent_hooks,
            extent_base_get(a), extent_size_get(a), extent_base_get(b),
            extent_size_get(b), extent_committed_get(a),
            arena_ind_get(arena));
        extent_hook_post_reentrancy(tsdn);
    }

    if (err) {
        return true;
    }

    /*
     * The rtree writes must happen while all the relevant elements are
     * owned, so the following code uses decomposed helper functions rather
     * than extent_{,de}register() to do things in the right order.
     */
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
    extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
        &a_elm_b);
    extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
        &b_elm_b);

    extent_lock2(tsdn, a, b);

    if (a_elm_b != NULL) {
        rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
            NSIZES, false);
    }
    if (b_elm_b != NULL) {
        rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
            NSIZES, false);
    } else {
        b_elm_b = b_elm_a;
    }

    extent_size_set(a, extent_size_get(a) + extent_size_get(b));
    extent_szind_set(a, NSIZES);
    extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
        extent_sn_get(a) : extent_sn_get(b));
    extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));

    extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);

    extent_unlock2(tsdn, a, b);

    extent_dalloc(tsdn, extent_arena_get(b), b);

    return false;
}
bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
    return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
}
bool
extent_boot(void) {
    if (rtree_new(&extents_rtree, true)) {
        return true;
    }

    if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
        WITNESS_RANK_EXTENT_POOL)) {
        return true;
    }

    if (have_dss) {
        extent_dss_boot();
    }

    return false;
}