1 #define JEMALLOC_ARENA_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
4 /******************************************************************************/
7 purge_mode_t opt_purge = PURGE_DEFAULT;
8 const char *purge_mode_names[] = {
13 ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
14 static ssize_t lg_dirty_mult_default;
15 ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
16 static ssize_t decay_time_default;
18 arena_bin_info_t arena_bin_info[NBINS];
21 size_t map_misc_offset;
22 size_t arena_maxrun; /* Max run size for arenas. */
23 size_t large_maxclass; /* Max large size class. */
24 unsigned nlclasses; /* Number of large size classes. */
25 unsigned nhclasses; /* Number of huge size classes. */
27 /******************************************************************************/
29  * Function prototypes for static functions that are referenced prior to definition.
33 static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
34 arena_chunk_t *chunk);
35 static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
37 static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
38 bool dirty, bool cleaned, bool decommitted);
39 static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
40 arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
41 static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
44 /******************************************************************************/
46 JEMALLOC_INLINE_C size_t
47 arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
50 size_t pageind, mapbits;
52 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
53 pageind = arena_miscelm_to_pageind(miscelm);
54 mapbits = arena_mapbits_get(chunk, pageind);
55 return (arena_mapbits_size_decode(mapbits));
58 JEMALLOC_INLINE_C const extent_node_t *
59 arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
63 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
64 return (&chunk->node);
68 arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
75 a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
76 b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
78 return ((a_sn > b_sn) - (a_sn < b_sn));
82 arena_ad_comp(const arena_chunk_map_misc_t *a,
83 const arena_chunk_map_misc_t *b)
85 uintptr_t a_miscelm = (uintptr_t)a;
86 uintptr_t b_miscelm = (uintptr_t)b;
91 return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
95 arena_snad_comp(const arena_chunk_map_misc_t *a,
96 const arena_chunk_map_misc_t *b)
103 ret = arena_sn_comp(a, b);
107 ret = arena_ad_comp(a, b);
111 /* Generate pairing heap functions. */
112 ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
113 ph_link, arena_snad_comp)
116 #undef run_quantize_floor
117 #define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
120 run_quantize_floor(size_t size)
126 assert(size <= HUGE_MAXCLASS);
127 assert((size & PAGE_MASK) == 0);
130 assert(size == PAGE_CEILING(size));
132 pind = psz2ind(size - large_pad + 1);
135 * Avoid underflow. This short-circuit would also do the right
136 * thing for all sizes in the range for which there are
137 * PAGE-spaced size classes, but it's simplest to just handle
138 * the one case that would cause erroneous results.
142 ret = pind2sz(pind - 1) + large_pad;
147 #undef run_quantize_floor
148 #define run_quantize_floor JEMALLOC_N(run_quantize_floor)
149 run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
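/*
 * Illustrative note (not upstream text): run_quantize_floor() maps a run size
 * down to the largest quantized run size (a PAGE-spaced/large size class plus
 * large_pad) no greater than it, and run_quantize_ceil() below maps it up to
 * the smallest such size.  For any page-aligned size s accepted by the asserts
 * above, the two satisfy
 *
 *     run_quantize_floor(s) <= s <= run_quantize_ceil(s)
 *     run_quantize_ceil(run_quantize_ceil(s)) == run_quantize_ceil(s)
 *
 * so a size that is already a quantized run size is returned unchanged by
 * both functions.
 */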
153 #undef run_quantize_ceil
154 #define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
157 run_quantize_ceil(size_t size)
162 assert(size <= HUGE_MAXCLASS);
163 assert((size & PAGE_MASK) == 0);
165 ret = run_quantize_floor(size);
168 * Skip a quantization that may have an adequately large run,
169 * because under-sized runs may be mixed in. This only happens
170 * when an unusual size is requested, i.e. for aligned
171 * allocation, and is just one of several places where linear
172 * search would potentially find sufficiently aligned available
173 * memory somewhere lower.
175 ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
180 #undef run_quantize_ceil
181 #define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
182 run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
186 arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
189 pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
190 arena_miscelm_get_const(chunk, pageind))));
191 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
193 assert((npages << LG_PAGE) < chunksize);
194 assert(pind2sz(pind) <= chunksize);
195 arena_run_heap_insert(&arena->runs_avail[pind],
196 arena_miscelm_get_mutable(chunk, pageind));
200 arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
203 pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
204 arena_miscelm_get_const(chunk, pageind))));
205 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
207 assert((npages << LG_PAGE) < chunksize);
208 assert(pind2sz(pind) <= chunksize);
209 arena_run_heap_remove(&arena->runs_avail[pind],
210 arena_miscelm_get_mutable(chunk, pageind));
214 arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
217 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
220 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
222 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
223 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
226 qr_new(&miscelm->rd, rd_link);
227 qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
228 arena->ndirty += npages;
232 arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
235 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
238 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
240 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
241 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
244 qr_remove(&miscelm->rd, rd_link);
245 assert(arena->ndirty >= npages);
246 arena->ndirty -= npages;
250 arena_chunk_dirty_npages(const extent_node_t *node)
253 return (extent_node_size_get(node) >> LG_PAGE);
257 arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
261 extent_node_dirty_linkage_init(node);
262 extent_node_dirty_insert(node, &arena->runs_dirty,
263 &arena->chunks_cache);
264 arena->ndirty += arena_chunk_dirty_npages(node);
269 arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
273 extent_node_dirty_remove(node);
274 assert(arena->ndirty >= arena_chunk_dirty_npages(node));
275 arena->ndirty -= arena_chunk_dirty_npages(node);
279 JEMALLOC_INLINE_C void *
280 arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
284 arena_chunk_map_misc_t *miscelm;
287 assert(run->nfree > 0);
288 assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
290 regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
291 miscelm = arena_run_to_miscelm(run);
292 rpages = arena_miscelm_to_rpages(miscelm);
293 ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
294 (uintptr_t)(bin_info->reg_interval * regind));
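/*
 * Worked example of the address computation above (hypothetical bin
 * parameters, not taken from the real bin tables): for a bin with
 * reg0_offset == 0 and reg_interval == 192, the region found at regind == 2
 * would be returned as
 *
 *     ret = rpages + 0 + 192 * 2 == rpages + 384
 *
 * i.e. regions are laid out at fixed reg_interval strides starting
 * reg0_offset bytes into the run's pages.
 */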
299 JEMALLOC_INLINE_C void
300 arena_run_reg_dalloc(arena_run_t *run, void *ptr)
302 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
303 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
304 size_t mapbits = arena_mapbits_get(chunk, pageind);
305 szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
306 arena_bin_info_t *bin_info = &arena_bin_info[binind];
307 size_t regind = arena_run_regind(run, bin_info, ptr);
309 assert(run->nfree < bin_info->nregs);
310 /* Freeing an interior pointer can cause assertion failure. */
311 assert(((uintptr_t)ptr -
312 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
313 (uintptr_t)bin_info->reg0_offset)) %
314 (uintptr_t)bin_info->reg_interval == 0);
315 assert((uintptr_t)ptr >=
316 (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
317 (uintptr_t)bin_info->reg0_offset);
318 /* Freeing an unallocated pointer can cause assertion failure. */
319 assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
321 bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
325 JEMALLOC_INLINE_C void
326 arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
329 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
330 (run_ind << LG_PAGE)), (npages << LG_PAGE));
331 memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
332 (npages << LG_PAGE));
335 JEMALLOC_INLINE_C void
336 arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
339 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
343 JEMALLOC_INLINE_C void
344 arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
347 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
349 arena_run_page_mark_zeroed(chunk, run_ind);
350 for (i = 0; i < PAGE / sizeof(size_t); i++)
355 arena_nactive_add(arena_t *arena, size_t add_pages)
359 size_t cactive_add = CHUNK_CEILING((arena->nactive +
360 add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
362 if (cactive_add != 0)
363 stats_cactive_add(cactive_add);
365 arena->nactive += add_pages;
369 arena_nactive_sub(arena_t *arena, size_t sub_pages)
373 size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
374 CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
375 if (cactive_sub != 0)
376 stats_cactive_sub(cactive_sub);
378 arena->nactive -= sub_pages;
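/*
 * Worked example of the cactive accounting above (assumes 4 KiB pages and
 * 2 MiB chunks, i.e. 512 pages per chunk; actual values depend on the build
 * configuration): growing nactive from 510 to 515 pages raises
 * CHUNK_CEILING(nactive << LG_PAGE) from 2 MiB to 4 MiB, so
 * stats_cactive_add() is called with 2 MiB, whereas growing from 100 to 105
 * pages stays under the same chunk ceiling and updates nothing.  Shrinking is
 * handled symmetrically by arena_nactive_sub().
 */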
382 arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
383 size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
385 size_t total_pages, rem_pages;
387 assert(flag_dirty == 0 || flag_decommitted == 0);
389 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
391 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
393 assert(need_pages <= total_pages);
394 rem_pages = total_pages - need_pages;
396 arena_avail_remove(arena, chunk, run_ind, total_pages);
398 arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
399 arena_nactive_add(arena, need_pages);
401 /* Keep track of trailing unused pages for later use. */
403 size_t flags = flag_dirty | flag_decommitted;
404 size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
407 arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
408 (rem_pages << LG_PAGE), flags |
409 (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
410 flag_unzeroed_mask));
411 arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
412 (rem_pages << LG_PAGE), flags |
413 (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
414 flag_unzeroed_mask));
415 if (flag_dirty != 0) {
416 arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
419 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
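/*
 * Illustrative example of the trailing-pages handling above: splitting a
 * 10-page clean, committed run with need_pages == 4 leaves a 6-page trailing
 * free run whose map entries keep their per-page unzeroed bits
 * (flag_unzeroed_mask == CHUNK_MAP_UNZEROED only when flags == 0).  If the
 * original run were dirty or decommitted, the unzeroed bits are dropped, and
 * a dirty remainder is additionally re-linked into runs_dirty.
 */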
424 arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
425 bool remove, bool zero)
427 arena_chunk_t *chunk;
428 arena_chunk_map_misc_t *miscelm;
429 size_t flag_dirty, flag_decommitted, run_ind, need_pages;
430 size_t flag_unzeroed_mask;
432 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
433 miscelm = arena_run_to_miscelm(run);
434 run_ind = arena_miscelm_to_pageind(miscelm);
435 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
436 flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
437 need_pages = (size >> LG_PAGE);
438 assert(need_pages > 0);
440 if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
441 run_ind << LG_PAGE, size, arena->ind))
445 arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
446 flag_decommitted, need_pages);
450 if (flag_decommitted != 0) {
451 /* The run is untouched, and therefore zeroed. */
452 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
453 *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
454 (need_pages << LG_PAGE));
455 } else if (flag_dirty != 0) {
456 /* The run is dirty, so all pages must be zeroed. */
457 arena_run_zero(chunk, run_ind, need_pages);
460 * The run is clean, so some pages may be zeroed (i.e.
461 * never before touched).
464 for (i = 0; i < need_pages; i++) {
465 if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
467 arena_run_zero(chunk, run_ind+i, 1);
468 else if (config_debug) {
469 arena_run_page_validate_zeroed(chunk,
472 arena_run_page_mark_zeroed(chunk,
478 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
479 (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
483 * Set the last element first, in case the run only contains one page
484 * (i.e. both statements set the same element).
486 flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
487 CHUNK_MAP_UNZEROED : 0;
488 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
489 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
490 run_ind+need_pages-1)));
491 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
492 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
497 arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
500 return (arena_run_split_large_helper(arena, run, size, true, zero));
504 arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
507 return (arena_run_split_large_helper(arena, run, size, false, zero));
511 arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
514 arena_chunk_t *chunk;
515 arena_chunk_map_misc_t *miscelm;
516 size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
518 assert(binind != BININD_INVALID);
520 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
521 miscelm = arena_run_to_miscelm(run);
522 run_ind = arena_miscelm_to_pageind(miscelm);
523 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
524 flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
525 need_pages = (size >> LG_PAGE);
526 assert(need_pages > 0);
528 if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
529 run_ind << LG_PAGE, size, arena->ind))
532 arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
533 flag_decommitted, need_pages);
535 for (i = 0; i < need_pages; i++) {
536 size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
538 arena_mapbits_small_set(chunk, run_ind+i, i, binind,
540 if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
541 arena_run_page_validate_zeroed(chunk, run_ind+i);
543 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
544 (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
548 static arena_chunk_t *
549 arena_chunk_init_spare(arena_t *arena)
551 arena_chunk_t *chunk;
553 assert(arena->spare != NULL);
555 chunk = arena->spare;
558 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
559 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
560 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
562 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
564 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
565 arena_mapbits_dirty_get(chunk, chunk_npages-1));
571 arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
572 size_t sn, bool zero)
576 * The extent node notion of "committed" doesn't directly apply to
577 * arena chunks. Arbitrarily mark them as committed. The commit state
578 * of runs is tracked individually, and upon chunk deallocation the
579 * entire chunk is in a consistent commit state.
581 extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
582 extent_node_achunk_set(&chunk->node, true);
583 return (chunk_register(tsdn, chunk, &chunk->node));
586 static arena_chunk_t *
587 arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
588 chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
590 arena_chunk_t *chunk;
593 malloc_mutex_unlock(tsdn, &arena->lock);
595 chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
596 NULL, chunksize, chunksize, &sn, zero, commit);
597 if (chunk != NULL && !*commit) {
599 if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
600 LG_PAGE, arena->ind)) {
601 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
602 (void *)chunk, chunksize, sn, *zero, *commit);
606 if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
609 /* Undo commit of header. */
610 chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
611 LG_PAGE, arena->ind);
613 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
614 chunksize, sn, *zero, *commit);
618 malloc_mutex_lock(tsdn, &arena->lock);
622 static arena_chunk_t *
623 arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
626 arena_chunk_t *chunk;
627 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
630 chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
631 chunksize, &sn, zero, commit, true);
633 if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
634 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
635 chunksize, sn, true);
640 chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
641 &chunk_hooks, zero, commit);
644 if (config_stats && chunk != NULL) {
645 arena->stats.mapped += chunksize;
646 arena->stats.metadata_mapped += (map_bias << LG_PAGE);
652 static arena_chunk_t *
653 arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
655 arena_chunk_t *chunk;
657 size_t flag_unzeroed, flag_decommitted, i;
659 assert(arena->spare == NULL);
663 chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
667 chunk->hugepage = true;
670 * Initialize the map to contain one maximal free untouched run. Mark
671 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
672 * or decommitted chunk.
674 flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
675 flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
676 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
677 flag_unzeroed | flag_decommitted);
679 * There is no need to initialize the internal page map entries unless
680 * the chunk is not zeroed.
683 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
684 (void *)arena_bitselm_get_const(chunk, map_bias+1),
685 (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
687 (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
688 for (i = map_bias+1; i < chunk_npages-1; i++)
689 arena_mapbits_internal_set(chunk, i, flag_unzeroed);
691 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
692 *)arena_bitselm_get_const(chunk, map_bias+1),
693 (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
695 (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
697 for (i = map_bias+1; i < chunk_npages-1; i++) {
698 assert(arena_mapbits_unzeroed_get(chunk, i) ==
703 arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
709 static arena_chunk_t *
710 arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
712 arena_chunk_t *chunk;
714 if (arena->spare != NULL)
715 chunk = arena_chunk_init_spare(arena);
717 chunk = arena_chunk_init_hard(tsdn, arena);
722 ql_elm_new(&chunk->node, ql_link);
723 ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
724 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
730 arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
734 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
736 chunk_deregister(chunk, &chunk->node);
738 sn = extent_node_sn_get(&chunk->node);
739 hugepage = chunk->hugepage;
740 committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
743 * Decommit the header. Mark the chunk as decommitted even if
744 * header decommit fails, since treating a partially committed
745 * chunk as committed has a high potential for causing later
746 * access of decommitted memory.
748 chunk_hooks = chunk_hooks_get(tsdn, arena);
749 chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
754 * Convert chunk back to the default state, so that all
755 * subsequent chunk allocations start out with chunks that can
756 * be backed by transparent huge pages.
758 pages_huge(chunk, chunksize);
761 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
765 arena->stats.mapped -= chunksize;
766 arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
771 arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
774 assert(arena->spare != spare);
776 if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
777 arena_run_dirty_remove(arena, spare, map_bias,
778 chunk_npages-map_bias);
781 arena_chunk_discard(tsdn, arena, spare);
785 arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
787 arena_chunk_t *spare;
789 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
790 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
791 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
793 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
795 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
796 arena_mapbits_dirty_get(chunk, chunk_npages-1));
797 assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
798 arena_mapbits_decommitted_get(chunk, chunk_npages-1));
800 /* Remove run from runs_avail, so that the arena does not use it. */
801 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
803 ql_remove(&arena->achunks, &chunk->node, ql_link);
804 spare = arena->spare;
805 arena->spare = chunk;
807 arena_spare_discard(tsdn, arena, spare);
811 arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
813 szind_t index = size2index(usize) - nlclasses - NBINS;
815 cassert(config_stats);
817 arena->stats.nmalloc_huge++;
818 arena->stats.allocated_huge += usize;
819 arena->stats.hstats[index].nmalloc++;
820 arena->stats.hstats[index].curhchunks++;
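/*
 * Index arithmetic sketch (illustrative class counts, not this build's real
 * values): size2index() numbers all size classes consecutively, so with
 * NBINS == 39 small classes and nlclasses == 28 large classes the smallest
 * huge class would have size2index(usize) == 67 and therefore index == 0
 * into stats.hstats.  The same offset is used by the undo/dalloc variants
 * below so the per-class counters stay consistent.
 */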
824 arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
826 szind_t index = size2index(usize) - nlclasses - NBINS;
828 cassert(config_stats);
830 arena->stats.nmalloc_huge--;
831 arena->stats.allocated_huge -= usize;
832 arena->stats.hstats[index].nmalloc--;
833 arena->stats.hstats[index].curhchunks--;
837 arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
839 szind_t index = size2index(usize) - nlclasses - NBINS;
841 cassert(config_stats);
843 arena->stats.ndalloc_huge++;
844 arena->stats.allocated_huge -= usize;
845 arena->stats.hstats[index].ndalloc++;
846 arena->stats.hstats[index].curhchunks--;
850 arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
852 szind_t index = size2index(usize) - nlclasses - NBINS;
854 cassert(config_stats);
856 arena->stats.ndalloc_huge++;
857 arena->stats.hstats[index].ndalloc--;
861 arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
863 szind_t index = size2index(usize) - nlclasses - NBINS;
865 cassert(config_stats);
867 arena->stats.ndalloc_huge--;
868 arena->stats.allocated_huge += usize;
869 arena->stats.hstats[index].ndalloc--;
870 arena->stats.hstats[index].curhchunks++;
874 arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
877 arena_huge_dalloc_stats_update(arena, oldsize);
878 arena_huge_malloc_stats_update(arena, usize);
882 arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
886 arena_huge_dalloc_stats_update_undo(arena, oldsize);
887 arena_huge_malloc_stats_update_undo(arena, usize);
891 arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
895 malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
896 node = ql_last(&arena->node_cache, ql_link);
898 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
899 return (base_alloc(tsdn, sizeof(extent_node_t)));
901 ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
902 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
907 arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
910 malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
911 ql_elm_new(node, ql_link);
912 ql_tail_insert(&arena->node_cache, node, ql_link);
913 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
917 arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
918 chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
919 bool *zero, size_t csize)
924 ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
925 alignment, sn, zero, &commit);
927 /* Revert optimistic stats updates. */
928 malloc_mutex_lock(tsdn, &arena->lock);
930 arena_huge_malloc_stats_update_undo(arena, usize);
931 arena->stats.mapped -= usize;
933 arena_nactive_sub(arena, usize >> LG_PAGE);
934 malloc_mutex_unlock(tsdn, &arena->lock);
941 arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
942 size_t alignment, size_t *sn, bool *zero)
945 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
946 size_t csize = CHUNK_CEILING(usize);
949 malloc_mutex_lock(tsdn, &arena->lock);
951 /* Optimistically update stats. */
953 arena_huge_malloc_stats_update(arena, usize);
954 arena->stats.mapped += usize;
956 arena_nactive_add(arena, usize >> LG_PAGE);
958 ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
959 alignment, sn, zero, &commit, true);
960 malloc_mutex_unlock(tsdn, &arena->lock);
962 ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
963 usize, alignment, sn, zero, csize);
970 arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
973 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
976 csize = CHUNK_CEILING(usize);
977 malloc_mutex_lock(tsdn, &arena->lock);
979 arena_huge_dalloc_stats_update(arena, usize);
980 arena->stats.mapped -= usize;
982 arena_nactive_sub(arena, usize >> LG_PAGE);
984 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
985 malloc_mutex_unlock(tsdn, &arena->lock);
989 arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
990 size_t oldsize, size_t usize)
993 assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
994 assert(oldsize != usize);
996 malloc_mutex_lock(tsdn, &arena->lock);
998 arena_huge_ralloc_stats_update(arena, oldsize, usize);
1000 arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
1002 arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
1003 malloc_mutex_unlock(tsdn, &arena->lock);
1007 arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
1008 size_t oldsize, size_t usize, size_t sn)
1010 size_t udiff = oldsize - usize;
1011 size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
1013 malloc_mutex_lock(tsdn, &arena->lock);
1015 arena_huge_ralloc_stats_update(arena, oldsize, usize);
1017 arena->stats.mapped -= cdiff;
1019 arena_nactive_sub(arena, udiff >> LG_PAGE);
1022 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
1023 void *nchunk = (void *)((uintptr_t)chunk +
1024 CHUNK_CEILING(usize));
1026 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
1029 malloc_mutex_unlock(tsdn, &arena->lock);
1033 arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
1034 chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
1035 size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
1040 err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
1041 chunksize, sn, zero, &commit) == NULL);
1043 /* Revert optimistic stats updates. */
1044 malloc_mutex_lock(tsdn, &arena->lock);
1046 arena_huge_ralloc_stats_update_undo(arena, oldsize,
1048 arena->stats.mapped -= cdiff;
1050 arena_nactive_sub(arena, udiff >> LG_PAGE);
1051 malloc_mutex_unlock(tsdn, &arena->lock);
1052 } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1053 cdiff, true, arena->ind)) {
1054 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
1062 arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
1063 size_t oldsize, size_t usize, bool *zero)
1066 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
1067 void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
1068 size_t udiff = usize - oldsize;
1069 size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
1073 malloc_mutex_lock(tsdn, &arena->lock);
1075 /* Optimistically update stats. */
1077 arena_huge_ralloc_stats_update(arena, oldsize, usize);
1078 arena->stats.mapped += cdiff;
1080 arena_nactive_add(arena, udiff >> LG_PAGE);
1082 err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
1083 chunksize, &sn, zero, &commit, true) == NULL);
1084 malloc_mutex_unlock(tsdn, &arena->lock);
1086 err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
1087 &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
1089 } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1090 cdiff, true, arena->ind)) {
1091 chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
1100 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
1101  * Run sizes are indexed, so not all candidate runs are necessarily exactly the same size.
1104 static arena_run_t *
1105 arena_run_first_best_fit(arena_t *arena, size_t size)
1109 pind = psz2ind(run_quantize_ceil(size));
1111 for (i = pind; pind2sz(i) <= chunksize; i++) {
1112 arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
1113 &arena->runs_avail[i]);
1114 if (miscelm != NULL)
1115 return (&miscelm->run);
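/*
 * Example of the search order (illustrative): a request whose quantized size
 * maps to pind == 5 first consults the runs_avail[5] heap, then
 * runs_avail[6], and so on, stopping once pind2sz(i) would exceed chunksize.
 * The first non-empty heap yields its minimum run under arena_snad_comp()
 * (lowest serial number, then lowest address), which is why this is "first
 * best fit" rather than plain best fit.
 */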
1121 static arena_run_t *
1122 arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
1124 arena_run_t *run = arena_run_first_best_fit(arena, size);
1126 if (arena_run_split_large(arena, run, size, zero))
1132 static arena_run_t *
1133 arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
1135 arena_chunk_t *chunk;
1138 assert(size <= arena_maxrun);
1139 assert(size == PAGE_CEILING(size));
1141 /* Search the arena's chunks for the lowest best fit. */
1142 run = arena_run_alloc_large_helper(arena, size, zero);
1147 * No usable runs. Create a new chunk from which to allocate the run.
1149 chunk = arena_chunk_alloc(tsdn, arena);
1150 if (chunk != NULL) {
1151 run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
1152 if (arena_run_split_large(arena, run, size, zero))
1158 * arena_chunk_alloc() failed, but another thread may have made
1159 * sufficient memory available while this one dropped arena->lock in
1160 * arena_chunk_alloc(), so search one more time.
1162 return (arena_run_alloc_large_helper(arena, size, zero));
1165 static arena_run_t *
1166 arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
1168 arena_run_t *run = arena_run_first_best_fit(arena, size);
1170 if (arena_run_split_small(arena, run, size, binind))
1176 static arena_run_t *
1177 arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
1179 arena_chunk_t *chunk;
1182 assert(size <= arena_maxrun);
1183 assert(size == PAGE_CEILING(size));
1184 assert(binind != BININD_INVALID);
1186 /* Search the arena's chunks for the lowest best fit. */
1187 run = arena_run_alloc_small_helper(arena, size, binind);
1192 * No usable runs. Create a new chunk from which to allocate the run.
1194 chunk = arena_chunk_alloc(tsdn, arena);
1195 if (chunk != NULL) {
1196 run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
1197 if (arena_run_split_small(arena, run, size, binind))
1203 * arena_chunk_alloc() failed, but another thread may have made
1204 * sufficient memory available while this one dropped arena->lock in
1205 * arena_chunk_alloc(), so search one more time.
1207 return (arena_run_alloc_small_helper(arena, size, binind));
1211 arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
1214 return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
1219 arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
1221 ssize_t lg_dirty_mult;
1223 malloc_mutex_lock(tsdn, &arena->lock);
1224 lg_dirty_mult = arena->lg_dirty_mult;
1225 malloc_mutex_unlock(tsdn, &arena->lock);
1227 return (lg_dirty_mult);
1231 arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
1234 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
1237 malloc_mutex_lock(tsdn, &arena->lock);
1238 arena->lg_dirty_mult = lg_dirty_mult;
1239 arena_maybe_purge(tsdn, arena);
1240 malloc_mutex_unlock(tsdn, &arena->lock);
1246 arena_decay_deadline_init(arena_t *arena)
1249 assert(opt_purge == purge_mode_decay);
1252 * Generate a new deadline that is uniformly random within the next
1253 * epoch after the current one.
1255 nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
1256 nstime_add(&arena->decay.deadline, &arena->decay.interval);
1257 if (arena->decay.time > 0) {
1260 nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
1261 nstime_ns(&arena->decay.interval)));
1262 nstime_add(&arena->decay.deadline, &jitter);
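/*
 * Worked example (assumes opt_decay_time == 10 seconds and
 * SMOOTHSTEP_NSTEPS == 200, which are plausible but configuration dependent):
 * each epoch interval is 10 s / 200 == 50 ms, so the next deadline is
 * epoch + 50 ms plus a uniformly random jitter in [0, 50 ms), which spreads
 * epoch advances out rather than synchronizing them.
 */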
1267 arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
1270 assert(opt_purge == purge_mode_decay);
1272 return (nstime_compare(&arena->decay.deadline, time) <= 0);
1276 arena_decay_backlog_npages_limit(const arena_t *arena)
1278 static const uint64_t h_steps[] = {
1279 #define STEP(step, h, x, y) \
1285 size_t npages_limit_backlog;
1288 assert(opt_purge == purge_mode_decay);
1291 * For each element of decay_backlog, multiply by the corresponding
1292 * fixed-point smoothstep decay factor. Sum the products, then divide
1293 * to round down to the nearest whole number of pages.
1296 for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
1297 sum += arena->decay.backlog[i] * h_steps[i];
1298 npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
1300 return (npages_limit_backlog);
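/*
 * Worked example of the fixed-point math above (illustrative): h_steps[]
 * holds smoothstep factors scaled by 2^SMOOTHSTEP_BFP.  Each backlog slot's
 * page count is multiplied by its factor, the products are summed, and the
 * sum is shifted right by SMOOTHSTEP_BFP to yield whole pages; e.g. a slot
 * holding 1000 pages whose factor corresponds to 0.75 contributes
 * (1000 * h_steps[i]) >> SMOOTHSTEP_BFP == 750 pages to the limit.
 */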
1304 arena_decay_backlog_update_last(arena_t *arena)
1306 size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
1307 arena->ndirty - arena->decay.ndirty : 0;
1308 arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
1312 arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
1315 if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
1316 memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
1319 size_t nadvance_z = (size_t)nadvance_u64;
1321 assert((uint64_t)nadvance_z == nadvance_u64);
1323 memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
1324 (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
1325 if (nadvance_z > 1) {
1326 memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
1327 nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
1331 arena_decay_backlog_update_last(arena);
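/*
 * Example of the sliding window above (illustrative): if three epochs elapsed
 * since the last update (nadvance_u64 == 3), the backlog is shifted left by
 * three slots, the two slots preceding the final one are zeroed, and
 * arena_decay_backlog_update_last() records the pages dirtied since the
 * previous update in the final slot.  Advancing by SMOOTHSTEP_NSTEPS or more
 * simply clears the whole history before recording the latest delta.
 */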
1335 arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
1337 uint64_t nadvance_u64;
1340 assert(opt_purge == purge_mode_decay);
1341 assert(arena_decay_deadline_reached(arena, time));
1343 nstime_copy(&delta, time);
1344 nstime_subtract(&delta, &arena->decay.epoch);
1345 nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
1346 assert(nadvance_u64 > 0);
1348 /* Add nadvance_u64 decay intervals to epoch. */
1349 nstime_copy(&delta, &arena->decay.interval);
1350 nstime_imultiply(&delta, nadvance_u64);
1351 nstime_add(&arena->decay.epoch, &delta);
1353 /* Set a new deadline. */
1354 arena_decay_deadline_init(arena);
1356 /* Update the backlog. */
1357 arena_decay_backlog_update(arena, nadvance_u64);
1361 arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
1363 size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
1365 if (arena->ndirty > ndirty_limit)
1366 arena_purge_to_limit(tsdn, arena, ndirty_limit);
1367 arena->decay.ndirty = arena->ndirty;
1371 arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
1374 arena_decay_epoch_advance_helper(arena, time);
1375 arena_decay_epoch_advance_purge(tsdn, arena);
1379 arena_decay_init(arena_t *arena, ssize_t decay_time)
1382 arena->decay.time = decay_time;
1383 if (decay_time > 0) {
1384 nstime_init2(&arena->decay.interval, decay_time, 0);
1385 nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
1388 nstime_init(&arena->decay.epoch, 0);
1389 nstime_update(&arena->decay.epoch);
1390 arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
1391 arena_decay_deadline_init(arena);
1392 arena->decay.ndirty = arena->ndirty;
1393 memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
1397 arena_decay_time_valid(ssize_t decay_time)
1400 if (decay_time < -1)
1402 if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
1408 arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
1412 malloc_mutex_lock(tsdn, &arena->lock);
1413 decay_time = arena->decay.time;
1414 malloc_mutex_unlock(tsdn, &arena->lock);
1416 return (decay_time);
1420 arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
1423 if (!arena_decay_time_valid(decay_time))
1426 malloc_mutex_lock(tsdn, &arena->lock);
1428 * Restart decay backlog from scratch, which may cause many dirty pages
1429 * to be immediately purged. It would conceptually be possible to map
1430 * the old backlog onto the new backlog, but there is no justification
1431 * for such complexity since decay_time changes are intended to be
1432 * infrequent, either between the {-1, 0, >0} states, or a one-time
1433 * arbitrary change during initial arena configuration.
1435 arena_decay_init(arena, decay_time);
1436 arena_maybe_purge(tsdn, arena);
1437 malloc_mutex_unlock(tsdn, &arena->lock);
1443 arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
1446 assert(opt_purge == purge_mode_ratio);
1448 /* Don't purge if the option is disabled. */
1449 if (arena->lg_dirty_mult < 0)
1453  * Iterate, since preventing recursive purging could otherwise leave too many dirty pages.
1457 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1458 if (threshold < chunk_npages)
1459 threshold = chunk_npages;
1461  * Don't purge unless the number of purgeable pages exceeds the threshold.
1464 if (arena->ndirty <= threshold)
1466 arena_purge_to_limit(tsdn, arena, threshold);
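/*
 * Worked example (assumes lg_dirty_mult == 3, the historical 8:1
 * active:dirty default, and chunk_npages == 512): with 32768 active pages
 * the threshold is 32768 >> 3 == 4096 dirty pages, so purging starts only
 * once ndirty exceeds 4096 and then proceeds down to that threshold.  With
 * only 2048 active pages the computed threshold (256) is clamped up to
 * chunk_npages, so small arenas are not purged aggressively.
 */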
1471 arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
1475 assert(opt_purge == purge_mode_decay);
1477 /* Purge all or nothing if the option is disabled. */
1478 if (arena->decay.time <= 0) {
1479 if (arena->decay.time == 0)
1480 arena_purge_to_limit(tsdn, arena, 0);
1484 nstime_init(&time, 0);
1485 nstime_update(&time);
1486 if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
1489 * Time went backwards. Move the epoch back in time and
1490 * generate a new deadline, with the expectation that time
1491 * typically flows forward for long enough periods of time that
1492 * epochs complete. Unfortunately, this strategy is susceptible
1493 * to clock jitter triggering premature epoch advances, but
1494 * clock jitter estimation and compensation isn't feasible here
1495 * because calls into this code are event-driven.
1497 nstime_copy(&arena->decay.epoch, &time);
1498 arena_decay_deadline_init(arena);
1500 /* Verify that time does not go backwards. */
1501 assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
1505 * If the deadline has been reached, advance to the current epoch and
1506 * purge to the new limit if necessary. Note that dirty pages created
1507 * during the current epoch are not subject to purge until a future
1508 * epoch, so as a result purging only happens during epoch advances.
1510 if (arena_decay_deadline_reached(arena, &time))
1511 arena_decay_epoch_advance(tsdn, arena, &time);
1515 arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
1518 /* Don't recursively purge. */
1522 if (opt_purge == purge_mode_ratio)
1523 arena_maybe_purge_ratio(tsdn, arena);
1525 arena_maybe_purge_decay(tsdn, arena);
1529 arena_dirty_count(arena_t *arena)
1532 arena_runs_dirty_link_t *rdelm;
1533 extent_node_t *chunkselm;
1535 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1536 chunkselm = qr_next(&arena->chunks_cache, cc_link);
1537 rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
1540 if (rdelm == &chunkselm->rd) {
1541 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
1542 chunkselm = qr_next(chunkselm, cc_link);
1544 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1546 arena_chunk_map_misc_t *miscelm =
1547 arena_rd_to_miscelm(rdelm);
1548 size_t pageind = arena_miscelm_to_pageind(miscelm);
1549 assert(arena_mapbits_allocated_get(chunk, pageind) ==
1551 assert(arena_mapbits_large_get(chunk, pageind) == 0);
1552 assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1553 npages = arena_mapbits_unallocated_size_get(chunk,
1554 pageind) >> LG_PAGE;
1563 arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1564 size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
1565 extent_node_t *purge_chunks_sentinel)
1567 arena_runs_dirty_link_t *rdelm, *rdelm_next;
1568 extent_node_t *chunkselm;
1569 size_t nstashed = 0;
1571 /* Stash runs/chunks according to ndirty_limit. */
1572 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1573 chunkselm = qr_next(&arena->chunks_cache, cc_link);
1574 rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
1576 rdelm_next = qr_next(rdelm, rd_link);
1578 if (rdelm == &chunkselm->rd) {
1579 extent_node_t *chunkselm_next;
1584 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
1585 if (opt_purge == purge_mode_decay && arena->ndirty -
1586 (nstashed + npages) < ndirty_limit)
1589 chunkselm_next = qr_next(chunkselm, cc_link);
1591 * Allocate. chunkselm remains valid due to the
1592 * dalloc_node=false argument to chunk_alloc_cache().
1596 chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
1597 extent_node_addr_get(chunkselm),
1598 extent_node_size_get(chunkselm), chunksize, &sn,
1599 &zero, &commit, false);
1600 assert(chunk == extent_node_addr_get(chunkselm));
1601 assert(zero == extent_node_zeroed_get(chunkselm));
1602 extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
1603 purge_chunks_sentinel);
1604 assert(npages == (extent_node_size_get(chunkselm) >>
1606 chunkselm = chunkselm_next;
1608 arena_chunk_t *chunk =
1609 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1610 arena_chunk_map_misc_t *miscelm =
1611 arena_rd_to_miscelm(rdelm);
1612 size_t pageind = arena_miscelm_to_pageind(miscelm);
1613 arena_run_t *run = &miscelm->run;
1615 arena_mapbits_unallocated_size_get(chunk, pageind);
1617 npages = run_size >> LG_PAGE;
1618 if (opt_purge == purge_mode_decay && arena->ndirty -
1619 (nstashed + npages) < ndirty_limit)
1622 assert(pageind + npages <= chunk_npages);
1623 assert(arena_mapbits_dirty_get(chunk, pageind) ==
1624 arena_mapbits_dirty_get(chunk, pageind+npages-1));
1627 * If purging the spare chunk's run, make it available
1628 * prior to allocation.
1630 if (chunk == arena->spare)
1631 arena_chunk_alloc(tsdn, arena);
1633 /* Temporarily allocate the free dirty run. */
1634 arena_run_split_large(arena, run, run_size, false);
1637 qr_new(rdelm, rd_link); /* Redundant. */
1639 assert(qr_next(rdelm, rd_link) == rdelm);
1640 assert(qr_prev(rdelm, rd_link) == rdelm);
1642 qr_meld(purge_runs_sentinel, rdelm, rd_link);
1646 if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
1655 arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1656 arena_runs_dirty_link_t *purge_runs_sentinel,
1657 extent_node_t *purge_chunks_sentinel)
1659 size_t npurged, nmadvise;
1660 arena_runs_dirty_link_t *rdelm;
1661 extent_node_t *chunkselm;
1667 malloc_mutex_unlock(tsdn, &arena->lock);
1668 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1669 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1670 rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
1673 if (rdelm == &chunkselm->rd) {
1675 * Don't actually purge the chunk here because 1)
1676 * chunkselm is embedded in the chunk and must remain
1677 * valid, and 2) we deallocate the chunk in
1678 * arena_unstash_purged(), where it is destroyed,
1679 * decommitted, or purged, depending on chunk
1680 * deallocation policy.
1682 size_t size = extent_node_size_get(chunkselm);
1683 npages = size >> LG_PAGE;
1684 chunkselm = qr_next(chunkselm, cc_link);
1686 size_t pageind, run_size, flag_unzeroed, flags, i;
1688 arena_chunk_t *chunk =
1689 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1690 arena_chunk_map_misc_t *miscelm =
1691 arena_rd_to_miscelm(rdelm);
1692 pageind = arena_miscelm_to_pageind(miscelm);
1693 run_size = arena_mapbits_large_size_get(chunk, pageind);
1694 npages = run_size >> LG_PAGE;
1697 * If this is the first run purged within chunk, mark
1698 * the chunk as non-huge. This will prevent all use of
1699 * transparent huge pages for this chunk until the chunk
1700 * as a whole is deallocated.
1702 if (chunk->hugepage) {
1703 pages_nohuge(chunk, chunksize);
1704 chunk->hugepage = false;
1707 assert(pageind + npages <= chunk_npages);
1708 assert(!arena_mapbits_decommitted_get(chunk, pageind));
1709 assert(!arena_mapbits_decommitted_get(chunk,
1711 decommitted = !chunk_hooks->decommit(chunk, chunksize,
1712 pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1715 flags = CHUNK_MAP_DECOMMITTED;
1717 flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
1718 chunk_hooks, chunk, chunksize, pageind <<
1719 LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1720 flags = flag_unzeroed;
1722 arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1724 arena_mapbits_large_set(chunk, pageind, run_size,
1728 * Set the unzeroed flag for internal pages, now that
1729 * chunk_purge_wrapper() has returned whether the pages
1730 * were zeroed as a side effect of purging. This chunk
1731 * map modification is safe even though the arena mutex
1732 * isn't currently owned by this thread, because the run
1733 * is marked as allocated, thus protecting it from being
1734 * modified by any other thread. As long as these
1735 * writes don't perturb the first and last elements'
1736 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1738 for (i = 1; i < npages-1; i++) {
1739 arena_mapbits_internal_set(chunk, pageind+i,
1748 malloc_mutex_lock(tsdn, &arena->lock);
1751 arena->stats.nmadvise += nmadvise;
1752 arena->stats.purged += npurged;
1759 arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1760 arena_runs_dirty_link_t *purge_runs_sentinel,
1761 extent_node_t *purge_chunks_sentinel)
1763 arena_runs_dirty_link_t *rdelm, *rdelm_next;
1764 extent_node_t *chunkselm;
1766 /* Deallocate chunks/runs. */
1767 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1768 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1769 rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1770 rdelm_next = qr_next(rdelm, rd_link);
1771 if (rdelm == &chunkselm->rd) {
1772 extent_node_t *chunkselm_next = qr_next(chunkselm,
1774 void *addr = extent_node_addr_get(chunkselm);
1775 size_t size = extent_node_size_get(chunkselm);
1776 size_t sn = extent_node_sn_get(chunkselm);
1777 bool zeroed = extent_node_zeroed_get(chunkselm);
1778 bool committed = extent_node_committed_get(chunkselm);
1779 extent_node_dirty_remove(chunkselm);
1780 arena_node_dalloc(tsdn, arena, chunkselm);
1781 chunkselm = chunkselm_next;
1782 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
1783 size, sn, zeroed, committed);
1785 arena_chunk_t *chunk =
1786 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1787 arena_chunk_map_misc_t *miscelm =
1788 arena_rd_to_miscelm(rdelm);
1789 size_t pageind = arena_miscelm_to_pageind(miscelm);
1790 bool decommitted = (arena_mapbits_decommitted_get(chunk,
1792 arena_run_t *run = &miscelm->run;
1793 qr_remove(rdelm, rd_link);
1794 arena_run_dalloc(tsdn, arena, run, false, true,
1801 * NB: ndirty_limit is interpreted differently depending on opt_purge:
1802  * - purge_mode_ratio: Purge as few dirty runs/chunks as possible to reach the threshold:
1804 * (arena->ndirty <= ndirty_limit)
1805 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1806 * violating the invariant:
1807 * (arena->ndirty >= ndirty_limit)
1810 arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
1812 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
1813 size_t npurge, npurged;
1814 arena_runs_dirty_link_t purge_runs_sentinel;
1815 extent_node_t purge_chunks_sentinel;
1817 arena->purging = true;
1820 * Calls to arena_dirty_count() are disabled even for debug builds
1821 * because overhead grows nonlinearly as memory usage increases.
1823 if (false && config_debug) {
1824 size_t ndirty = arena_dirty_count(arena);
1825 assert(ndirty == arena->ndirty);
1827 assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1828 arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
1830 qr_new(&purge_runs_sentinel, rd_link);
1831 extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1833 npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
1834 &purge_runs_sentinel, &purge_chunks_sentinel);
1837 npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
1838 &purge_runs_sentinel, &purge_chunks_sentinel);
1839 assert(npurged == npurge);
1840 arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
1841 &purge_chunks_sentinel);
1844 arena->stats.npurge++;
1847 arena->purging = false;
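/*
 * Example of the two ndirty_limit interpretations (illustrative numbers): in
 * ratio mode a call with ndirty_limit == 1024 keeps stashing dirty
 * runs/chunks until arena->ndirty - nstashed drops to 1024 or below, whereas
 * in decay mode a call with ndirty_limit == 750 stops stashing before any
 * run/chunk whose pages would push the post-purge total below 750, so no
 * more is purged than the decay curve allows.
 */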
1851 arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
1854 malloc_mutex_lock(tsdn, &arena->lock);
1856 arena_purge_to_limit(tsdn, arena, 0);
1858 arena_maybe_purge(tsdn, arena);
1859 malloc_mutex_unlock(tsdn, &arena->lock);
1863 arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
1865 size_t pageind, npages;
1867 cassert(config_prof);
1871  * Iterate over the allocated runs and remove profiled allocations from the sample set.
1874 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
1875 if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
1876 if (arena_mapbits_large_get(chunk, pageind) != 0) {
1877 void *ptr = (void *)((uintptr_t)chunk + (pageind
1879 size_t usize = isalloc(tsd_tsdn(tsd), ptr,
1882 prof_free(tsd, ptr, usize);
1883 npages = arena_mapbits_large_size_get(chunk,
1884 pageind) >> LG_PAGE;
1886 /* Skip small run. */
1887 size_t binind = arena_mapbits_binind_get(chunk,
1889 arena_bin_info_t *bin_info =
1890 &arena_bin_info[binind];
1891 npages = bin_info->run_size >> LG_PAGE;
1894 /* Skip unallocated run. */
1895 npages = arena_mapbits_unallocated_size_get(chunk,
1896 pageind) >> LG_PAGE;
1898 assert(pageind + npages <= chunk_npages);
1903 arena_reset(tsd_t *tsd, arena_t *arena)
1906 extent_node_t *node;
1909 * Locking in this function is unintuitive. The caller guarantees that
1910 * no concurrent operations are happening in this arena, but there are
1911 * still reasons that some locking is necessary:
1913 * - Some of the functions in the transitive closure of calls assume
1914 * appropriate locks are held, and in some cases these locks are
1915  *   temporarily dropped to avoid lock order reversal or deadlock due to reentry.
1917 * - mallctl("epoch", ...) may concurrently refresh stats. While
1918 * strictly speaking this is a "concurrent operation", disallowing
1919 * stats refreshes would impose an inconvenient burden.
1922 /* Remove large allocations from prof sample set. */
1923 if (config_prof && opt_prof) {
1924 ql_foreach(node, &arena->achunks, ql_link) {
1925 arena_achunk_prof_reset(tsd, arena,
1926 extent_node_addr_get(node));
1930 /* Reset curruns for large size classes. */
1932 for (i = 0; i < nlclasses; i++)
1933 arena->stats.lstats[i].curruns = 0;
1936 /* Huge allocations. */
1937 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
1938 for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
1939 ql_last(&arena->huge, ql_link)) {
1940 void *ptr = extent_node_addr_get(node);
1943 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
1944 if (config_stats || (config_prof && opt_prof))
1945 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
1946 /* Remove huge allocation from prof sample set. */
1947 if (config_prof && opt_prof)
1948 prof_free(tsd, ptr, usize);
1949 huge_dalloc(tsd_tsdn(tsd), ptr);
1950 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
1951 /* Cancel out unwanted effects on stats. */
1953 arena_huge_reset_stats_cancel(arena, usize);
1955 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
1957 malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
1960 for (i = 0; i < NBINS; i++) {
1961 arena_bin_t *bin = &arena->bins[i];
1962 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1964 arena_run_heap_new(&bin->runs);
1966 bin->stats.curregs = 0;
1967 bin->stats.curruns = 0;
1969 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1973 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
1974 * chains directly correspond.
1976 qr_new(&arena->runs_dirty, rd_link);
1977 for (node = qr_next(&arena->chunks_cache, cc_link);
1978 node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
1979 qr_new(&node->rd, rd_link);
1980 qr_meld(&arena->runs_dirty, &node->rd, rd_link);
1984 for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
1985 ql_last(&arena->achunks, ql_link)) {
1986 ql_remove(&arena->achunks, node, ql_link);
1987 arena_chunk_discard(tsd_tsdn(tsd), arena,
1988 extent_node_addr_get(node));
1992 if (arena->spare != NULL) {
1993 arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
1994 arena->spare = NULL;
1997 assert(!arena->purging);
2000 for (i = 0; i < NPSIZES; i++)
2001 arena_run_heap_new(&arena->runs_avail[i]);
2003 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
2007 arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
2008 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
2009 size_t flag_decommitted)
2011 size_t size = *p_size;
2012 size_t run_ind = *p_run_ind;
2013 size_t run_pages = *p_run_pages;
2015 /* Try to coalesce forward. */
2016 if (run_ind + run_pages < chunk_npages &&
2017 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
2018 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
2019 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
2021 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
2023 size_t nrun_pages = nrun_size >> LG_PAGE;
2026  * Remove successor from runs_avail; the coalesced run is inserted later.
2029 assert(arena_mapbits_unallocated_size_get(chunk,
2030 run_ind+run_pages+nrun_pages-1) == nrun_size);
2031 assert(arena_mapbits_dirty_get(chunk,
2032 run_ind+run_pages+nrun_pages-1) == flag_dirty);
2033 assert(arena_mapbits_decommitted_get(chunk,
2034 run_ind+run_pages+nrun_pages-1) == flag_decommitted);
2035 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
2038  * If the successor is dirty, remove it from the set of dirty pages.
2041 if (flag_dirty != 0) {
2042 arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
2047 run_pages += nrun_pages;
2049 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2050 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2054 /* Try to coalesce backward. */
2055 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
2056 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
2057 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
2059 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
2061 size_t prun_pages = prun_size >> LG_PAGE;
2063 run_ind -= prun_pages;
2066  * Remove predecessor from runs_avail; the coalesced run is inserted later.
2069 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2071 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
2072 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2074 arena_avail_remove(arena, chunk, run_ind, prun_pages);
2077  * If the predecessor is dirty, remove it from the set of dirty pages.
2080 if (flag_dirty != 0) {
2081 arena_run_dirty_remove(arena, chunk, run_ind,
2086 run_pages += prun_pages;
2088 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2089 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2094 *p_run_ind = run_ind;
2095 *p_run_pages = run_pages;
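/*
 * Coalescing example (illustrative page indices): freeing a 4-page run at
 * pages [10..14) while [14..20) is already free with matching dirty and
 * decommit flags removes the successor from runs_avail and produces a single
 * 10-page run at [10..20); if [6..10) is also free and compatible, the
 * backward pass extends it to [6..20).  The caller then re-inserts the final
 * coalesced run into runs_avail (and runs_dirty if dirty).
 */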
2099 arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2104 assert(run_ind >= map_bias);
2105 assert(run_ind < chunk_npages);
2107 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
2108 size = arena_mapbits_large_size_get(chunk, run_ind);
2109 assert(size == PAGE || arena_mapbits_large_size_get(chunk,
2110 run_ind+(size>>LG_PAGE)-1) == 0);
2112 arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
2113 size = bin_info->run_size;
2120 arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
2121 bool cleaned, bool decommitted)
2123 arena_chunk_t *chunk;
2124 arena_chunk_map_misc_t *miscelm;
2125 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
2127 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2128 miscelm = arena_run_to_miscelm(run);
2129 run_ind = arena_miscelm_to_pageind(miscelm);
2130 assert(run_ind >= map_bias);
2131 assert(run_ind < chunk_npages);
2132 size = arena_run_size_get(arena, chunk, run, run_ind);
2133 run_pages = (size >> LG_PAGE);
2134 arena_nactive_sub(arena, run_pages);
2137 * The run is dirty if the caller claims to have dirtied it, as well as
2138 * if it was already dirty before being allocated and the caller
2139 * doesn't claim to have cleaned it.
2141 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2142 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
2143 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
2146 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
2147 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
2149 /* Mark pages as unallocated in the chunk map. */
2150 if (dirty || decommitted) {
2151 size_t flags = flag_dirty | flag_decommitted;
2152 arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
2153 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, flags);
2156 arena_mapbits_unallocated_set(chunk, run_ind, size,
2157 arena_mapbits_unzeroed_get(chunk, run_ind));
2158 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2159 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
2162 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
2163 flag_dirty, flag_decommitted);
2165 /* Insert into runs_avail, now that coalescing is complete. */
2166 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2167 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
2168 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2169 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
2170 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2171 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
2172 arena_avail_insert(arena, chunk, run_ind, run_pages);
2175 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
2177 /* Deallocate chunk if it is now completely unused. */
2178 if (size == arena_maxrun) {
2179 assert(run_ind == map_bias);
2180 assert(run_pages == (arena_maxrun >> LG_PAGE));
2181 arena_chunk_dalloc(tsdn, arena, chunk);
2185 * It is okay to do dirty page processing here even if the chunk was
2186 * deallocated above, since in that case it is the spare. Waiting
2187 * until after possible chunk deallocation to do dirty processing
2188 * allows for an old spare to be fully deallocated, thus decreasing the
2189 * chances of spuriously crossing the dirty page purging threshold.
2192 arena_maybe_purge(tsdn, arena);
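/*
 * The dirty flag computed above reduces to (restating the logic, no new
 * behavior):
 *
 *   dirty = dirty || (!cleaned && !decommitted &&
 *       arena_mapbits_dirty_get(chunk, run_ind) != 0);
 *
 * i.e. a run freed back on top of previously dirty pages stays dirty unless
 * the caller explicitly cleaned or decommitted it; dirty runs are also
 * inserted via arena_run_dirty_insert() so that purging can find them.
 */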
2196 arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2197 arena_run_t *run, size_t oldsize, size_t newsize)
2199 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2200 size_t pageind = arena_miscelm_to_pageind(miscelm);
2201 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
2202 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
2203 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2204 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2205 CHUNK_MAP_UNZEROED : 0;
2207 assert(oldsize > newsize);
2210 * Update the chunk map so that arena_run_dalloc() can treat the
2211 * leading run as separately allocated. Set the last element of each
2212 * run first, in case of single-page runs.
2214 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
2215 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2216 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2217 pageind+head_npages-1)));
2218 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2219 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2222 UNUSED size_t tail_npages = newsize >> LG_PAGE;
2223 assert(arena_mapbits_large_size_get(chunk,
2224 pageind+head_npages+tail_npages-1) == 0);
2225 assert(arena_mapbits_dirty_get(chunk,
2226 pageind+head_npages+tail_npages-1) == flag_dirty);
2228 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
2229 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2230 pageind+head_npages)));
2232 arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted != 0));
2237 arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2238 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
2240 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2241 size_t pageind = arena_miscelm_to_pageind(miscelm);
2242 size_t head_npages = newsize >> LG_PAGE;
2243 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
2244 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2245 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2246 CHUNK_MAP_UNZEROED : 0;
2247 arena_chunk_map_misc_t *tail_miscelm;
2248 arena_run_t *tail_run;
2250 assert(oldsize > newsize);
2253 * Update the chunk map so that arena_run_dalloc() can treat the
2254 * trailing run as separately allocated. Set the last element of each
2255 * run first, in case of single-page runs.
2257 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
2258 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2259 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2260 pageind+head_npages-1)));
2261 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2262 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2265 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2266 assert(arena_mapbits_large_size_get(chunk,
2267 pageind+head_npages+tail_npages-1) == 0);
2268 assert(arena_mapbits_dirty_get(chunk,
2269 pageind+head_npages+tail_npages-1) == flag_dirty);
2271 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
2272 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2273 pageind+head_npages)));
2275 tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
2276 tail_run = &tail_miscelm->run;
2277 arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted != 0));
2282 arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2284 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2286 arena_run_heap_insert(&bin->runs, miscelm);
2289 static arena_run_t *
2290 arena_bin_nonfull_run_tryget(arena_bin_t *bin)
2292 arena_chunk_map_misc_t *miscelm;
2294 miscelm = arena_run_heap_remove_first(&bin->runs);
2295 if (miscelm == NULL)
2298 bin->stats.reruns++;
2300 return (&miscelm->run);
2303 static arena_run_t *
2304 arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
2308 arena_bin_info_t *bin_info;
2310 /* Look for a usable run. */
2311 run = arena_bin_nonfull_run_tryget(bin);
2314 /* No existing runs have any space available. */
2316 binind = arena_bin_index(arena, bin);
2317 bin_info = &arena_bin_info[binind];
2319 /* Allocate a new run. */
2320 malloc_mutex_unlock(tsdn, &bin->lock);
2321 /******************************/
2322 malloc_mutex_lock(tsdn, &arena->lock);
2323 run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
2325 /* Initialize run internals. */
2326 run->binind = binind;
2327 run->nfree = bin_info->nregs;
2328 bitmap_init(run->bitmap, &bin_info->bitmap_info);
2330 malloc_mutex_unlock(tsdn, &arena->lock);
2331 /********************************/
2332 malloc_mutex_lock(tsdn, &bin->lock);
2336 bin->stats.curruns++;
2342 * arena_run_alloc_small() failed, but another thread may have made
2343 * sufficient memory available while this one dropped bin->lock above,
2344 * so search one more time.
2346 run = arena_bin_nonfull_run_tryget(bin);
2353 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
2355 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
2358 arena_bin_info_t *bin_info;
2361 binind = arena_bin_index(arena, bin);
2362 bin_info = &arena_bin_info[binind];
2364 run = arena_bin_nonfull_run_get(tsdn, arena, bin);
2365 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2367 * Another thread updated runcur while this one ran without the
2368 * bin lock in arena_bin_nonfull_run_get().
2371 assert(bin->runcur->nfree > 0);
2372 ret = arena_run_reg_alloc(bin->runcur, bin_info);
2374 arena_chunk_t *chunk;
2377 * arena_run_alloc_small() may have allocated run, or
2378 * it may have pulled run from the bin's run tree.
2379 * Therefore it is unsafe to make any assumptions about
2380 * how run has previously been used, and
2381 * arena_bin_lower_run() must be called, as if a region
2382 * were just deallocated from the run.
2384 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2385 if (run->nfree == bin_info->nregs) {
2386 arena_dalloc_bin_run(tsdn, arena, chunk, run,
2389 arena_bin_lower_run(arena, run, bin);
2399 assert(bin->runcur->nfree > 0);
2401 return (arena_run_reg_alloc(bin->runcur, bin_info));
2405 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
2406 szind_t binind, uint64_t prof_accumbytes)
2411 assert(tbin->ncached == 0);
2413 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
2415 bin = &arena->bins[binind];
2416 malloc_mutex_lock(tsdn, &bin->lock);
2417 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2418 tbin->lg_fill_div); i < nfill; i++) {
2421 if ((run = bin->runcur) != NULL && run->nfree > 0)
2422 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2424 ptr = arena_bin_malloc_hard(tsdn, arena, bin);
2427 * OOM. tbin->avail isn't yet filled down to its first
2428 * element, so the successful allocations (if any) must
2429 * be moved just before tbin->avail before bailing out.
2432 memmove(tbin->avail - i, tbin->avail - nfill,
2433 i * sizeof(void *));
2437 if (config_fill && unlikely(opt_junk_alloc)) {
2438 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2441 /* Insert such that low regions get used first. */
2442 *(tbin->avail - nfill + i) = ptr;
2445 bin->stats.nmalloc += i;
2446 bin->stats.nrequests += tbin->tstats.nrequests;
2447 bin->stats.curregs += i;
2448 bin->stats.nfills++;
2449 tbin->tstats.nrequests = 0;
2451 malloc_mutex_unlock(tsdn, &bin->lock);
2453 arena_decay_tick(tsdn, arena);
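/*
 * Fill sizing example (values are illustrative, not actual defaults): with
 * tcache_bin_info[binind].ncached_max == 200 and tbin->lg_fill_div == 1,
 *
 *   nfill = 200 >> 1;            (100 regions per fill)
 *
 * so a cache miss refills half of the bin's capacity; a larger lg_fill_div
 * refills a smaller fraction.
 */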
2457 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2460 size_t redzone_size = bin_info->redzone_size;
2463 memset((void *)((uintptr_t)ptr - redzone_size),
2464 JEMALLOC_ALLOC_JUNK, redzone_size);
2465 memset((void *)((uintptr_t)ptr + bin_info->reg_size),
2466 JEMALLOC_ALLOC_JUNK, redzone_size);
2468 memset((void *)((uintptr_t)ptr - redzone_size),
2469 JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
2474 #undef arena_redzone_corruption
2475 #define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
2478 arena_redzone_corruption(void *ptr, size_t usize, bool after,
2479 size_t offset, uint8_t byte)
2482 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2483 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
2484 after ? "after" : "before", ptr, usize, byte);
2487 #undef arena_redzone_corruption
2488 #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2489 arena_redzone_corruption_t *arena_redzone_corruption =
2490 JEMALLOC_N(n_arena_redzone_corruption);
2494 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
2498 if (opt_junk_alloc) {
2499 size_t size = bin_info->reg_size;
2500 size_t redzone_size = bin_info->redzone_size;
2503 for (i = 1; i <= redzone_size; i++) {
2504 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
2505 if (*byte != JEMALLOC_ALLOC_JUNK) {
2507 arena_redzone_corruption(ptr, size, false, i,
2510 *byte = JEMALLOC_ALLOC_JUNK;
2513 for (i = 0; i < redzone_size; i++) {
2514 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
2515 if (*byte != JEMALLOC_ALLOC_JUNK) {
2517 arena_redzone_corruption(ptr, size, true, i,
2520 *byte = JEMALLOC_ALLOC_JUNK;
2525 if (opt_abort && error)
2530 #undef arena_dalloc_junk_small
2531 #define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
2534 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2536 size_t redzone_size = bin_info->redzone_size;
2538 arena_redzones_validate(ptr, bin_info, false);
2539 memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
2540 bin_info->reg_interval);
2543 #undef arena_dalloc_junk_small
2544 #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2545 arena_dalloc_junk_small_t *arena_dalloc_junk_small =
2546 JEMALLOC_N(n_arena_dalloc_junk_small);
2550 arena_quarantine_junk_small(void *ptr, size_t usize)
2553 arena_bin_info_t *bin_info;
2554 cassert(config_fill);
2555 assert(opt_junk_free);
2556 assert(opt_quarantine);
2557 assert(usize <= SMALL_MAXCLASS);
2559 binind = size2index(usize);
2560 bin_info = &arena_bin_info[binind];
2561 arena_redzones_validate(ptr, bin_info, true);
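/*
 * Region layout assumed by the redzone helpers above when redzones are
 * enabled (widths come from bin_info):
 *
 *   | redzone | region (reg_size bytes) | redzone |
 *   |<-------------- reg_interval --------------->|
 *
 * arena_alloc_junk_small() fills the redzones (and, for non-zeroed
 * allocations, the region itself) with JEMALLOC_ALLOC_JUNK;
 * arena_redzones_validate() later checks that the pattern is intact and
 * reports any clobbered byte through arena_redzone_corruption().
 */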
2565 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
2572 assert(binind < NBINS);
2573 bin = &arena->bins[binind];
2574 usize = index2size(binind);
2576 malloc_mutex_lock(tsdn, &bin->lock);
2577 if ((run = bin->runcur) != NULL && run->nfree > 0)
2578 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2580 ret = arena_bin_malloc_hard(tsdn, arena, bin);
2583 malloc_mutex_unlock(tsdn, &bin->lock);
2588 bin->stats.nmalloc++;
2589 bin->stats.nrequests++;
2590 bin->stats.curregs++;
2592 malloc_mutex_unlock(tsdn, &bin->lock);
2593 if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
2598 if (unlikely(opt_junk_alloc)) {
2599 arena_alloc_junk_small(ret,
2600 &arena_bin_info[binind], false);
2601 } else if (unlikely(opt_zero))
2602 memset(ret, 0, usize);
2604 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2606 if (config_fill && unlikely(opt_junk_alloc)) {
2607 arena_alloc_junk_small(ret, &arena_bin_info[binind],
2610 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2611 memset(ret, 0, usize);
2614 arena_decay_tick(tsdn, arena);
2619 arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
2623 uintptr_t random_offset;
2625 arena_chunk_map_misc_t *miscelm;
2626 UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
2628 /* Large allocation. */
2629 usize = index2size(binind);
2630 malloc_mutex_lock(tsdn, &arena->lock);
2631 if (config_cache_oblivious) {
2635 * Compute a uniformly distributed offset within the first page
2636 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
2637 * for 4 KiB pages and 64-byte cachelines.
2639 r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
2640 LG_CACHELINE, false);
2641 random_offset = ((uintptr_t)r) << LG_CACHELINE;
2644 run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
2646 malloc_mutex_unlock(tsdn, &arena->lock);
2649 miscelm = arena_run_to_miscelm(run);
2650 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2653 szind_t index = binind - NBINS;
2655 arena->stats.nmalloc_large++;
2656 arena->stats.nrequests_large++;
2657 arena->stats.allocated_large += usize;
2658 arena->stats.lstats[index].nmalloc++;
2659 arena->stats.lstats[index].nrequests++;
2660 arena->stats.lstats[index].curruns++;
2663 idump = arena_prof_accum_locked(arena, usize);
2664 malloc_mutex_unlock(tsdn, &arena->lock);
2665 if (config_prof && idump)
2670 if (unlikely(opt_junk_alloc))
2671 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
2672 else if (unlikely(opt_zero))
2673 memset(ret, 0, usize);
2677 arena_decay_tick(tsdn, arena);
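/*
 * Sketch of the cache-oblivious offset above, assuming LG_PAGE == 12 and
 * LG_CACHELINE == 6 (4 KiB pages, 64-byte cachelines):
 *
 *   r = prng_lg_range_zu(&arena->offset_state, 12 - 6, false);   r in [0, 64)
 *   random_offset = (uintptr_t)r << 6;                           0, 64, ..., 4032
 *
 * The extra large_pad requested from arena_run_alloc_large() is what keeps
 * usize bytes usable after the returned pointer is advanced by this offset.
 */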
2682 arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
2686 assert(!tsdn_null(tsdn) || arena != NULL);
2688 if (likely(!tsdn_null(tsdn)))
2689 arena = arena_choose(tsdn_tsd(tsdn), arena);
2690 if (unlikely(arena == NULL))
2693 if (likely(size <= SMALL_MAXCLASS))
2694 return (arena_malloc_small(tsdn, arena, ind, zero));
2695 if (likely(size <= large_maxclass))
2696 return (arena_malloc_large(tsdn, arena, ind, zero));
2697 return (huge_malloc(tsdn, arena, index2size(ind), zero));
2700 /* Only handles large allocations that require more than page alignment. */
2702 arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
2706 size_t alloc_size, leadsize, trailsize;
2708 arena_chunk_t *chunk;
2709 arena_chunk_map_misc_t *miscelm;
2712 assert(!tsdn_null(tsdn) || arena != NULL);
2713 assert(usize == PAGE_CEILING(usize));
2715 if (likely(!tsdn_null(tsdn)))
2716 arena = arena_choose(tsdn_tsd(tsdn), arena);
2717 if (unlikely(arena == NULL))
2720 alignment = PAGE_CEILING(alignment);
2721 alloc_size = usize + large_pad + alignment - PAGE;
2723 malloc_mutex_lock(tsdn, &arena->lock);
2724 run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
2726 malloc_mutex_unlock(tsdn, &arena->lock);
2729 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2730 miscelm = arena_run_to_miscelm(run);
2731 rpages = arena_miscelm_to_rpages(miscelm);
2733 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2735 assert(alloc_size >= leadsize + usize);
2736 trailsize = alloc_size - leadsize - usize - large_pad;
2737 if (leadsize != 0) {
2738 arena_chunk_map_misc_t *head_miscelm = miscelm;
2739 arena_run_t *head_run = run;
2741 miscelm = arena_miscelm_get_mutable(chunk,
2742 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2744 run = &miscelm->run;
2746 arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
2747 alloc_size - leadsize);
2749 if (trailsize != 0) {
2750 arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
2751 trailsize, usize + large_pad, false);
2753 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2755 arena_miscelm_to_pageind(arena_run_to_miscelm(run));
2756 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2757 bool decommitted = (arena_mapbits_decommitted_get(chunk,
2760 assert(decommitted); /* Cause of OOM. */
2761 arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
2762 malloc_mutex_unlock(tsdn, &arena->lock);
2765 ret = arena_miscelm_to_rpages(miscelm);
2768 szind_t index = size2index(usize) - NBINS;
2770 arena->stats.nmalloc_large++;
2771 arena->stats.nrequests_large++;
2772 arena->stats.allocated_large += usize;
2773 arena->stats.lstats[index].nmalloc++;
2774 arena->stats.lstats[index].nrequests++;
2775 arena->stats.lstats[index].curruns++;
2777 malloc_mutex_unlock(tsdn, &arena->lock);
2779 if (config_fill && !zero) {
2780 if (unlikely(opt_junk_alloc))
2781 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
2782 else if (unlikely(opt_zero))
2783 memset(ret, 0, usize);
2785 arena_decay_tick(tsdn, arena);
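/*
 * Worked example of the trimming above (hypothetical values, assuming 4 KiB
 * pages and large_pad == 0): for usize == 40960 and alignment == 16384,
 *
 *   alloc_size = 40960 + 0 + 16384 - 4096 = 53248
 *
 * If the run's pages happen to start 4096 bytes past a 16 KiB boundary,
 * leadsize == 12288 and trailsize == 53248 - 12288 - 40960 == 0, so only a
 * 12 KiB head is trimmed off and returned to the arena.
 */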
2790 arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
2791 bool zero, tcache_t *tcache)
2795 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
2796 && (usize & PAGE_MASK) == 0))) {
2797 /* Small; alignment doesn't require special run placement. */
2798 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
2800 } else if (usize <= large_maxclass && alignment <= PAGE) {
2802 * Large; alignment doesn't require special run placement.
2803 * However, the cached pointer may be at a random offset from
2804 * the base of the run, so do some bit manipulation to retrieve the base of the run.
2807 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
2809 if (config_cache_oblivious)
2810 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2812 if (likely(usize <= large_maxclass)) {
2813 ret = arena_palloc_large(tsdn, arena, usize, alignment,
2815 } else if (likely(alignment <= chunksize))
2816 ret = huge_malloc(tsdn, arena, usize, zero);
2818 ret = huge_palloc(tsdn, arena, usize, alignment, zero);
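/*
 * Dispatch summary (sizes are illustrative): (usize=32, alignment=16) takes
 * the small path unchanged; (usize=20480, alignment=4096) takes the normal
 * large path and masks off the cache-oblivious offset to recover a
 * page-aligned pointer; alignments greater than a page for large sizes go
 * through arena_palloc_large(); and anything above large_maxclass is
 * handled as a huge allocation.
 */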
2825 arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
2827 arena_chunk_t *chunk;
2831 cassert(config_prof);
2832 assert(ptr != NULL);
2833 assert(CHUNK_ADDR2BASE(ptr) != ptr);
2834 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2835 assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
2836 assert(size <= SMALL_MAXCLASS);
2838 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2839 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2840 binind = size2index(size);
2841 assert(binind < NBINS);
2842 arena_mapbits_large_binind_set(chunk, pageind, binind);
2844 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2845 assert(isalloc(tsdn, ptr, true) == size);
2849 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
2853 /* Dissociate run from bin. */
2854 if (run == bin->runcur)
2857 szind_t binind = arena_bin_index(extent_node_arena_get(
2858 &chunk->node), bin);
2859 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2862 * The following block's conditional is necessary because if the
2863 * run only contains one region, then it never gets inserted
2864 * into the non-full runs tree.
2866 if (bin_info->nregs != 1) {
2867 arena_chunk_map_misc_t *miscelm =
2868 arena_run_to_miscelm(run);
2870 arena_run_heap_remove(&bin->runs, miscelm);
2876 arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2877 arena_run_t *run, arena_bin_t *bin)
2880 assert(run != bin->runcur);
2882 malloc_mutex_unlock(tsdn, &bin->lock);
2883 /******************************/
2884 malloc_mutex_lock(tsdn, &arena->lock);
2885 arena_run_dalloc(tsdn, arena, run, true, false, false);
2886 malloc_mutex_unlock(tsdn, &arena->lock);
2887 /****************************/
2888 malloc_mutex_lock(tsdn, &bin->lock);
2890 bin->stats.curruns--;
2894 arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
2898 * Make sure that if bin->runcur is non-NULL, it refers to the
2899 * oldest/lowest non-full run. It is okay to NULL runcur out rather
2900 * than proactively keeping it pointing at the oldest/lowest non-full run.
2903 if (bin->runcur != NULL &&
2904 arena_snad_comp(arena_run_to_miscelm(bin->runcur),
2905 arena_run_to_miscelm(run)) > 0) {
2906 /* Switch runcur. */
2907 if (bin->runcur->nfree > 0)
2908 arena_bin_runs_insert(bin, bin->runcur);
2911 bin->stats.reruns++;
2913 arena_bin_runs_insert(bin, run);
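/*
 * Runs are ordered by arena_snad_comp() (extent serial number, then
 * address), so runcur is steered toward the oldest/lowest non-full run.
 * Concentrating allocations there tends to let newer, higher runs drain
 * completely and be returned to the arena.
 */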
2917 arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2918 void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
2920 size_t pageind, rpages_ind;
2923 arena_bin_info_t *bin_info;
2926 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2927 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2928 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
2929 binind = run->binind;
2930 bin = &arena->bins[binind];
2931 bin_info = &arena_bin_info[binind];
2933 if (!junked && config_fill && unlikely(opt_junk_free))
2934 arena_dalloc_junk_small(ptr, bin_info);
2936 arena_run_reg_dalloc(run, ptr);
2937 if (run->nfree == bin_info->nregs) {
2938 arena_dissociate_bin_run(chunk, run, bin);
2939 arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
2940 } else if (run->nfree == 1 && run != bin->runcur)
2941 arena_bin_lower_run(arena, run, bin);
2944 bin->stats.ndalloc++;
2945 bin->stats.curregs--;
2950 arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
2951 arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
2954 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
2958 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
2959 size_t pageind, arena_chunk_map_bits_t *bitselm)
2965 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2966 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
2967 bin = &arena->bins[run->binind];
2968 malloc_mutex_lock(tsdn, &bin->lock);
2969 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
2970 malloc_mutex_unlock(tsdn, &bin->lock);
2974 arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2975 void *ptr, size_t pageind)
2977 arena_chunk_map_bits_t *bitselm;
2980 /* arena_ptr_small_binind_get() does extra sanity checking. */
2981 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2982 pageind)) != BININD_INVALID);
2984 bitselm = arena_bitselm_get_mutable(chunk, pageind);
2985 arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
2986 arena_decay_tick(tsdn, arena);
2990 #undef arena_dalloc_junk_large
2991 #define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
2994 arena_dalloc_junk_large(void *ptr, size_t usize)
2997 if (config_fill && unlikely(opt_junk_free))
2998 memset(ptr, JEMALLOC_FREE_JUNK, usize);
3001 #undef arena_dalloc_junk_large
3002 #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
3003 arena_dalloc_junk_large_t *arena_dalloc_junk_large =
3004 JEMALLOC_N(n_arena_dalloc_junk_large);
3008 arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
3009 arena_chunk_t *chunk, void *ptr, bool junked)
3011 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
3012 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
3014 arena_run_t *run = &miscelm->run;
3016 if (config_fill || config_stats) {
3017 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
3021 arena_dalloc_junk_large(ptr, usize);
3023 szind_t index = size2index(usize) - NBINS;
3025 arena->stats.ndalloc_large++;
3026 arena->stats.allocated_large -= usize;
3027 arena->stats.lstats[index].ndalloc++;
3028 arena->stats.lstats[index].curruns--;
3032 arena_run_dalloc(tsdn, arena, run, true, false, false);
3036 arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
3037 arena_chunk_t *chunk, void *ptr)
3040 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
3044 arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3048 malloc_mutex_lock(tsdn, &arena->lock);
3049 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
3050 malloc_mutex_unlock(tsdn, &arena->lock);
3051 arena_decay_tick(tsdn, arena);
3055 arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3056 void *ptr, size_t oldsize, size_t size)
3058 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
3059 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
3061 arena_run_t *run = &miscelm->run;
3063 assert(size < oldsize);
3066 * Shrink the run, and make trailing pages available for other allocations.
3069 malloc_mutex_lock(tsdn, &arena->lock);
3070 arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size + large_pad, true);
3073 szind_t oldindex = size2index(oldsize) - NBINS;
3074 szind_t index = size2index(size) - NBINS;
3076 arena->stats.ndalloc_large++;
3077 arena->stats.allocated_large -= oldsize;
3078 arena->stats.lstats[oldindex].ndalloc++;
3079 arena->stats.lstats[oldindex].curruns--;
3081 arena->stats.nmalloc_large++;
3082 arena->stats.nrequests_large++;
3083 arena->stats.allocated_large += size;
3084 arena->stats.lstats[index].nmalloc++;
3085 arena->stats.lstats[index].nrequests++;
3086 arena->stats.lstats[index].curruns++;
3088 malloc_mutex_unlock(tsdn, &arena->lock);
3092 arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3093 void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
3095 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
3096 size_t npages = (oldsize + large_pad) >> LG_PAGE;
3099 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
3102 /* Try to extend the run. */
3103 malloc_mutex_lock(tsdn, &arena->lock);
3104 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
3105 pageind+npages) != 0)
3107 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
3108 if (oldsize + followsize >= usize_min) {
3110 * The next run is available and sufficiently large. Split the
3111 * following run, then merge the first part with the existing allocation.
3115 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
3118 while (oldsize + followsize < usize)
3119 usize = index2size(size2index(usize)-1);
3120 assert(usize >= usize_min);
3121 assert(usize >= oldsize);
3122 splitsize = usize - oldsize;
3126 run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
3127 if (arena_run_split_large(arena, run, splitsize, zero))
3130 if (config_cache_oblivious && zero) {
3132 * Zero the trailing bytes of the original allocation's
3133 * last page, since they are in an indeterminate state.
3134 * There will always be trailing bytes, because ptr's
3135 * offset from the beginning of the run is a multiple of
3136 * CACHELINE in [0 .. PAGE).
3138 void *zbase = (void *)((uintptr_t)ptr + oldsize);
3139 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
3141 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
3143 memset(zbase, 0, nzero);
3146 size = oldsize + splitsize;
3147 npages = (size + large_pad) >> LG_PAGE;
3150 * Mark the extended run as dirty if either portion of the run
3151 * was dirty before allocation. This is rather pedantic,
3152 * because there's not actually any sequence of events that
3153 * could cause the resulting run to be passed to
3154 * arena_run_dalloc() with the dirty argument set to false
3155 * (which is when dirty flag consistency would really matter).
3157 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
3158 arena_mapbits_dirty_get(chunk, pageind+npages-1);
3159 flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
3160 arena_mapbits_large_set(chunk, pageind, size + large_pad,
3161 flag_dirty | (flag_unzeroed_mask &
3162 arena_mapbits_unzeroed_get(chunk, pageind)));
3163 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
3164 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
3165 pageind+npages-1)));
3168 szind_t oldindex = size2index(oldsize) - NBINS;
3169 szind_t index = size2index(size) - NBINS;
3171 arena->stats.ndalloc_large++;
3172 arena->stats.allocated_large -= oldsize;
3173 arena->stats.lstats[oldindex].ndalloc++;
3174 arena->stats.lstats[oldindex].curruns--;
3176 arena->stats.nmalloc_large++;
3177 arena->stats.nrequests_large++;
3178 arena->stats.allocated_large += size;
3179 arena->stats.lstats[index].nmalloc++;
3180 arena->stats.lstats[index].nrequests++;
3181 arena->stats.lstats[index].curruns++;
3183 malloc_mutex_unlock(tsdn, &arena->lock);
3187 malloc_mutex_unlock(tsdn, &arena->lock);
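/*
 * Worked example of the in-place grow above (sizes are illustrative): with
 * oldsize == 16384, usize_max == 24576, and a 20480-byte unallocated run
 * immediately following, oldsize + followsize >= usize_max, so usize stays
 * 24576 and splitsize == 8192; only the first 8 KiB of the following run is
 * split off and merged, and the remaining 12 KiB stays available.
 */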
3192 #undef arena_ralloc_junk_large
3193 #define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
3196 arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
3199 if (config_fill && unlikely(opt_junk_free)) {
3200 memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
3205 #undef arena_ralloc_junk_large
3206 #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
3207 arena_ralloc_junk_large_t *arena_ralloc_junk_large =
3208 JEMALLOC_N(n_arena_ralloc_junk_large);
3212 * Try to resize a large allocation, in order to avoid copying. This will
3213 * always fail when growing an object if the run that follows it is already in use.
3216 arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
3217 size_t usize_max, bool zero)
3219 arena_chunk_t *chunk;
3222 if (oldsize == usize_max) {
3223 /* Current size class is compatible and maximal. */
3227 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3228 arena = extent_node_arena_get(&chunk->node);
3230 if (oldsize < usize_max) {
3231 bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
3232 oldsize, usize_min, usize_max, zero);
3233 if (config_fill && !ret && !zero) {
3234 if (unlikely(opt_junk_alloc)) {
3235 memset((void *)((uintptr_t)ptr + oldsize),
3236 JEMALLOC_ALLOC_JUNK,
3237 isalloc(tsdn, ptr, config_prof) - oldsize);
3238 } else if (unlikely(opt_zero)) {
3239 memset((void *)((uintptr_t)ptr + oldsize), 0,
3240 isalloc(tsdn, ptr, config_prof) - oldsize);
3246 assert(oldsize > usize_max);
3247 /* Fill before shrinking in order to avoid a race. */
3248 arena_ralloc_junk_large(ptr, oldsize, usize_max);
3249 arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
3254 arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
3255 size_t extra, bool zero)
3257 size_t usize_min, usize_max;
3259 /* Callers passing non-zero extra must have clamped it (size + extra <= HUGE_MAXCLASS). */
3260 assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
3262 if (unlikely(size > HUGE_MAXCLASS))
3265 usize_min = s2u(size);
3266 usize_max = s2u(size + extra);
3267 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
3268 arena_chunk_t *chunk;
3271 * Avoid moving the allocation if the size class can be left the same.
3274 if (oldsize <= SMALL_MAXCLASS) {
3275 assert(arena_bin_info[size2index(oldsize)].reg_size ==
3277 if ((usize_max > SMALL_MAXCLASS ||
3278 size2index(usize_max) != size2index(oldsize)) &&
3279 (size > oldsize || usize_max < oldsize))
3282 if (usize_max <= SMALL_MAXCLASS)
3284 if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
3289 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3290 arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
3293 return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min, usize_max, zero));
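/*
 * Example of the no-move policy above (assuming the usual small size
 * classes): an allocation in the 48-byte class resized to a 44-byte request
 * still maps to the 48-byte class, so it is left in place; a 100-byte
 * request maps to the 112-byte class, so the call reports failure and the
 * caller falls back to allocate/copy/free.  Large allocations additionally
 * try arena_ralloc_large() before giving up.
 */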
3299 arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
3300 size_t alignment, bool zero, tcache_t *tcache)
3304 return (arena_malloc(tsdn, arena, usize, size2index(usize),
3305 zero, tcache, true));
3306 usize = sa2u(usize, alignment);
3307 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
3309 return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
3313 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
3314 size_t alignment, bool zero, tcache_t *tcache)
3320 if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
3323 if (likely(usize <= large_maxclass)) {
3326 /* Try to avoid moving the allocation. */
3327 if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
3332 * size and oldsize are different enough that we need to move
3333 * the object. In that case, fall back to allocating new space and copying.
3336 ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
3337 alignment, zero, tcache);
3342 * Junk/zero-filling were already done by
3343 * ipalloc()/arena_malloc().
3346 copysize = (usize < oldsize) ? usize : oldsize;
3347 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
3348 memcpy(ret, ptr, copysize);
3349 isqalloc(tsd, ptr, oldsize, tcache, true);
3351 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
3358 arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
3362 malloc_mutex_lock(tsdn, &arena->lock);
3363 ret = arena->dss_prec;
3364 malloc_mutex_unlock(tsdn, &arena->lock);
3369 arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
3373 return (dss_prec != dss_prec_disabled);
3374 malloc_mutex_lock(tsdn, &arena->lock);
3375 arena->dss_prec = dss_prec;
3376 malloc_mutex_unlock(tsdn, &arena->lock);
3381 arena_lg_dirty_mult_default_get(void)
3384 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
3388 arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
3391 if (opt_purge != purge_mode_ratio)
3393 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
3395 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
3400 arena_decay_time_default_get(void)
3403 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
3407 arena_decay_time_default_set(ssize_t decay_time)
3410 if (opt_purge != purge_mode_decay)
3412 if (!arena_decay_time_valid(decay_time))
3414 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
3419 arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
3420 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3421 size_t *nactive, size_t *ndirty)
3424 *nthreads += arena_nthreads_get(arena, false);
3425 *dss = dss_prec_names[arena->dss_prec];
3426 *lg_dirty_mult = arena->lg_dirty_mult;
3427 *decay_time = arena->decay.time;
3428 *nactive += arena->nactive;
3429 *ndirty += arena->ndirty;
3433 arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
3434 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3435 size_t *nactive, size_t *ndirty)
3438 malloc_mutex_lock(tsdn, &arena->lock);
3439 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3440 decay_time, nactive, ndirty);
3441 malloc_mutex_unlock(tsdn, &arena->lock);
3445 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
3446 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3447 size_t *nactive, size_t *ndirty, arena_stats_t *astats,
3448 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
3449 malloc_huge_stats_t *hstats)
3453 cassert(config_stats);
3455 malloc_mutex_lock(tsdn, &arena->lock);
3456 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3457 decay_time, nactive, ndirty);
3459 astats->mapped += arena->stats.mapped;
3460 astats->retained += arena->stats.retained;
3461 astats->npurge += arena->stats.npurge;
3462 astats->nmadvise += arena->stats.nmadvise;
3463 astats->purged += arena->stats.purged;
3464 astats->metadata_mapped += arena->stats.metadata_mapped;
3465 astats->metadata_allocated += arena_metadata_allocated_get(arena);
3466 astats->allocated_large += arena->stats.allocated_large;
3467 astats->nmalloc_large += arena->stats.nmalloc_large;
3468 astats->ndalloc_large += arena->stats.ndalloc_large;
3469 astats->nrequests_large += arena->stats.nrequests_large;
3470 astats->allocated_huge += arena->stats.allocated_huge;
3471 astats->nmalloc_huge += arena->stats.nmalloc_huge;
3472 astats->ndalloc_huge += arena->stats.ndalloc_huge;
3474 for (i = 0; i < nlclasses; i++) {
3475 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
3476 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
3477 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
3478 lstats[i].curruns += arena->stats.lstats[i].curruns;
3481 for (i = 0; i < nhclasses; i++) {
3482 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
3483 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
3484 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
3486 malloc_mutex_unlock(tsdn, &arena->lock);
3488 for (i = 0; i < NBINS; i++) {
3489 arena_bin_t *bin = &arena->bins[i];
3491 malloc_mutex_lock(tsdn, &bin->lock);
3492 bstats[i].nmalloc += bin->stats.nmalloc;
3493 bstats[i].ndalloc += bin->stats.ndalloc;
3494 bstats[i].nrequests += bin->stats.nrequests;
3495 bstats[i].curregs += bin->stats.curregs;
3496 if (config_tcache) {
3497 bstats[i].nfills += bin->stats.nfills;
3498 bstats[i].nflushes += bin->stats.nflushes;
3500 bstats[i].nruns += bin->stats.nruns;
3501 bstats[i].reruns += bin->stats.reruns;
3502 bstats[i].curruns += bin->stats.curruns;
3503 malloc_mutex_unlock(tsdn, &bin->lock);
3508 arena_nthreads_get(arena_t *arena, bool internal)
3511 return (atomic_read_u(&arena->nthreads[internal]));
3515 arena_nthreads_inc(arena_t *arena, bool internal)
3518 atomic_add_u(&arena->nthreads[internal], 1);
3522 arena_nthreads_dec(arena_t *arena, bool internal)
3525 atomic_sub_u(&arena->nthreads[internal], 1);
3529 arena_extent_sn_next(arena_t *arena)
3532 return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
3536 arena_new(tsdn_t *tsdn, unsigned ind)
3542 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
3543 * because there is no way to clean up if base_alloc() OOMs.
3546 arena = (arena_t *)base_alloc(tsdn,
3547 CACHELINE_CEILING(sizeof(arena_t)) +
3548 QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
3549 + (nhclasses * sizeof(malloc_huge_stats_t)));
3551 arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
3556 arena->nthreads[0] = arena->nthreads[1] = 0;
3557 if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
3561 memset(&arena->stats, 0, sizeof(arena_stats_t));
3562 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
3563 + CACHELINE_CEILING(sizeof(arena_t)));
3564 memset(arena->stats.lstats, 0, nlclasses *
3565 sizeof(malloc_large_stats_t));
3566 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
3567 + CACHELINE_CEILING(sizeof(arena_t)) +
3568 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
3569 memset(arena->stats.hstats, 0, nhclasses *
3570 sizeof(malloc_huge_stats_t));
3572 ql_new(&arena->tcache_ql);
3576 arena->prof_accumbytes = 0;
3578 if (config_cache_oblivious) {
3580 * A nondeterministic seed based on the address of arena reduces
3581 * the likelihood of lockstep non-uniform cache index
3582 * utilization among identical concurrent processes, but at the
3583 * cost of test repeatability. For debug builds, instead use a
3584 * deterministic seed.
3586 arena->offset_state = config_debug ? ind :
3587 (size_t)(uintptr_t)arena;
3590 arena->dss_prec = chunk_dss_prec_get();
3592 ql_new(&arena->achunks);
3594 arena->extent_sn_next = 0;
3596 arena->spare = NULL;
3598 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
3599 arena->purging = false;
3603 for (i = 0; i < NPSIZES; i++)
3604 arena_run_heap_new(&arena->runs_avail[i]);
3606 qr_new(&arena->runs_dirty, rd_link);
3607 qr_new(&arena->chunks_cache, cc_link);
3609 if (opt_purge == purge_mode_decay)
3610 arena_decay_init(arena, arena_decay_time_default_get());
3612 ql_new(&arena->huge);
3613 if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
3614 WITNESS_RANK_ARENA_HUGE))
3617 extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
3618 extent_tree_ad_new(&arena->chunks_ad_cached);
3619 extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
3620 extent_tree_ad_new(&arena->chunks_ad_retained);
3621 if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
3622 WITNESS_RANK_ARENA_CHUNKS))
3624 ql_new(&arena->node_cache);
3625 if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
3626 WITNESS_RANK_ARENA_NODE_CACHE))
3629 arena->chunk_hooks = chunk_hooks_default;
3631 /* Initialize bins. */
3632 for (i = 0; i < NBINS; i++) {
3633 arena_bin_t *bin = &arena->bins[i];
3634 if (malloc_mutex_init(&bin->lock, "arena_bin",
3635 WITNESS_RANK_ARENA_BIN))
3638 arena_run_heap_new(&bin->runs);
3640 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
3647 * Calculate bin_info->run_size such that it meets the following constraints:
3649 * *) bin_info->run_size <= arena_maxrun
3650 * *) bin_info->nregs <= RUN_MAXREGS
3652 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
3653 * these settings are all interdependent.
3656 bin_info_run_size_calc(arena_bin_info_t *bin_info)
3659 size_t try_run_size, perfect_run_size, actual_run_size;
3660 uint32_t try_nregs, perfect_nregs, actual_nregs;
3663 * Determine redzone size based on minimum alignment and minimum
3664 * redzone size. Add padding to the end of the run if it is needed to
3665 * align the regions. The padding allows each redzone to be half the
3666 * minimum alignment; without the padding, each redzone would have to
3667 * be twice as large in order to maintain alignment.
3669 if (config_fill && unlikely(opt_redzone)) {
3670 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
3671 if (align_min <= REDZONE_MINSIZE) {
3672 bin_info->redzone_size = REDZONE_MINSIZE;
3675 bin_info->redzone_size = align_min >> 1;
3676 pad_size = bin_info->redzone_size;
3679 bin_info->redzone_size = 0;
3682 bin_info->reg_interval = bin_info->reg_size +
3683 (bin_info->redzone_size << 1);
3686 * Compute run size under ideal conditions (no redzones, no limit on run size).
3689 try_run_size = PAGE;
3690 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
3692 perfect_run_size = try_run_size;
3693 perfect_nregs = try_nregs;
3695 try_run_size += PAGE;
3696 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
3697 } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
3698 assert(perfect_nregs <= RUN_MAXREGS);
3700 actual_run_size = perfect_run_size;
3701 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3702 bin_info->reg_interval);
3705 * Redzones can require enough padding that not even a single region can
3706 * fit within the number of pages that would normally be dedicated to a
3707 * run for this size class. Increase the run size until at least one region fits.
3710 while (actual_nregs == 0) {
3711 assert(config_fill && unlikely(opt_redzone));
3713 actual_run_size += PAGE;
3714 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3715 bin_info->reg_interval);
3719 * Make sure that the run will fit within an arena chunk.
3721 while (actual_run_size > arena_maxrun) {
3722 actual_run_size -= PAGE;
3723 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3724 bin_info->reg_interval);
3726 assert(actual_nregs > 0);
3727 assert(actual_run_size == s2u(actual_run_size));
3729 /* Copy final settings. */
3730 bin_info->run_size = actual_run_size;
3731 bin_info->nregs = actual_nregs;
3732 bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
3733 bin_info->reg_interval) - pad_size + bin_info->redzone_size);
3735 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
3736 * bin_info->reg_interval) + pad_size == bin_info->run_size);
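/*
 * Worked example (assuming 4 KiB pages and redzones disabled): for
 * reg_size == 96, the loop above finds the smallest multiple of PAGE that
 * is also a multiple of reg_size:
 *
 *    4096 / 96 =  42 regions,  42 * 96 =  4032 != 4096
 *    8192 / 96 =  85 regions,  85 * 96 =  8160 != 8192
 *   12288 / 96 = 128 regions, 128 * 96 = 12288   (perfect fit)
 *
 * yielding run_size == 12288, nregs == 128, and reg0_offset == 0, since
 * there is no padding or redzone in this configuration.
 */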
3742 arena_bin_info_t *bin_info;
3744 #define BIN_INFO_INIT_bin_yes(index, size) \
3745 bin_info = &arena_bin_info[index]; \
3746 bin_info->reg_size = size; \
3747 bin_info_run_size_calc(bin_info); \
3748 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
3749 #define BIN_INFO_INIT_bin_no(index, size)
3750 #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
3751 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
3753 #undef BIN_INFO_INIT_bin_yes
3754 #undef BIN_INFO_INIT_bin_no
3763 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
3764 arena_decay_time_default_set(opt_decay_time);
3767 * Compute the header size such that it is large enough to contain the
3768 * page map. The page map is biased to omit entries for the header
3769 * itself, so some iteration is necessary to compute the map bias.
3771 * 1) Compute safe header_size and map_bias values that include enough
3772 * space for an unbiased page map.
3773 * 2) Refine map_bias based on (1) to omit the header pages in the page
3774 * map. The resulting map_bias may be one too small.
3775 * 3) Refine map_bias based on (2). The result will be >= the result
3776 * from (2), and will always be correct.
3779 for (i = 0; i < 3; i++) {
3780 size_t header_size = offsetof(arena_chunk_t, map_bits) +
3781 ((sizeof(arena_chunk_map_bits_t) +
3782 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
3783 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
3785 assert(map_bias > 0);
3787 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3788 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3790 arena_maxrun = chunksize - (map_bias << LG_PAGE);
3791 assert(arena_maxrun > 0);
3792 large_maxclass = index2size(size2index(chunksize)-1);
3793 if (large_maxclass > arena_maxrun) {
3795 * For small chunk sizes it's possible for there to be fewer
3796 * non-header pages available than are necessary to serve the
3797 * size classes just below chunksize.
3799 large_maxclass = arena_maxrun;
3801 assert(large_maxclass > 0);
3802 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
3803 nhclasses = NSIZES - nlclasses - NBINS;
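/*
 * Example of the resulting constants (map_bias value is hypothetical,
 * assuming 2 MiB chunks, 4 KiB pages, and the default four size classes per
 * size doubling): with chunk_npages == 512 and map_bias stabilizing at 13,
 *
 *   arena_maxrun   = 2097152 - (13 << 12) = 2043904
 *   large_maxclass = index2size(size2index(2097152) - 1) = 1835008
 *
 * which is below arena_maxrun, so no clamping is needed in that
 * configuration.
 */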
3809 arena_prefork0(tsdn_t *tsdn, arena_t *arena)
3812 malloc_mutex_prefork(tsdn, &arena->lock);
3816 arena_prefork1(tsdn_t *tsdn, arena_t *arena)
3819 malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
3823 arena_prefork2(tsdn_t *tsdn, arena_t *arena)
3826 malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
3830 arena_prefork3(tsdn_t *tsdn, arena_t *arena)
3834 for (i = 0; i < NBINS; i++)
3835 malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
3836 malloc_mutex_prefork(tsdn, &arena->huge_mtx);
3840 arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
3844 malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
3845 for (i = 0; i < NBINS; i++)
3846 malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
3847 malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
3848 malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
3849 malloc_mutex_postfork_parent(tsdn, &arena->lock);
3853 arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
3857 malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
3858 for (i = 0; i < NBINS; i++)
3859 malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
3860 malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
3861 malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
3862 malloc_mutex_postfork_child(tsdn, &arena->lock);