#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t	opt_purge = PURGE_DEFAULT;
const char	*purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t	lg_dirty_mult_default;
ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t	decay_time_default;

arena_bin_info_t	arena_bin_info[NBINS];

size_t		map_bias;
size_t		map_misc_offset;
size_t		arena_maxrun; /* Max run size for arenas. */
size_t		large_maxclass; /* Max large size class. */
size_t		run_quantize_max; /* Max run_quantize_*() input. */
static size_t	small_maxrun; /* Max run size for small size classes. */
static bool	*small_run_tab; /* Valid small run page multiples. */
static size_t	*run_quantize_floor_tab; /* run_quantize_floor() memoization. */
static size_t	*run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
unsigned	nlclasses; /* Number of large size classes. */
unsigned	nhclasses; /* Number of huge size classes. */
static szind_t	runs_avail_bias; /* Size index for first runs_avail tree. */
static szind_t	runs_avail_nclasses; /* Number of runs_avail trees. */
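
/*
 * Note: run_quantize_floor_tab/run_quantize_ceil_tab are indexed by
 * (size >> LG_PAGE) - 1, i.e. slot i caches the quantized size for a run of
 * (i+1) pages, so hot-path lookups are O(1) array reads that are validated
 * against the run_quantize_*_compute() functions in debug builds.
 */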

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
    size_t ndirty_limit);
static void	arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
    bool dirty, bool cleaned, bool decommitted);
static void	arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

static int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

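/*
 * The (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm) expression in
 * arena_run_addr_comp() is a branchless three-way comparison that yields -1,
 * 0, or 1, avoiding the overflow a direct subtraction of the two values could
 * produce.  The resulting address ordering makes the runs_avail heaps return
 * the lowest-addressed candidate run first.
 */
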
/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
    ph_link, arena_run_addr_comp)

static size_t
run_quantize_floor_compute(size_t size)
{
	size_t qsize;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/* Don't change sizes that are valid small run sizes. */
	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
		return (size);

	/*
	 * Round down to the nearest run size that can actually be requested
	 * during normal large allocation.  Add large_pad so that cache index
	 * randomization can offset the allocation from the page boundary.
	 */
	qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
	if (qsize <= SMALL_MAXCLASS + large_pad)
		return (run_quantize_floor_compute(size - large_pad));
	assert(qsize <= size);
	return (qsize);
}

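/*
 * Worked example (assuming 4 KiB pages and large_pad == 0): a 9-page (36 KiB)
 * run size is not a large size class, so run_quantize_floor_compute() rounds
 * it down to 32 KiB, the largest large size class that does not exceed it,
 * whereas valid small run sizes pass through unchanged via small_run_tab.
 */
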
static size_t
run_quantize_ceil_compute_hard(size_t size)
{
	size_t large_run_size_next;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/*
	 * Return the next quantized size greater than the input size.
	 * Quantized sizes comprise the union of run sizes that back small
	 * region runs, and run sizes that back large regions with no explicit
	 * alignment constraints.
	 */

	if (size > SMALL_MAXCLASS) {
		large_run_size_next = PAGE_CEILING(index2size(size2index(size -
		    large_pad) + 1) + large_pad);
	} else
		large_run_size_next = SIZE_T_MAX;
	if (size >= small_maxrun)
		return (large_run_size_next);

	while (true) {
		size += PAGE;
		assert(size <= small_maxrun);
		if (small_run_tab[size >> LG_PAGE]) {
			if (large_run_size_next < size)
				return (large_run_size_next);
			return (size);
		}
	}
}

static size_t
run_quantize_ceil_compute(size_t size)
{
	size_t qsize = run_quantize_floor_compute(size);

	if (qsize < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		qsize = run_quantize_ceil_compute_hard(qsize);
	}
	return (qsize);
}

#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_floor_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_ceil_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif

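/*
 * The #undef/#define pairs around run_quantize_floor()/run_quantize_ceil()
 * serve JEMALLOC_JET builds: the functions are compiled under the
 * n_run_quantize_* names and exposed through interposable function pointers,
 * so that unit tests can wrap or replace them without touching release-build
 * codegen.
 */
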
static arena_run_heap_t *
arena_runs_avail_get(arena_t *arena, szind_t ind)
{

	assert(ind >= runs_avail_bias);
	assert(ind - runs_avail_bias < runs_avail_nclasses);

	return (&arena->runs_avail[ind - runs_avail_bias]);
}

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_insert(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_remove(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}

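/*
 * Region addresses are computed rather than stored: region regind of a run
 * lives at run_base + reg0_offset + regind * reg_interval.  E.g., with
 * reg_interval == 64 and reg0_offset == 0, region 3 starts 192 bytes into the
 * run's pages.
 */
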
JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}

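/*
 * stats_cactive tracks active memory at chunk granularity, so the two helpers
 * above only update it when the chunk-rounded footprint actually changes:
 * growing or shrinking nactive by a page has no effect on cactive unless
 * CHUNK_CEILING(nactive << LG_PAGE) crosses a chunk boundary.
 */
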
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(tsdn, chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(tsdn, &arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
	    NULL, chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
			    (void *)chunk, chunksize, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
		    chunksize, *zero, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(tsdn, &arena->lock);
	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
    bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
		    &chunk_hooks, zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
	 * or decommitted chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(tsdn, arena);
		if (chunk == NULL)
			return (NULL);
	}

	ql_elm_new(&chunk->node, ql_link);
	ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	bool committed;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk_deregister(chunk, &chunk->node);

	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
	if (!committed) {
		/*
		 * Decommit the header.  Mark the chunk as decommitted even if
		 * header decommit fails, since treating a partially committed
		 * chunk as committed has a high potential for causing later
		 * access of decommitted memory.
		 */
		chunk_hooks = chunk_hooks_get(tsdn, arena);
		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
		    arena->ind);
	}

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
	    committed);

	if (config_stats) {
		arena->stats.mapped -= chunksize;
		arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
	}
}

static void
arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
{

	assert(arena->spare != spare);

	if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
		arena_run_dirty_remove(arena, spare, map_bias,
		    chunk_npages-map_bias);
	}

	arena_chunk_discard(tsdn, arena, spare);
}

static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	arena_chunk_t *spare;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	ql_remove(&arena->achunks, &chunk->node, ql_link);
	spare = arena->spare;
	arena->spare = chunk;
	if (spare != NULL)
		arena_spare_discard(tsdn, arena, spare);
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.hstats[index].ndalloc--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

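/*
 * Huge stats index math: size classes are ordered small, then large, then
 * huge, so size2index(usize) - nlclasses - NBINS maps a huge usize to a
 * zero-based index into stats.hstats.  The same expression appears in every
 * stats helper above.
 */
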
static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
		return (base_alloc(tsdn, sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
    size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
	    alignment, zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
	    alignment, zero, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
		    usize, alignment, zero, csize);
	}

	return (ret);
}

void
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    true);
	}
	malloc_mutex_unlock(tsdn, &arena->lock);
}

static bool
arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
    bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
	    chunksize, zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}

	return (err);
}

bool
arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
	    chunksize, zero, true) == NULL);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
		    &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
		    cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}

	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	szind_t ind, i;

	ind = size2index(run_quantize_ceil(size));
	for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
		arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
		    arena_runs_avail_get(arena, i));
		if (miscelm != NULL)
			return (&miscelm->run);
	}

	return (NULL);
}

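/*
 * Example: a request quantized to size class ind first consults the heap for
 * ind; if that heap is empty, the search falls through to ind+1, ind+2, ...,
 * so the returned run is the lowest-addressed run (per arena_run_addr_comp())
 * in the smallest size class able to satisfy the request.
 */
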
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
	if (run != NULL) {
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(tsdn, &arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);

	malloc_mutex_lock(tsdn, &arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (false);
}

static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
	nstime_add(&arena->decay_deadline, &arena->decay_interval);
	if (arena->decay_time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
		    nstime_ns(&arena->decay_interval)));
		nstime_add(&arena->decay_deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay_deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define	STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
	};
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	assert(opt_purge == purge_mode_decay);

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
		sum += arena->decay_backlog[i] * h_steps[i];
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return (npages_limit_backlog);
}

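/*
 * Sketch of the computation: each epoch's contribution to the limit is its
 * dirty page delta scaled by a fixed-point smoothstep factor.  Recent epochs
 * (high indices in decay_backlog) retain most of their pages, while the
 * oldest epoch's factor approaches zero, so unused dirty pages become
 * purgeable gradually over SMOOTHSTEP_NSTEPS epochs rather than all at once.
 */
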
static void
arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
{
	uint64_t nadvance_u64;
	nstime_t delta;
	size_t ndirty_delta;

	assert(opt_purge == purge_mode_decay);
	assert(arena_decay_deadline_reached(arena, time));

	nstime_copy(&delta, time);
	nstime_subtract(&delta, &arena->decay_epoch);
	nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &arena->decay_interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&arena->decay_epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(arena);

	/* Update the backlog. */
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}
	ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
	    arena->decay_ndirty : 0;
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
	arena->decay_backlog_npages_limit =
	    arena_decay_backlog_npages_limit(arena);
}

static size_t
arena_decay_npages_limit(arena_t *arena)
{
	size_t npages_limit;

	assert(opt_purge == purge_mode_decay);

	npages_limit = arena->decay_backlog_npages_limit;

	/* Add in any dirty pages created during the current epoch. */
	if (arena->ndirty > arena->decay_ndirty)
		npages_limit += arena->ndirty - arena->decay_ndirty;

	return (npages_limit);
}

static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{

	arena->decay_time = decay_time;
	if (decay_time > 0) {
		nstime_init2(&arena->decay_interval, decay_time, 0);
		nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&arena->decay_epoch, 0);
	nstime_update(&arena->decay_epoch);
	arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
	arena_decay_deadline_init(arena);
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog_npages_limit = 0;
	memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_time_valid(ssize_t decay_time)
{

	if (decay_time < -1)
		return (false);
	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
		return (true);
	return (false);
}

ssize_t
arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
{
	ssize_t decay_time;

	malloc_mutex_lock(tsdn, &arena->lock);
	decay_time = arena->decay_time;
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (decay_time);
}

bool
arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
{

	if (!arena_decay_time_valid(decay_time))
		return (true);

	malloc_mutex_lock(tsdn, &arena->lock);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_time changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_init(arena, decay_time);
	arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (false);
}

static void
arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
{

	assert(opt_purge == purge_mode_ratio);

	/* Don't purge if the option is disabled. */
	if (arena->lg_dirty_mult < 0)
		return;

	/*
	 * Iterate, since preventing recursive purging could otherwise leave too
	 * many dirty pages.
	 */
	while (true) {
		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
		if (threshold < chunk_npages)
			threshold = chunk_npages;
		/*
		 * Don't purge unless the number of purgeable pages exceeds the
		 * threshold.
		 */
		if (arena->ndirty <= threshold)
			return;
		arena_purge_to_limit(tsdn, arena, threshold);
	}
}

static void
arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
{
	nstime_t time;
	size_t ndirty_limit;

	assert(opt_purge == purge_mode_decay);

	/* Purge all or nothing if the option is disabled. */
	if (arena->decay_time <= 0) {
		if (arena->decay_time == 0)
			arena_purge_to_limit(tsdn, arena, 0);
		return;
	}

	nstime_copy(&time, &arena->decay_epoch);
	if (unlikely(nstime_update(&time))) {
		/* Time went backwards.  Force an epoch advance. */
		nstime_copy(&time, &arena->decay_deadline);
	}

	if (arena_decay_deadline_reached(arena, &time))
		arena_decay_epoch_advance(arena, &time);

	ndirty_limit = arena_decay_npages_limit(arena);

	/*
	 * Don't try to purge unless the number of purgeable pages exceeds the
	 * current limit.
	 */
	if (arena->ndirty <= ndirty_limit)
		return;
	arena_purge_to_limit(tsdn, arena, ndirty_limit);
}

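/*
 * Timing sketch: arena_decay_init() splits decay_time into SMOOTHSTEP_NSTEPS
 * equal epochs (decay_interval), so e.g. with decay_time = 10s a dirty page
 * that goes unused becomes fully purgeable roughly 10s after it was created,
 * with the limit ratcheting down once per epoch rather than in one step.
 */
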
void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
{

	/* Don't recursively purge. */
	if (arena->purging)
		return;

	if (opt_purge == purge_mode_ratio)
		arena_maybe_purge_ratio(tsdn, arena);
	else
		arena_maybe_purge_decay(tsdn, arena);
}

static size_t
arena_dirty_count(arena_t *arena)
{
	size_t ndirty = 0;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
			    rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			assert(arena_mapbits_allocated_get(chunk, pageind) ==
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		ndirty += npages;
	}

	return (ndirty);
}

static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;
	size_t nstashed = 0;

	/* Stash runs/chunks according to ndirty_limit. */
	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
		size_t npages;
		rdelm_next = qr_next(rdelm, rd_link);

		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next;
			bool zero;
			UNUSED void *chunk;

			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			chunkselm_next = qr_next(chunkselm, cc_link);
			/*
			 * Allocate.  chunkselm remains valid due to the
			 * dalloc_node=false argument to chunk_alloc_cache().
			 */
			zero = false;
			chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
			    extent_node_addr_get(chunkselm),
			    extent_node_size_get(chunkselm), chunksize, &zero,
			    false);
			assert(chunk == extent_node_addr_get(chunkselm));
			assert(zero == extent_node_zeroed_get(chunkselm));
			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
			    purge_chunks_sentinel);
			assert(npages == (extent_node_size_get(chunkselm) >>
			    LG_PAGE));
			chunkselm = chunkselm_next;
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			arena_run_t *run = &miscelm->run;
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			/*
			 * If purging the spare chunk's run, make it available
			 * prior to allocation.
			 */
			if (chunk == arena->spare)
				arena_chunk_alloc(tsdn, arena);

			/* Temporarily allocate the free dirty run. */
			arena_run_split_large(arena, run, run_size, false);
			/* Stash. */
			if (false)
				qr_new(rdelm, rd_link); /* Redundant. */
			else {
				assert(qr_next(rdelm, rd_link) == rdelm);
				assert(qr_prev(rdelm, rd_link) == rdelm);
			}
			qr_meld(purge_runs_sentinel, rdelm, rd_link);
		}

		nstashed += npages;
		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
		    ndirty_limit)
			break;
	}

	return (nstashed);
}

static size_t
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	size_t npurged, nmadvise;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	if (config_stats)
		nmadvise = 0;
	npurged = 0;

	malloc_mutex_unlock(tsdn, &arena->lock);
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			/*
			 * Don't actually purge the chunk here because 1)
			 * chunkselm is embedded in the chunk and must remain
			 * valid, and 2) we deallocate the chunk in
			 * arena_unstash_purged(), where it is destroyed,
			 * decommitted, or purged, depending on chunk
			 * deallocation policy.
			 */
			size_t size = extent_node_size_get(chunkselm);
			npages = size >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			size_t pageind, run_size, flag_unzeroed, flags, i;
			bool decommitted;
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			pageind = arena_miscelm_to_pageind(miscelm);
			run_size = arena_mapbits_large_size_get(chunk, pageind);
			npages = run_size >> LG_PAGE;

			assert(pageind + npages <= chunk_npages);
			assert(!arena_mapbits_decommitted_get(chunk, pageind));
			assert(!arena_mapbits_decommitted_get(chunk,
			    pageind+npages-1));
			decommitted = !chunk_hooks->decommit(chunk, chunksize,
			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
			if (decommitted) {
				flag_unzeroed = 0;
				flags = CHUNK_MAP_DECOMMITTED;
			} else {
				flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
				    chunk_hooks, chunk, chunksize, pageind <<
				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
				flags = flag_unzeroed;
			}
			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
			    flags);
			arena_mapbits_large_set(chunk, pageind, run_size,
			    flags);

			/*
			 * Set the unzeroed flag for internal pages, now that
			 * chunk_purge_wrapper() has returned whether the pages
			 * were zeroed as a side effect of purging.  This chunk
			 * map modification is safe even though the arena mutex
			 * isn't currently owned by this thread, because the run
			 * is marked as allocated, thus protecting it from being
			 * modified by any other thread.  As long as these
			 * writes don't perturb the first and last elements'
			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
			 */
			for (i = 1; i < npages-1; i++) {
				arena_mapbits_internal_set(chunk, pageind+i,
				    flag_unzeroed);
			}
		}

		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(tsdn, &arena->lock);

	if (config_stats) {
		arena->stats.nmadvise += nmadvise;
		arena->stats.purged += npurged;
	}

	return (npurged);
}

static void
arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;

	/* Deallocate chunks/runs. */
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
		rdelm_next = qr_next(rdelm, rd_link);
		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next = qr_next(chunkselm,
			    cc_link);
			void *addr = extent_node_addr_get(chunkselm);
			size_t size = extent_node_size_get(chunkselm);
			bool zeroed = extent_node_zeroed_get(chunkselm);
			bool committed = extent_node_committed_get(chunkselm);
			extent_node_dirty_remove(chunkselm);
			arena_node_dalloc(tsdn, arena, chunkselm);
			chunkselm = chunkselm_next;
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
			    size, zeroed, committed);
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			bool decommitted = (arena_mapbits_decommitted_get(chunk,
			    pageind) != 0);
			arena_run_t *run = &miscelm->run;
			qr_remove(rdelm, rd_link);
			arena_run_dalloc(tsdn, arena, run, false, true,
			    decommitted);
		}
	}
}

/*
 * NB: ndirty_limit is interpreted differently depending on opt_purge:
 *   - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
 *                       desired state:
 *                       (arena->ndirty <= ndirty_limit)
 *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
 *                       violating the invariant:
 *                       (arena->ndirty >= ndirty_limit)
 */
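/*
 * Example: in ratio mode with lg_dirty_mult == 3 and nactive == 800 pages,
 * arena_maybe_purge_ratio() passes ndirty_limit == 100 (nactive >> 3, subject
 * to the chunk_npages floor), and purging stops as soon as ndirty drops to
 * that threshold.
 */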
static void
arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
{
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	size_t npurge, npurged;
	arena_runs_dirty_link_t purge_runs_sentinel;
	extent_node_t purge_chunks_sentinel;

	arena->purging = true;

	/*
	 * Calls to arena_dirty_count() are disabled even for debug builds
	 * because overhead grows nonlinearly as memory usage increases.
	 */
	if (false && config_debug) {
		size_t ndirty = arena_dirty_count(arena);
		assert(ndirty == arena->ndirty);
	}
	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);

	qr_new(&purge_runs_sentinel, rd_link);
	extent_node_dirty_linkage_init(&purge_chunks_sentinel);

	npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	if (npurge == 0)
		goto label_return;
	npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	assert(npurged == npurge);
	arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
	    &purge_chunks_sentinel);

	if (config_stats)
		arena->stats.npurge++;

label_return:
	arena->purging = false;
}

void
arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	if (all)
		arena_purge_to_limit(tsdn, arena, 0);
	else
		arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

static void
arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
{
	size_t pageind, npages;

	cassert(config_prof);
	assert(opt_prof);

	/*
	 * Iterate over the allocated runs and remove profiled allocations from
	 * the sample set.
	 */
	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
		if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				void *ptr = (void *)((uintptr_t)chunk + (pageind
				    << LG_PAGE));
				size_t usize = isalloc(tsd_tsdn(tsd), ptr,
				    config_prof);

				prof_free(tsd, ptr, usize);
				npages = arena_mapbits_large_size_get(chunk,
				    pageind) >> LG_PAGE;
			} else {
				/* Skip small run. */
				size_t binind = arena_mapbits_binind_get(chunk,
				    pageind);
				arena_bin_info_t *bin_info =
				    &arena_bin_info[binind];
				npages = bin_info->run_size >> LG_PAGE;
			}
		} else {
			/* Skip unallocated run. */
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		assert(pageind + npages <= chunk_npages);
	}
}

void
arena_reset(tsd_t *tsd, arena_t *arena)
{
	unsigned i;
	extent_node_t *node;

	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due to
	 *   reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Remove large allocations from prof sample set. */
	if (config_prof && opt_prof) {
		ql_foreach(node, &arena->achunks, ql_link) {
			arena_achunk_prof_reset(tsd, arena,
			    extent_node_addr_get(node));
		}
	}

	/* Reset curruns for large size classes. */
	if (config_stats) {
		for (i = 0; i < nlclasses; i++)
			arena->stats.lstats[i].curruns = 0;
	}

	/* Huge allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
	for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
	    ql_last(&arena->huge, ql_link)) {
		void *ptr = extent_node_addr_get(node);
		size_t usize;

		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
		if (config_stats || (config_prof && opt_prof))
			usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		/* Remove huge allocation from prof sample set. */
		if (config_prof && opt_prof)
			prof_free(tsd, ptr, usize);
		huge_dalloc(tsd_tsdn(tsd), ptr);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
		/* Cancel out unwanted effects on stats. */
		if (config_stats)
			arena_huge_reset_stats_cancel(arena, usize);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);

	malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);

	/* Bins. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		bin->runcur = NULL;
		arena_run_heap_new(&bin->runs);
		if (config_stats) {
			bin->stats.curregs = 0;
			bin->stats.curruns = 0;
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	/*
	 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
	 * chains directly correspond.
	 */
	qr_new(&arena->runs_dirty, rd_link);
	for (node = qr_next(&arena->chunks_cache, cc_link);
	    node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
		qr_new(&node->rd, rd_link);
		qr_meld(&arena->runs_dirty, &node->rd, rd_link);
	}

	/* Arena chunks. */
	for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
	    ql_last(&arena->achunks, ql_link)) {
		ql_remove(&arena->achunks, node, ql_link);
		arena_chunk_discard(tsd_tsdn(tsd), arena,
		    extent_node_addr_get(node));
	}

	/* Spare. */
	if (arena->spare != NULL) {
		arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
		arena->spare = NULL;
	}

	assert(!arena->purging);
	arena->nactive = 0;

	for (i = 0; i < runs_avail_nclasses; i++)
		arena_run_heap_new(&arena->runs_avail[i]);

	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}

static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
    size_t flag_decommitted)
{
	size_t size = *p_size;
	size_t run_ind = *p_run_ind;
	size_t run_pages = *p_run_pages;

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
	    flag_decommitted) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages);
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		assert(arena_mapbits_decommitted_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_decommitted);
		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);

		/*
		 * If the successor is dirty, remove it from the set of dirty
		 * pages.
		 */
		if (flag_dirty != 0) {
			arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
			    nrun_pages);
		}

		size += nrun_size;
		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
	    run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
	    flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
	    flag_decommitted) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind-1);
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		    prun_size);
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
		    flag_decommitted);
		arena_avail_remove(arena, chunk, run_ind, prun_pages);

		/*
		 * If the predecessor is dirty, remove it from the set of dirty
		 * pages.
		 */
		if (flag_dirty != 0) {
			arena_run_dirty_remove(arena, chunk, run_ind,
			    prun_pages);
		}

		size += prun_size;
		run_pages += prun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	*p_size = size;
	*p_run_ind = run_ind;
	*p_run_pages = run_pages;
}

static size_t
arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t run_ind)
{
	size_t size;

	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);

	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
		size = arena_mapbits_large_size_get(chunk, run_ind);
		assert(size == PAGE || arena_mapbits_large_size_get(chunk,
		    run_ind+(size>>LG_PAGE)-1) == 0);
	} else {
		arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
		size = bin_info->run_size;
	}

	return (size);
}

static void
arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned, bool decommitted)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);
	size = arena_run_size_get(arena, chunk, run, run_ind);
	run_pages = (size >> LG_PAGE);
	arena_nactive_sub(arena, run_pages);

	/*
	 * The run is dirty if the caller claims to have dirtied it, as well as
	 * if it was already dirty before being allocated and the caller
	 * doesn't claim to have cleaned it.
	 */
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
	    != 0)
		dirty = true;
	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
	flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;

	/* Mark pages as unallocated in the chunk map. */
	if (dirty || decommitted) {
		size_t flags = flag_dirty | flag_decommitted;
		arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    flags);
	} else {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind));
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
	}

	arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
	    flag_dirty, flag_decommitted);

	/* Insert into runs_avail, now that coalescing is complete. */
	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
	arena_avail_insert(arena, chunk, run_ind, run_pages);

	if (dirty)
		arena_run_dirty_insert(arena, chunk, run_ind, run_pages);

	/* Deallocate chunk if it is now completely unused. */
	if (size == arena_maxrun) {
		assert(run_ind == map_bias);
		assert(run_pages == (arena_maxrun >> LG_PAGE));
		arena_chunk_dalloc(tsdn, arena, chunk);
	}

	/*
	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
	 */
	if (dirty)
		arena_maybe_purge(tsdn, arena);
}

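/*
 * Editorial note (illustrative): the dirty computation above means that
 * with dirty == cleaned == decommitted == false, a run whose mapbits
 * still carry CHUNK_MAP_DIRTY is promoted to dirty, re-enters runs_dirty,
 * and counts toward the purging threshold; only a caller that passes
 * cleaned == true vouches that the pages were actually scrubbed.
 */
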
static void
arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	size_t pageind = arena_miscelm_to_pageind(miscelm);
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages-1)));
	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages)));

	arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
	    0));
}

static void
arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	size_t pageind = arena_miscelm_to_pageind(miscelm);
	size_t head_npages = newsize >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_chunk_map_misc_t *tail_miscelm;
	arena_run_t *tail_run;

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages-1)));
	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));

	if (config_debug) {
		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages)));

	tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
	tail_run = &tail_miscelm->run;
	arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
	    != 0));
}

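/*
 * Trim arithmetic, worked through (hypothetical sizes, 4 KiB pages): for
 * arena_run_trim_tail(..., oldsize = 6 * PAGE, newsize = 4 * PAGE, ...),
 * head_npages == 4, so the head keeps pages [pageind .. pageind+3] at size
 * 4 * PAGE, while the two tail pages starting at pageind+4 are remapped as
 * an independent large run and handed to arena_run_dalloc().
 */
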
static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);

	arena_run_heap_insert(&bin->runs, miscelm);
}

static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
	arena_chunk_map_misc_t *miscelm;

	miscelm = arena_run_heap_remove_first(&bin->runs);
	if (miscelm == NULL)
		return (NULL);
	if (config_stats)
		bin->stats.reruns++;

	return (&miscelm->run);
}

static arena_run_t *
arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
{
	arena_run_t *run;
	szind_t binind;
	arena_bin_info_t *bin_info;

	/* Look for a usable run. */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);
	/* No existing runs have any space available. */

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];

	/* Allocate a new run. */
	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	malloc_mutex_lock(tsdn, &arena->lock);
	run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
	if (run != NULL) {
		/* Initialize run internals. */
		run->binind = binind;
		run->nfree = bin_info->nregs;
		bitmap_init(run->bitmap, &bin_info->bitmap_info);
	}
	malloc_mutex_unlock(tsdn, &arena->lock);
	/********************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (run != NULL) {
		if (config_stats) {
			bin->stats.nruns++;
			bin->stats.curruns++;
		}
		return (run);
	}

	/*
	 * arena_run_alloc_small() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);

	return (NULL);
}

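/*
 * Editorial note: bin->lock is dropped around the arena->lock critical
 * section above, so another thread may install a usable bin->runcur in
 * that window.  That is why arena_bin_malloc_hard() re-checks runcur on
 * return, and why this function retries arena_bin_nonfull_run_tryget()
 * after a failed run allocation instead of reporting OOM immediately.
 */
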
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
{
	szind_t binind;
	arena_bin_info_t *bin_info;
	arena_run_t *run;

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];
	bin->runcur = NULL;
	run = arena_bin_nonfull_run_get(tsdn, arena, bin);
	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
		/*
		 * Another thread updated runcur while this one ran without the
		 * bin lock in arena_bin_nonfull_run_get().
		 */
		void *ret;
		assert(bin->runcur->nfree > 0);
		ret = arena_run_reg_alloc(bin->runcur, bin_info);
		if (run != NULL) {
			arena_chunk_t *chunk;

			/*
			 * arena_run_alloc_small() may have allocated run, or
			 * it may have pulled run from the bin's run tree.
			 * Therefore it is unsafe to make any assumptions about
			 * how run has previously been used, and
			 * arena_bin_lower_run() must be called, as if a region
			 * were just deallocated from the run.
			 */
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
			if (run->nfree == bin_info->nregs) {
				arena_dalloc_bin_run(tsdn, arena, chunk, run,
				    bin);
			} else
				arena_bin_lower_run(arena, chunk, run, bin);
		}
		return (ret);
	}

	if (run == NULL)
		return (NULL);

	bin->runcur = run;

	assert(bin->runcur->nfree > 0);

	return (arena_run_reg_alloc(bin->runcur, bin_info));
}

void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
    szind_t binind, uint64_t prof_accumbytes)
{
	unsigned i, nfill;
	arena_bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
		prof_idump(tsdn);
	bin = &arena->bins[binind];
	malloc_mutex_lock(tsdn, &bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tbin->lg_fill_div); i < nfill; i++) {
		arena_run_t *run;
		void *ptr;
		if ((run = bin->runcur) != NULL && run->nfree > 0)
			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
		else
			ptr = arena_bin_malloc_hard(tsdn, arena, bin);
		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		/* Insert such that low regions get used first. */
		*(tbin->avail - nfill + i) = ptr;
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsdn, arena);
}

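/*
 * Fill-count example (illustrative numbers): with ncached_max == 200 and
 * lg_fill_div == 1, nfill == 200 >> 1 == 100, so one cache miss refills
 * half of the tcache bin; each increment of lg_fill_div halves the refill
 * batch, trading lock acquisitions for lower per-thread cache footprint.
 */
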
void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{
	size_t redzone_size = bin_info->redzone_size;

	if (zero) {
		memset((void *)((uintptr_t)ptr - redzone_size),
		    JEMALLOC_ALLOC_JUNK, redzone_size);
		memset((void *)((uintptr_t)ptr + bin_info->reg_size),
		    JEMALLOC_ALLOC_JUNK, redzone_size);
	} else {
		memset((void *)((uintptr_t)ptr - redzone_size),
		    JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
	}
}

#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define	arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
    size_t offset, uint8_t byte)
{

	malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
	    "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
	    after ? "after" : "before", ptr, usize, byte);
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
    JEMALLOC_N(n_arena_redzone_corruption);
#endif

static void
arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
	bool error = false;

	if (opt_junk_alloc) {
		size_t size = bin_info->reg_size;
		size_t redzone_size = bin_info->redzone_size;
		size_t i;

		for (i = 1; i <= redzone_size; i++) {
			uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
			if (*byte != JEMALLOC_ALLOC_JUNK) {
				error = true;
				arena_redzone_corruption(ptr, size, false, i,
				    *byte);
				if (reset)
					*byte = JEMALLOC_ALLOC_JUNK;
			}
		}
		for (i = 0; i < redzone_size; i++) {
			uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
			if (*byte != JEMALLOC_ALLOC_JUNK) {
				error = true;
				arena_redzone_corruption(ptr, size, true, i,
				    *byte);
				if (reset)
					*byte = JEMALLOC_ALLOC_JUNK;
			}
		}
	}

	if (opt_abort && error)
		abort();
}

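/*
 * Region layout assumed by the checks above (widths illustrative;
 * JEMALLOC_ALLOC_JUNK is the allocation junk byte, 0xa5 by default):
 *
 *   |<-redzone_size->|<------reg_size------>|<-redzone_size->|
 *    0xa5 ...... 0xa5  application data      0xa5 ...... 0xa5
 *    ^ptr-redzone_size ^ptr                  ^ptr+reg_size
 *
 * Both zones are filled at allocation time, so any other byte value found
 * here indicates an out-of-bounds write by the application.
 */
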
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define	arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
{
	size_t redzone_size = bin_info->redzone_size;

	arena_redzones_validate(ptr, bin_info, false);
	memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
	    bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
    JEMALLOC_N(n_arena_dalloc_junk_small);
#endif

void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
	szind_t binind;
	arena_bin_info_t *bin_info;
	cassert(config_fill);
	assert(opt_junk_free);
	assert(opt_quarantine);
	assert(usize <= SMALL_MAXCLASS);

	binind = size2index(usize);
	bin_info = &arena_bin_info[binind];
	arena_redzones_validate(ptr, bin_info, true);
}

static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
	void *ret;
	arena_bin_t *bin;
	size_t usize;
	arena_run_t *run;

	assert(binind < NBINS);
	bin = &arena->bins[binind];
	usize = index2size(binind);

	malloc_mutex_lock(tsdn, &bin->lock);
	if ((run = bin->runcur) != NULL && run->nfree > 0)
		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
	else
		ret = arena_bin_malloc_hard(tsdn, arena, bin);

	if (ret == NULL) {
		malloc_mutex_unlock(tsdn, &bin->lock);
		return (NULL);
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
		prof_idump(tsdn);

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsdn, arena);
	return (ret);
}

void *
arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
	void *ret;
	size_t usize;
	uintptr_t random_offset;
	arena_run_t *run;
	arena_chunk_map_misc_t *miscelm;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	/* Large allocation. */
	usize = index2size(binind);
	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_cache_oblivious) {
		uint64_t r;

		/*
		 * Compute a uniformly distributed offset within the first page
		 * that is a multiple of the cacheline size, e.g. [0 .. 64) *
		 * 64 for 4 KiB pages and 64-byte cachelines.
		 */
		r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
		random_offset = ((uintptr_t)r) << LG_CACHELINE;
	} else
		random_offset = 0;
	run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
	if (run == NULL) {
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (NULL);
	}
	miscelm = arena_run_to_miscelm(run);
	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
	    random_offset);
	if (config_stats) {
		szind_t index = binind - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	if (config_prof)
		idump = arena_prof_accum_locked(arena, usize);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (config_prof && idump)
		prof_idump(tsdn);

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc))
				memset(ret, JEMALLOC_ALLOC_JUNK, usize);
			else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
	}

	arena_decay_tick(tsdn, arena);
	return (ret);
}

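/*
 * Offset example (illustrative): with LG_PAGE == 12 and LG_CACHELINE == 6,
 * prng_lg_range() returns r in [0 .. 64), so random_offset is one of
 * {0, 64, 128, ..., 4032}; large_pad reserves the extra page that keeps a
 * maximally offset allocation from overflowing the run.
 */
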
void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero)
{

	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn)))
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	if (unlikely(arena == NULL))
		return (NULL);

	if (likely(size <= SMALL_MAXCLASS))
		return (arena_malloc_small(tsdn, arena, ind, zero));
	if (likely(size <= large_maxclass))
		return (arena_malloc_large(tsdn, arena, ind, zero));
	return (huge_malloc(tsdn, arena, index2size(ind), zero));
}

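/*
 * Routing example (illustrative, assuming the default 4 KiB pages and
 * 2 MiB chunks): a 100-byte request rounds up to the 112-byte small bin;
 * a 20 KiB request takes the large path; anything above large_maxclass
 * (just below chunksize) is delegated to huge_malloc().
 */
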
/* Only handles large allocations that require more than page alignment. */
static void *
arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero)
{
	void *ret;
	size_t alloc_size, leadsize, trailsize;
	arena_run_t *run;
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(!tsdn_null(tsdn) || arena != NULL);
	assert(usize == PAGE_CEILING(usize));

	if (likely(!tsdn_null(tsdn)))
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	if (unlikely(arena == NULL))
		return (NULL);

	alignment = PAGE_CEILING(alignment);
	alloc_size = usize + large_pad + alignment - PAGE;

	malloc_mutex_lock(tsdn, &arena->lock);
	run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
	if (run == NULL) {
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (NULL);
	}
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);

	leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
	    (uintptr_t)rpages;
	assert(alloc_size >= leadsize + usize);
	trailsize = alloc_size - leadsize - usize - large_pad;
	if (leadsize != 0) {
		arena_chunk_map_misc_t *head_miscelm = miscelm;
		arena_run_t *head_run = run;

		miscelm = arena_miscelm_get_mutable(chunk,
		    arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
		    LG_PAGE));
		run = &miscelm->run;

		arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
		    alloc_size - leadsize);
	}
	if (trailsize != 0) {
		arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
		    trailsize, usize + large_pad, false);
	}
	if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
		size_t run_ind =
		    arena_miscelm_to_pageind(arena_run_to_miscelm(run));
		bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
		bool decommitted = (arena_mapbits_decommitted_get(chunk,
		    run_ind) != 0);

		assert(decommitted); /* Cause of OOM. */
		arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (NULL);
	}
	ret = arena_miscelm_to_rpages(miscelm);

	if (config_stats) {
		szind_t index = size2index(usize) - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	malloc_mutex_unlock(tsdn, &arena->lock);

	if (config_fill && !zero) {
		if (unlikely(opt_junk_alloc))
			memset(ret, JEMALLOC_ALLOC_JUNK, usize);
		else if (unlikely(opt_zero))
			memset(ret, 0, usize);
	}
	arena_decay_tick(tsdn, arena);
	return (ret);
}

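/*
 * Worked example (hypothetical sizes, 4 KiB pages, large_pad == PAGE): for
 * usize == 16 pages and alignment == 8 pages, alloc_size == 16 + 1 + 8 - 1
 * == 24 pages.  If the run happens to start 3 pages past an 8-page
 * boundary, leadsize == 5 pages and trailsize == 24 - 5 - 16 - 1 == 2
 * pages; both ends are trimmed back to the arena and the aligned 17-page
 * middle (usize + large_pad) survives.
 */
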
void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special run placement. */
		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
		    tcache, true);
	} else if (usize <= large_maxclass && alignment <= PAGE) {
		/*
		 * Large; alignment doesn't require special run placement.
		 * However, the cached pointer may be at a random offset from
		 * the base of the run, so do some bit manipulation to retrieve
		 * the base.
		 */
		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
		    tcache, true);
		if (config_cache_oblivious)
			ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
	} else {
		if (likely(usize <= large_maxclass)) {
			ret = arena_palloc_large(tsdn, arena, usize, alignment,
			    zero);
		} else if (likely(alignment <= chunksize))
			ret = huge_malloc(tsdn, arena, usize, zero);
		else
			ret = huge_palloc(tsdn, arena, usize, alignment, zero);
	}
	return (ret);
}

void
arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
{
	arena_chunk_t *chunk;
	size_t pageind;
	szind_t binind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);
	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
	assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
	assert(size <= SMALL_MAXCLASS);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	binind = size2index(size);
	assert(binind < NBINS);
	arena_mapbits_large_binind_set(chunk, pageind, binind);

	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
	assert(isalloc(tsdn, ptr, true) == size);
}

static void
arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

	/* Dissociate run from bin. */
	if (run == bin->runcur)
		bin->runcur = NULL;
	else {
		szind_t binind = arena_bin_index(extent_node_arena_get(
		    &chunk->node), bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];

		/*
		 * The following block's conditional is necessary because if the
		 * run only contains one region, then it never gets inserted
		 * into the non-full runs tree.
		 */
		if (bin_info->nregs != 1) {
			arena_chunk_map_misc_t *miscelm =
			    arena_run_to_miscelm(run);

			arena_run_heap_remove(&bin->runs, miscelm);
		}
	}
}

static void
arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin)
{

	assert(run != bin->runcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	malloc_mutex_lock(tsdn, &arena->lock);
	arena_run_dalloc(tsdn, arena, run, true, false, false);
	malloc_mutex_unlock(tsdn, &arena->lock);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats)
		bin->stats.curruns--;
}

static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

	/*
	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
	 * non-full run.  It is okay to NULL runcur out rather than proactively
	 * keeping it pointing at the lowest non-full run.
	 */
	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
		/* Switch runcur. */
		if (bin->runcur->nfree > 0)
			arena_bin_runs_insert(bin, bin->runcur);
		bin->runcur = run;
		if (config_stats)
			bin->stats.reruns++;
	} else
		arena_bin_runs_insert(bin, run);
}

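/*
 * Editorial note: keeping runcur at the lowest address implements
 * address-ordered allocation.  E.g. if a region is freed in a non-full run
 * that sits below the current runcur, the lower run takes over and the old
 * runcur re-enters bin->runs; packing regions toward low addresses lets
 * high-address runs drain, empty, and be returned to the arena.
 */
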
static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
{
	size_t pageind, rpages_ind;
	arena_run_t *run;
	arena_bin_t *bin;
	arena_bin_info_t *bin_info;
	szind_t binind;

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
	binind = run->binind;
	bin = &arena->bins[binind];
	bin_info = &arena_bin_info[binind];

	if (!junked && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_small(ptr, bin_info);

	arena_run_reg_dalloc(run, ptr);
	if (run->nfree == bin_info->nregs) {
		arena_dissociate_bin_run(chunk, run, bin);
		arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
	} else if (run->nfree == 1 && run != bin->runcur)
		arena_bin_lower_run(arena, chunk, run, bin);

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}

void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
{

	arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_bits_t *bitselm)
{
	arena_run_t *run;
	arena_bin_t *bin;
	size_t rpages_ind;

	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
	bin = &arena->bins[run->binind];
	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t pageind)
{
	arena_chunk_map_bits_t *bitselm;

	if (config_debug) {
		/* arena_ptr_small_binind_get() does extra sanity checking. */
		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) != BININD_INVALID);
	}
	bitselm = arena_bitselm_get_mutable(chunk, pageind);
	arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
	arena_decay_tick(tsdn, arena);
}

#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define	arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free))
		memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
    JEMALLOC_N(n_arena_dalloc_junk_large);
#endif

static void
arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr, bool junked)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);
	arena_run_t *run = &miscelm->run;

	if (config_fill || config_stats) {
		size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
		    large_pad;

		if (!junked)
			arena_dalloc_junk_large(ptr, usize);
		if (config_stats) {
			szind_t index = size2index(usize) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= usize;
			arena->stats.lstats[index].ndalloc++;
			arena->stats.lstats[index].curruns--;
		}
	}

	arena_run_dalloc(tsdn, arena, run, true, false, false);
}

void
arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr)
{

	arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
}

void
arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
	malloc_mutex_unlock(tsdn, &arena->lock);
	arena_decay_tick(tsdn, arena);
}

static void
arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);
	arena_run_t *run = &miscelm->run;

	assert(size < oldsize);

	/*
	 * Shrink the run, and make trailing pages available for other
	 * allocations.
	 */
	malloc_mutex_lock(tsdn, &arena->lock);
	arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
	    large_pad, true);
	if (config_stats) {
		szind_t oldindex = size2index(oldsize) - NBINS;
		szind_t index = size2index(size) - NBINS;

		arena->stats.ndalloc_large++;
		arena->stats.allocated_large -= oldsize;
		arena->stats.lstats[oldindex].ndalloc++;
		arena->stats.lstats[oldindex].curruns--;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	malloc_mutex_unlock(tsdn, &arena->lock);
}

static bool
arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t npages = (oldsize + large_pad) >> LG_PAGE;
	size_t followsize;

	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
	    large_pad);

	/* Try to extend the run. */
	malloc_mutex_lock(tsdn, &arena->lock);
	if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
	    pageind+npages) != 0)
		goto label_fail;
	followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
	if (oldsize + followsize >= usize_min) {
		/*
		 * The next run is available and sufficiently large.  Split the
		 * following run, then merge the first part with the existing
		 * allocation.
		 */
		arena_run_t *run;
		size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;

		usize = usize_max;
		while (oldsize + followsize < usize)
			usize = index2size(size2index(usize)-1);
		assert(usize >= usize_min);
		assert(usize >= oldsize);
		splitsize = usize - oldsize;
		if (splitsize == 0)
			goto label_fail;

		run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
		if (arena_run_split_large(arena, run, splitsize, zero))
			goto label_fail;

		if (config_cache_oblivious && zero) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the run is a multiple of
			 * CACHELINE in [0 .. PAGE).
			 */
			void *zbase = (void *)((uintptr_t)ptr + oldsize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}

		size = oldsize + splitsize;
		npages = (size + large_pad) >> LG_PAGE;

		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		 */
		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
		flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
		arena_mapbits_large_set(chunk, pageind, size + large_pad,
		    flag_dirty | (flag_unzeroed_mask &
		    arena_mapbits_unzeroed_get(chunk, pageind)));
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
		    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
		    pageind+npages-1)));

		if (config_stats) {
			szind_t oldindex = size2index(oldsize) - NBINS;
			szind_t index = size2index(size) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[oldindex].ndalloc++;
			arena->stats.lstats[oldindex].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[index].nmalloc++;
			arena->stats.lstats[index].nrequests++;
			arena->stats.lstats[index].curruns++;
		}
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (false);
	}
label_fail:
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (true);
}

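/*
 * Step-down example (illustrative; assumes the usual 4 KiB-page size
 * classes): growing oldsize == 40 KiB toward usize_max == 128 KiB with
 * only followsize == 32 KiB free behind it walks usize down 128K -> 112K
 * -> 96K -> 80K -> 64K, the first class satisfying oldsize + followsize >=
 * usize, and then splits splitsize == 24 KiB off the following run.
 */
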
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free)) {
		memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
		    old_usize - usize);
	}
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
    JEMALLOC_N(n_arena_ralloc_junk_large);
#endif

/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail when growing an object if the run that follows it is already
 * in use.
 */
static bool
arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	arena_chunk_t *chunk;
	arena_t *arena;

	if (oldsize == usize_max) {
		/* Current size class is compatible and maximal. */
		return (false);
	}

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = extent_node_arena_get(&chunk->node);

	if (oldsize < usize_max) {
		bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
		    oldsize, usize_min, usize_max, zero);
		if (config_fill && !ret && !zero) {
			if (unlikely(opt_junk_alloc)) {
				memset((void *)((uintptr_t)ptr + oldsize),
				    JEMALLOC_ALLOC_JUNK,
				    isalloc(tsdn, ptr, config_prof) - oldsize);
			} else if (unlikely(opt_zero)) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    isalloc(tsdn, ptr, config_prof) - oldsize);
			}
		}
		return (ret);
	}

	assert(oldsize > usize_max);
	/* Fill before shrinking in order to avoid a race. */
	arena_ralloc_junk_large(ptr, oldsize, usize_max);
	arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
	return (false);
}

bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero)
{
	size_t usize_min, usize_max;

	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);

	if (unlikely(size > HUGE_MAXCLASS))
		return (true);

	usize_min = s2u(size);
	usize_max = s2u(size + extra);
	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
		arena_chunk_t *chunk;

		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		if (oldsize <= SMALL_MAXCLASS) {
			assert(arena_bin_info[size2index(oldsize)].reg_size ==
			    oldsize);
			if ((usize_max > SMALL_MAXCLASS ||
			    size2index(usize_max) != size2index(oldsize)) &&
			    (size > oldsize || usize_max < oldsize))
				return (true);
		} else {
			if (usize_max <= SMALL_MAXCLASS)
				return (true);
			if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
			    usize_max, zero))
				return (true);
		}

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
		return (false);
	} else {
		return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
		    usize_max, zero));
	}
}

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment == 0)
		return (arena_malloc(tsdn, arena, usize, size2index(usize),
		    zero, tcache, true));
	usize = sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
}

void *
arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;

	usize = s2u(size);
	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
		return (NULL);

	if (likely(usize <= large_maxclass)) {
		size_t copysize;

		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
		    zero))
			return (ptr);

		/*
		 * size and oldsize are different enough that we need to move
		 * the object.  In that case, fall back to allocating new space
		 * and copying.
		 */
		ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
		    alignment, zero, tcache);
		if (ret == NULL)
			return (NULL);

		/*
		 * Junk/zero-filling were already done by
		 * ipalloc()/arena_malloc().
		 */

		copysize = (usize < oldsize) ? usize : oldsize;
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
		memcpy(ret, ptr, copysize);
		isqalloc(tsd, ptr, oldsize, tcache, true);
	} else {
		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
		    zero, tcache);
	}
	return (ret);
}

dss_prec_t
arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
{
	dss_prec_t ret;

	malloc_mutex_lock(tsdn, &arena->lock);
	ret = arena->dss_prec;
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (ret);
}

bool
arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(tsdn, &arena->lock);
	arena->dss_prec = dss_prec;
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (false);
}

ssize_t
arena_lg_dirty_mult_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
}

bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{

	if (opt_purge != purge_mode_ratio)
		return (true);
	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);
	atomic_write_z((size_t *)&lg_dirty_mult_default,
	    (size_t)lg_dirty_mult);
	return (false);
}

ssize_t
arena_decay_time_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
}

bool
arena_decay_time_default_set(ssize_t decay_time)
{

	if (opt_purge != purge_mode_decay)
		return (true);
	if (!arena_decay_time_valid(decay_time))
		return (true);
	atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
	return (false);
}

static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena->dss_prec];
	*lg_dirty_mult = arena->lg_dirty_mult;
	*decay_time = arena->decay_time;
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;
}

void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
    malloc_huge_stats_t *hstats)
{
	unsigned i;

	cassert(config_stats);

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);

	astats->mapped += arena->stats.mapped;
	astats->retained += arena->stats.retained;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->metadata_mapped += arena->stats.metadata_mapped;
	astats->metadata_allocated += arena_metadata_allocated_get(arena);
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;
	astats->allocated_huge += arena->stats.allocated_huge;
	astats->nmalloc_huge += arena->stats.nmalloc_huge;
	astats->ndalloc_huge += arena->stats.ndalloc_huge;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}

	for (i = 0; i < nhclasses; i++) {
		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
	}
	malloc_mutex_unlock(tsdn, &arena->lock);

	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(tsdn, &bin->lock);
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		bstats[i].curregs += bin->stats.curregs;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(tsdn, &bin->lock);
	}
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal)
{

	return (atomic_read_u(&arena->nthreads[internal]));
}

void
arena_nthreads_inc(arena_t *arena, bool internal)
{

	atomic_add_u(&arena->nthreads[internal], 1);
}

void
arena_nthreads_dec(arena_t *arena, bool internal)
{

	atomic_sub_u(&arena->nthreads[internal], 1);
}

arena_t *
arena_new(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;
	size_t arena_size;
	unsigned i;

	/* Compute arena size to incorporate sufficient runs_avail elements. */
	arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_heap_t) *
	    runs_avail_nclasses);
	/*
	 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
	 * because there is no way to clean up if base_alloc() OOMs.
	 */
	if (config_stats) {
		arena = (arena_t *)base_alloc(tsdn,
		    CACHELINE_CEILING(arena_size) + QUANTUM_CEILING(nlclasses *
		    sizeof(malloc_large_stats_t) + nhclasses) *
		    sizeof(malloc_huge_stats_t));
	} else
		arena = (arena_t *)base_alloc(tsdn, arena_size);
	if (arena == NULL)
		return (NULL);

	arena->ind = ind;
	arena->nthreads[0] = arena->nthreads[1] = 0;
	if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
		return (NULL);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(arena_size));
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(arena_size) +
		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
		memset(arena->stats.hstats, 0, nhclasses *
		    sizeof(malloc_huge_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}

	if (config_prof)
		arena->prof_accumbytes = 0;

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		arena->offset_state = config_debug ? ind :
		    (uint64_t)(uintptr_t)arena;
	}

	arena->dss_prec = chunk_dss_prec_get(tsdn);

	ql_new(&arena->achunks);

	arena->spare = NULL;

	arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
	arena->purging = false;
	arena->nactive = 0;
	arena->ndirty = 0;

	for (i = 0; i < runs_avail_nclasses; i++)
		arena_run_heap_new(&arena->runs_avail[i]);
	qr_new(&arena->runs_dirty, rd_link);
	qr_new(&arena->chunks_cache, cc_link);

	if (opt_purge == purge_mode_decay)
		arena_decay_init(arena, arena_decay_time_default_get());

	ql_new(&arena->huge);
	if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
	    WITNESS_RANK_ARENA_HUGE))
		return (NULL);

	extent_tree_szad_new(&arena->chunks_szad_cached);
	extent_tree_ad_new(&arena->chunks_ad_cached);
	extent_tree_szad_new(&arena->chunks_szad_retained);
	extent_tree_ad_new(&arena->chunks_ad_retained);
	if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
	    WITNESS_RANK_ARENA_CHUNKS))
		return (NULL);
	ql_new(&arena->node_cache);
	if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
	    WITNESS_RANK_ARENA_NODE_CACHE))
		return (NULL);

	arena->chunk_hooks = chunk_hooks_default;

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock, "arena_bin",
		    WITNESS_RANK_ARENA_BIN))
			return (NULL);
		bin->runcur = NULL;
		arena_run_heap_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (arena);
}

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size <= arena_maxrun
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
 * these settings are all interdependent.
 */
static void
bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
	size_t pad_size;
	size_t try_run_size, perfect_run_size, actual_run_size;
	uint32_t try_nregs, perfect_nregs, actual_nregs;

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	 */
	if (config_fill && unlikely(opt_redzone)) {
		size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);

	/*
	 * Compute run size under ideal conditions (no redzones, no limit on run
	 * size).
	 */
	try_run_size = PAGE;
	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	do {
		perfect_run_size = try_run_size;
		perfect_nregs = try_nregs;

		try_run_size += PAGE;
		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
	assert(perfect_nregs <= RUN_MAXREGS);

	actual_run_size = perfect_run_size;
	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
	    bin_info->reg_interval);

	/*
	 * Redzones can require enough padding that not even a single region can
	 * fit within the number of pages that would normally be dedicated to a
	 * run for this size class.  Increase the run size until at least one
	 * region fits.
	 */
	while (actual_nregs == 0) {
		assert(config_fill && unlikely(opt_redzone));

		actual_run_size += PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}

	/*
	 * Make sure that the run will fit within an arena chunk.
	 */
	while (actual_run_size > arena_maxrun) {
		actual_run_size -= PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}
	assert(actual_nregs > 0);
	assert(actual_run_size == s2u(actual_run_size));

	/* Copy final settings. */
	bin_info->run_size = actual_run_size;
	bin_info->nregs = actual_nregs;
	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);

	if (actual_run_size > small_maxrun)
		small_maxrun = actual_run_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
}

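/*
 * Worked example (no redzones, 4 KiB pages): for reg_size == 96 the loop
 * above tries 4096 bytes (42 regions, 64 bytes of slop), then 8192 (85
 * regions, 32 of slop), then 12288 (128 regions, 0 slop), and settles on
 * perfect_run_size == 12288 with nregs == 128, since 128 * 96 == 12288
 * exactly.
 */
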
static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;

#define	BIN_INFO_INIT_bin_yes(index, size)				\
	bin_info = &arena_bin_info[index];				\
	bin_info->reg_size = size;					\
	bin_info_run_size_calc(bin_info);				\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define	BIN_INFO_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}

static bool
small_run_size_init(void)
{

	assert(small_maxrun != 0);

	small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * (small_maxrun >>
	    LG_PAGE));
	if (small_run_tab == NULL)
		return (true);

#define	TAB_INIT_bin_yes(index, size) {					\
		arena_bin_info_t *bin_info = &arena_bin_info[index];	\
		small_run_tab[bin_info->run_size >> LG_PAGE] = true;	\
	}
#define	TAB_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef TAB_INIT_bin_yes
#undef TAB_INIT_bin_no
#undef SC

	return (false);
}

static bool
run_quantize_init(void)
{
	unsigned i;

	run_quantize_max = chunksize + large_pad;

	run_quantize_floor_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_floor_tab == NULL)
		return (true);

	run_quantize_ceil_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_ceil_tab == NULL)
		return (true);

	for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) {
		size_t run_size = i << LG_PAGE;

		run_quantize_floor_tab[i-1] =
		    run_quantize_floor_compute(run_size);
		run_quantize_ceil_tab[i-1] =
		    run_quantize_ceil_compute(run_size);
	}

	return (false);
}

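/*
 * Lookup example (illustrative): a 5-page (20 KiB with 4 KiB pages) run
 * size indexes run_quantize_*_tab[4]; the floor table memoizes the largest
 * quantized size <= the input and the ceil table the smallest quantized
 * size >= it, so the relatively expensive *_compute() routines run only
 * once, at boot.
 */
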
bool
arena_boot(void)
{
	unsigned i;

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
	arena_decay_time_default_set(opt_decay_time);

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		size_t header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);

	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	large_maxclass = index2size(size2index(chunksize)-1);
	if (large_maxclass > arena_maxrun) {
		/*
		 * For small chunk sizes it's possible for there to be fewer
		 * non-header pages available than are necessary to serve the
		 * size classes just below chunksize.
		 */
		large_maxclass = arena_maxrun;
	}
	assert(large_maxclass > 0);
	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
	nhclasses = NSIZES - nlclasses - NBINS;

	bin_info_init();
	if (small_run_size_init())
		return (true);
	if (run_quantize_init())
		return (true);

	runs_avail_bias = size2index(PAGE);
	runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias;

	return (false);
}

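/*
 * Editorial note on the three-pass loop above: pass 1 sizes the header as
 * if the page map were unbiased, overestimating map_bias; pass 2 shrinks
 * the header by the pages pass 1 removed from the map, possibly
 * undershooting by one; pass 3 can only grow the estimate back, so its
 * output is a fixed point of the computation and therefore correct.
 */
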
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->lock);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
	malloc_mutex_prefork(tsdn, &arena->huge_mtx);
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->lock);
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->lock);
}