1 #define JEMALLOC_ARENA_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
4 /******************************************************************************/
7 purge_mode_t opt_purge = PURGE_DEFAULT;
8 const char *purge_mode_names[] = {
9 "ratio",
10 "decay",
11 "N/A"
12 };
13 ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
14 static ssize_t lg_dirty_mult_default;
15 ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
16 static ssize_t decay_time_default;
18 arena_bin_info_t arena_bin_info[NBINS];
21 size_t map_misc_offset;
22 size_t arena_maxrun; /* Max run size for arenas. */
23 size_t large_maxclass; /* Max large size class. */
24 size_t run_quantize_max; /* Max run_quantize_*() input. */
25 static size_t small_maxrun; /* Max run size for small size classes. */
26 static bool *small_run_tab; /* Valid small run page multiples. */
27 static size_t *run_quantize_floor_tab; /* run_quantize_floor() memoization. */
28 static size_t *run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
29 unsigned nlclasses; /* Number of large size classes. */
30 unsigned nhclasses; /* Number of huge size classes. */
31 static szind_t runs_avail_bias; /* Size index for first runs_avail tree. */
32 static szind_t runs_avail_nclasses; /* Number of runs_avail trees. */
34 /******************************************************************************/
35 /*
36 * Function prototypes for static functions that are referenced prior to
37 * definition.
38 */
40 static void arena_purge_to_limit(arena_t *arena, size_t ndirty_limit);
41 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
42 bool cleaned, bool decommitted);
43 static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
44 arena_run_t *run, arena_bin_t *bin);
45 static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
46 arena_run_t *run, arena_bin_t *bin);
48 /******************************************************************************/
50 JEMALLOC_INLINE_C size_t
51 arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
54 size_t pageind, mapbits;
56 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
57 pageind = arena_miscelm_to_pageind(miscelm);
58 mapbits = arena_mapbits_get(chunk, pageind);
59 return (arena_mapbits_size_decode(mapbits));
63 arena_run_addr_comp(const arena_chunk_map_misc_t *a,
64 const arena_chunk_map_misc_t *b)
66 uintptr_t a_miscelm = (uintptr_t)a;
67 uintptr_t b_miscelm = (uintptr_t)b;
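/*
 * The subtraction below is the branch-free three-way comparison idiom:
 * it evaluates to -1, 0, or 1 depending on whether a_miscelm is less
 * than, equal to, or greater than b_miscelm, which is the contract that
 * rb_gen()-generated tree code expects from a comparison function.
 */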
72 return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
75 /* Generate red-black tree functions. */
76 rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
77 rb_link, arena_run_addr_comp)
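/*
 * rb_gen() expands to a full set of static red-black tree operations
 * (arena_run_tree_insert(), arena_run_tree_remove(), arena_run_tree_first(),
 * arena_run_tree_search(), and related iterators), linked through the
 * rb_link field of arena_chunk_map_misc_t and ordered by
 * arena_run_addr_comp(), i.e. by map element address.  The runs_avail and
 * bin->runs code below calls these generated functions.
 */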
80 run_quantize_floor_compute(size_t size)
85 assert(size == PAGE_CEILING(size));
87 /* Don't change sizes that are valid small run sizes. */
88 if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
92 * Round down to the nearest run size that can actually be requested
93 * during normal large allocation. Add large_pad so that cache index
94 * randomization can offset the allocation from the page boundary.
96 qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
97 if (qsize <= SMALL_MAXCLASS + large_pad)
98 return (run_quantize_floor_compute(size - large_pad));
99 assert(qsize <= size);
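/*
 * Rough worked example, assuming 4 KiB pages, large_pad == 0, and a bin
 * configuration in which 13 pages is not a valid small run size:
 * run_quantize_floor_compute(13 * 4096 == 53248) falls between the large
 * size classes 49152 and 57344, so size2index(53248 + 1) - 1 names the
 * 49152 class and the result is 49152 (12 pages).  Sizes that are valid
 * small run sizes, or that already equal a large class plus large_pad,
 * quantize to themselves.
 */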
104 run_quantize_ceil_compute_hard(size_t size)
106 size_t large_run_size_next;
109 assert(size == PAGE_CEILING(size));
112 * Return the next quantized size greater than the input size.
113 * Quantized sizes comprise the union of run sizes that back small
114 * region runs, and run sizes that back large regions with no explicit
115 * alignment constraints.
118 if (size > SMALL_MAXCLASS) {
119 large_run_size_next = PAGE_CEILING(index2size(size2index(size -
120 large_pad) + 1) + large_pad);
122 large_run_size_next = SIZE_T_MAX;
123 if (size >= small_maxrun)
124 return (large_run_size_next);
128 assert(size <= small_maxrun);
129 if (small_run_tab[size >> LG_PAGE]) {
130 if (large_run_size_next < size)
131 return (large_run_size_next);
138 run_quantize_ceil_compute(size_t size)
140 size_t qsize = run_quantize_floor_compute(size);
144 * Skip a quantization that may have an adequately large run,
145 * because under-sized runs may be mixed in. This only happens
146 * when an unusual size is requested, i.e. for aligned
147 * allocation, and is just one of several places where linear
148 * search would potentially find sufficiently aligned available
149 * memory somewhere lower.
151 qsize = run_quantize_ceil_compute_hard(qsize);
157 #undef run_quantize_floor
158 #define run_quantize_floor JEMALLOC_N(run_quantize_floor_impl)
161 run_quantize_floor(size_t size)
166 assert(size <= run_quantize_max);
167 assert((size & PAGE_MASK) == 0);
169 ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
170 assert(ret == run_quantize_floor_compute(size));
174 #undef run_quantize_floor
175 #define run_quantize_floor JEMALLOC_N(run_quantize_floor)
176 run_quantize_t *run_quantize_floor = JEMALLOC_N(run_quantize_floor_impl);
180 #undef run_quantize_ceil
181 #define run_quantize_ceil JEMALLOC_N(run_quantize_ceil_impl)
184 run_quantize_ceil(size_t size)
189 assert(size <= run_quantize_max);
190 assert((size & PAGE_MASK) == 0);
192 ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
193 assert(ret == run_quantize_ceil_compute(size));
197 #undef run_quantize_ceil
198 #define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
199 run_quantize_t *run_quantize_ceil = JEMALLOC_N(run_quantize_ceil_impl);
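/*
 * The #undef/#define pairs around run_quantize_floor()/run_quantize_ceil()
 * are jemalloc's JEMALLOC_JET testing hooks: in test builds the
 * implementations are compiled under the *_impl names and exposed via
 * these writable run_quantize_t function pointers so that unit tests can
 * interpose on them; production builds call the functions directly.
 */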
202 static arena_run_tree_t *
203 arena_runs_avail_get(arena_t *arena, szind_t ind)
206 assert(ind >= runs_avail_bias);
207 assert(ind - runs_avail_bias < runs_avail_nclasses);
209 return (&arena->runs_avail[ind - runs_avail_bias]);
213 arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
216 szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
217 arena_miscelm_get(chunk, pageind))));
218 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
220 arena_run_tree_insert(arena_runs_avail_get(arena, ind),
221 arena_miscelm_get(chunk, pageind));
225 arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
228 szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
229 arena_miscelm_get(chunk, pageind))));
230 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
232 arena_run_tree_remove(arena_runs_avail_get(arena, ind),
233 arena_miscelm_get(chunk, pageind));
237 arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
240 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
242 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
244 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
245 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
248 qr_new(&miscelm->rd, rd_link);
249 qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
250 arena->ndirty += npages;
254 arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
257 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
259 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
261 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
262 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
265 qr_remove(&miscelm->rd, rd_link);
266 assert(arena->ndirty >= npages);
267 arena->ndirty -= npages;
271 arena_chunk_dirty_npages(const extent_node_t *node)
274 return (extent_node_size_get(node) >> LG_PAGE);
278 arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
282 extent_node_dirty_linkage_init(node);
283 extent_node_dirty_insert(node, &arena->runs_dirty,
284 &arena->chunks_cache);
285 arena->ndirty += arena_chunk_dirty_npages(node);
290 arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
294 extent_node_dirty_remove(node);
295 assert(arena->ndirty >= arena_chunk_dirty_npages(node));
296 arena->ndirty -= arena_chunk_dirty_npages(node);
300 JEMALLOC_INLINE_C void *
301 arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
305 arena_chunk_map_misc_t *miscelm;
308 assert(run->nfree > 0);
309 assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
311 regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
312 miscelm = arena_run_to_miscelm(run);
313 rpages = arena_miscelm_to_rpages(miscelm);
314 ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
315 (uintptr_t)(bin_info->reg_interval * regind));
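/*
 * Illustrative arithmetic for a hypothetical bin (not a real default):
 * with reg0_offset == 0 and reg_interval == reg_size == 64, region
 * regind == 3 is returned at rpages + 3*64 == rpages + 192.  When
 * redzones are enabled, reg_interval exceeds reg_size, so consecutive
 * regions are additionally separated by redzone padding.
 */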
320 JEMALLOC_INLINE_C void
321 arena_run_reg_dalloc(arena_run_t *run, void *ptr)
323 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
324 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
325 size_t mapbits = arena_mapbits_get(chunk, pageind);
326 szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
327 arena_bin_info_t *bin_info = &arena_bin_info[binind];
328 size_t regind = arena_run_regind(run, bin_info, ptr);
330 assert(run->nfree < bin_info->nregs);
331 /* Freeing an interior pointer can cause assertion failure. */
332 assert(((uintptr_t)ptr -
333 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
334 (uintptr_t)bin_info->reg0_offset)) %
335 (uintptr_t)bin_info->reg_interval == 0);
336 assert((uintptr_t)ptr >=
337 (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
338 (uintptr_t)bin_info->reg0_offset);
339 /* Freeing an unallocated pointer can cause assertion failure. */
340 assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
342 bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
346 JEMALLOC_INLINE_C void
347 arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
350 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
351 (run_ind << LG_PAGE)), (npages << LG_PAGE));
352 memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
353 (npages << LG_PAGE));
356 JEMALLOC_INLINE_C void
357 arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
360 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
364 JEMALLOC_INLINE_C void
365 arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
368 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
370 arena_run_page_mark_zeroed(chunk, run_ind);
371 for (i = 0; i < PAGE / sizeof(size_t); i++)
376 arena_nactive_add(arena_t *arena, size_t add_pages)
380 size_t cactive_add = CHUNK_CEILING((arena->nactive +
381 add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
382 LG_PAGE);
383 if (cactive_add != 0)
384 stats_cactive_add(cactive_add);
386 arena->nactive += add_pages;
390 arena_nactive_sub(arena_t *arena, size_t sub_pages)
394 size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
395 CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
396 if (cactive_sub != 0)
397 stats_cactive_sub(cactive_sub);
399 arena->nactive -= sub_pages;
403 arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
404 size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
406 size_t total_pages, rem_pages;
408 assert(flag_dirty == 0 || flag_decommitted == 0);
410 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
412 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
414 assert(need_pages <= total_pages);
415 rem_pages = total_pages - need_pages;
417 arena_avail_remove(arena, chunk, run_ind, total_pages);
419 arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
420 arena_nactive_add(arena, need_pages);
422 /* Keep track of trailing unused pages for later use. */
424 size_t flags = flag_dirty | flag_decommitted;
425 size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
428 arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
429 (rem_pages << LG_PAGE), flags |
430 (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
431 flag_unzeroed_mask));
432 arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
433 (rem_pages << LG_PAGE), flags |
434 (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
435 flag_unzeroed_mask));
436 if (flag_dirty != 0) {
437 arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
440 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
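/*
 * Sketch of the common case: splitting need_pages == 3 out of a 10-page
 * unallocated run leaves rem_pages == 7; the trailing 7-page remainder
 * is re-tagged as unallocated above and re-inserted into runs_avail
 * (and, if dirty, into the dirty run list) so it remains allocatable.
 */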
445 arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
446 bool remove, bool zero)
448 arena_chunk_t *chunk;
449 arena_chunk_map_misc_t *miscelm;
450 size_t flag_dirty, flag_decommitted, run_ind, need_pages;
451 size_t flag_unzeroed_mask;
453 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
454 miscelm = arena_run_to_miscelm(run);
455 run_ind = arena_miscelm_to_pageind(miscelm);
456 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
457 flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
458 need_pages = (size >> LG_PAGE);
459 assert(need_pages > 0);
461 if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
462 run_ind << LG_PAGE, size, arena->ind))
466 arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
467 flag_decommitted, need_pages);
471 if (flag_decommitted != 0) {
472 /* The run is untouched, and therefore zeroed. */
473 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
474 *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
475 (need_pages << LG_PAGE));
476 } else if (flag_dirty != 0) {
477 /* The run is dirty, so all pages must be zeroed. */
478 arena_run_zero(chunk, run_ind, need_pages);
481 * The run is clean, so some pages may be zeroed (i.e.
482 * never before touched).
485 for (i = 0; i < need_pages; i++) {
486 if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
488 arena_run_zero(chunk, run_ind+i, 1);
489 else if (config_debug) {
490 arena_run_page_validate_zeroed(chunk,
493 arena_run_page_mark_zeroed(chunk,
499 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
500 (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
504 * Set the last element first, in case the run only contains one page
505 * (i.e. both statements set the same element).
507 flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
508 CHUNK_MAP_UNZEROED : 0;
509 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
510 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
511 run_ind+need_pages-1)));
512 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
513 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
518 arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
521 return (arena_run_split_large_helper(arena, run, size, true, zero));
525 arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
528 return (arena_run_split_large_helper(arena, run, size, false, zero));
532 arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
535 arena_chunk_t *chunk;
536 arena_chunk_map_misc_t *miscelm;
537 size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
539 assert(binind != BININD_INVALID);
541 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
542 miscelm = arena_run_to_miscelm(run);
543 run_ind = arena_miscelm_to_pageind(miscelm);
544 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
545 flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
546 need_pages = (size >> LG_PAGE);
547 assert(need_pages > 0);
549 if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
550 run_ind << LG_PAGE, size, arena->ind))
553 arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
554 flag_decommitted, need_pages);
556 for (i = 0; i < need_pages; i++) {
557 size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
559 arena_mapbits_small_set(chunk, run_ind+i, i, binind,
561 if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
562 arena_run_page_validate_zeroed(chunk, run_ind+i);
564 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
565 (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
569 static arena_chunk_t *
570 arena_chunk_init_spare(arena_t *arena)
572 arena_chunk_t *chunk;
574 assert(arena->spare != NULL);
576 chunk = arena->spare;
579 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
580 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
581 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
583 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
585 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
586 arena_mapbits_dirty_get(chunk, chunk_npages-1));
592 arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
596 * The extent node notion of "committed" doesn't directly apply to
597 * arena chunks. Arbitrarily mark them as committed. The commit state
598 * of runs is tracked individually, and upon chunk deallocation the
599 * entire chunk is in a consistent commit state.
601 extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
602 extent_node_achunk_set(&chunk->node, true);
603 return (chunk_register(chunk, &chunk->node));
606 static arena_chunk_t *
607 arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
608 bool *zero, bool *commit)
610 arena_chunk_t *chunk;
612 malloc_mutex_unlock(&arena->lock);
614 chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
615 chunksize, chunksize, zero, commit);
616 if (chunk != NULL && !*commit) {
618 if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
619 LG_PAGE, arena->ind)) {
620 chunk_dalloc_wrapper(arena, chunk_hooks,
621 (void *)chunk, chunksize, *commit);
625 if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
627 /* Undo commit of header. */
628 chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
629 LG_PAGE, arena->ind);
631 chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
636 malloc_mutex_lock(&arena->lock);
640 static arena_chunk_t *
641 arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
643 arena_chunk_t *chunk;
644 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
646 chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
647 chunksize, zero, true);
649 if (arena_chunk_register(arena, chunk, *zero)) {
650 chunk_dalloc_cache(arena, &chunk_hooks, chunk,
657 chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
661 if (config_stats && chunk != NULL) {
662 arena->stats.mapped += chunksize;
663 arena->stats.metadata_mapped += (map_bias << LG_PAGE);
669 static arena_chunk_t *
670 arena_chunk_init_hard(arena_t *arena)
672 arena_chunk_t *chunk;
674 size_t flag_unzeroed, flag_decommitted, i;
676 assert(arena->spare == NULL);
680 chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
685 * Initialize the map to contain one maximal free untouched run. Mark
686 * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
687 * chunk.
688 */
689 flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
690 flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
691 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
692 flag_unzeroed | flag_decommitted);
694 * There is no need to initialize the internal page map entries unless
695 * the chunk is not zeroed.
698 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
699 (void *)arena_bitselm_get(chunk, map_bias+1),
700 (size_t)((uintptr_t) arena_bitselm_get(chunk,
701 chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
703 for (i = map_bias+1; i < chunk_npages-1; i++)
704 arena_mapbits_internal_set(chunk, i, flag_unzeroed);
706 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
707 *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
708 arena_bitselm_get(chunk, chunk_npages-1) -
709 (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
711 for (i = map_bias+1; i < chunk_npages-1; i++) {
712 assert(arena_mapbits_unzeroed_get(chunk, i) ==
717 arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
723 static arena_chunk_t *
724 arena_chunk_alloc(arena_t *arena)
726 arena_chunk_t *chunk;
728 if (arena->spare != NULL)
729 chunk = arena_chunk_init_spare(arena);
731 chunk = arena_chunk_init_hard(arena);
736 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
742 arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
745 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
746 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
747 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
749 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
751 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
752 arena_mapbits_dirty_get(chunk, chunk_npages-1));
753 assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
754 arena_mapbits_decommitted_get(chunk, chunk_npages-1));
756 /* Remove run from runs_avail, so that the arena does not use it. */
757 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
759 if (arena->spare != NULL) {
760 arena_chunk_t *spare = arena->spare;
761 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
764 arena->spare = chunk;
765 if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
766 arena_run_dirty_remove(arena, spare, map_bias,
767 chunk_npages-map_bias);
770 chunk_deregister(spare, &spare->node);
772 committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
776 * Decommit the header. Mark the chunk as decommitted
777 * even if header decommit fails, since treating a
778 * partially committed chunk as committed has a high
779 * potential for causing later access of decommitted
780 * memory.
781 */
782 chunk_hooks = chunk_hooks_get(arena);
783 chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
784 LG_PAGE, arena->ind);
787 chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
788 chunksize, committed);
791 arena->stats.mapped -= chunksize;
792 arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
795 arena->spare = chunk;
799 arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
801 szind_t index = size2index(usize) - nlclasses - NBINS;
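/*
 * The global size-class index enumerates small bins first, then large
 * classes, then huge classes, so subtracting NBINS and nlclasses rebases
 * it to a zero-based index into stats.hstats[].
 */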
803 cassert(config_stats);
805 arena->stats.nmalloc_huge++;
806 arena->stats.allocated_huge += usize;
807 arena->stats.hstats[index].nmalloc++;
808 arena->stats.hstats[index].curhchunks++;
812 arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
814 szind_t index = size2index(usize) - nlclasses - NBINS;
816 cassert(config_stats);
818 arena->stats.nmalloc_huge--;
819 arena->stats.allocated_huge -= usize;
820 arena->stats.hstats[index].nmalloc--;
821 arena->stats.hstats[index].curhchunks--;
825 arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
827 szind_t index = size2index(usize) - nlclasses - NBINS;
829 cassert(config_stats);
831 arena->stats.ndalloc_huge++;
832 arena->stats.allocated_huge -= usize;
833 arena->stats.hstats[index].ndalloc++;
834 arena->stats.hstats[index].curhchunks--;
838 arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
840 szind_t index = size2index(usize) - nlclasses - NBINS;
842 cassert(config_stats);
844 arena->stats.ndalloc_huge--;
845 arena->stats.allocated_huge += usize;
846 arena->stats.hstats[index].ndalloc--;
847 arena->stats.hstats[index].curhchunks++;
851 arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
854 arena_huge_dalloc_stats_update(arena, oldsize);
855 arena_huge_malloc_stats_update(arena, usize);
859 arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
863 arena_huge_dalloc_stats_update_undo(arena, oldsize);
864 arena_huge_malloc_stats_update_undo(arena, usize);
868 arena_node_alloc(arena_t *arena)
872 malloc_mutex_lock(&arena->node_cache_mtx);
873 node = ql_last(&arena->node_cache, ql_link);
875 malloc_mutex_unlock(&arena->node_cache_mtx);
876 return (base_alloc(sizeof(extent_node_t)));
878 ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
879 malloc_mutex_unlock(&arena->node_cache_mtx);
884 arena_node_dalloc(arena_t *arena, extent_node_t *node)
887 malloc_mutex_lock(&arena->node_cache_mtx);
888 ql_elm_new(node, ql_link);
889 ql_tail_insert(&arena->node_cache, node, ql_link);
890 malloc_mutex_unlock(&arena->node_cache_mtx);
894 arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
895 size_t usize, size_t alignment, bool *zero, size_t csize)
900 ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
903 /* Revert optimistic stats updates. */
904 malloc_mutex_lock(&arena->lock);
906 arena_huge_malloc_stats_update_undo(arena, usize);
907 arena->stats.mapped -= usize;
909 arena_nactive_sub(arena, usize >> LG_PAGE);
910 malloc_mutex_unlock(&arena->lock);
917 arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
921 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
922 size_t csize = CHUNK_CEILING(usize);
924 malloc_mutex_lock(&arena->lock);
926 /* Optimistically update stats. */
928 arena_huge_malloc_stats_update(arena, usize);
929 arena->stats.mapped += usize;
931 arena_nactive_add(arena, usize >> LG_PAGE);
933 ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
935 malloc_mutex_unlock(&arena->lock);
937 ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
938 alignment, zero, csize);
945 arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
947 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
950 csize = CHUNK_CEILING(usize);
951 malloc_mutex_lock(&arena->lock);
953 arena_huge_dalloc_stats_update(arena, usize);
954 arena->stats.mapped -= usize;
956 arena_nactive_sub(arena, usize >> LG_PAGE);
958 chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
959 malloc_mutex_unlock(&arena->lock);
963 arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
967 assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
968 assert(oldsize != usize);
970 malloc_mutex_lock(&arena->lock);
972 arena_huge_ralloc_stats_update(arena, oldsize, usize);
974 arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
976 arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
977 malloc_mutex_unlock(&arena->lock);
981 arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
984 size_t udiff = oldsize - usize;
985 size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
987 malloc_mutex_lock(&arena->lock);
989 arena_huge_ralloc_stats_update(arena, oldsize, usize);
991 arena->stats.mapped -= cdiff;
993 arena_nactive_sub(arena, udiff >> LG_PAGE);
996 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
997 void *nchunk = (void *)((uintptr_t)chunk +
998 CHUNK_CEILING(usize));
1000 chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
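/*
 * Hypothetical example, assuming the default 2 MiB chunk size: shrinking
 * a huge allocation that spans three chunks down to two gives
 * cdiff == 2 MiB, and that trailing chunk-aligned tail, starting at
 * chunk + CHUNK_CEILING(usize), is what gets returned to the chunk cache
 * here.
 */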
1002 malloc_mutex_unlock(&arena->lock);
1006 arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
1007 void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
1008 size_t udiff, size_t cdiff)
1013 err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
1014 zero, &commit) == NULL);
1016 /* Revert optimistic stats updates. */
1017 malloc_mutex_lock(&arena->lock);
1019 arena_huge_ralloc_stats_update_undo(arena, oldsize,
1021 arena->stats.mapped -= cdiff;
1023 arena_nactive_sub(arena, udiff >> LG_PAGE);
1024 malloc_mutex_unlock(&arena->lock);
1025 } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1026 cdiff, true, arena->ind)) {
1027 chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
1035 arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
1036 size_t usize, bool *zero)
1039 chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
1040 void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
1041 size_t udiff = usize - oldsize;
1042 size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
1044 malloc_mutex_lock(&arena->lock);
1046 /* Optimistically update stats. */
1048 arena_huge_ralloc_stats_update(arena, oldsize, usize);
1049 arena->stats.mapped += cdiff;
1051 arena_nactive_add(arena, udiff >> LG_PAGE);
1053 err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
1054 chunksize, zero, true) == NULL);
1055 malloc_mutex_unlock(&arena->lock);
1057 err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
1058 chunk, oldsize, usize, zero, nchunk, udiff,
1060 } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1061 cdiff, true, arena->ind)) {
1062 chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
1071 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
1072 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
1073 * same size.
1074 */
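/*
 * For example, a request whose size quantizes up to a 3-page run first
 * consults the runs_avail tree that holds runs quantizing to 3 pages,
 * then each successively larger class, and returns the address-lowest
 * run from the first non-empty tree.
 */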
1075 static arena_run_t *
1076 arena_run_first_best_fit(arena_t *arena, size_t size)
1080 ind = size2index(run_quantize_ceil(size));
1081 for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
1082 arena_chunk_map_misc_t *miscelm = arena_run_tree_first(
1083 arena_runs_avail_get(arena, i));
1084 if (miscelm != NULL)
1085 return (&miscelm->run);
1091 static arena_run_t *
1092 arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
1094 arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
1096 if (arena_run_split_large(arena, run, size, zero))
1102 static arena_run_t *
1103 arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
1105 arena_chunk_t *chunk;
1108 assert(size <= arena_maxrun);
1109 assert(size == PAGE_CEILING(size));
1111 /* Search the arena's chunks for the lowest best fit. */
1112 run = arena_run_alloc_large_helper(arena, size, zero);
1117 * No usable runs. Create a new chunk from which to allocate the run.
1119 chunk = arena_chunk_alloc(arena);
1120 if (chunk != NULL) {
1121 run = &arena_miscelm_get(chunk, map_bias)->run;
1122 if (arena_run_split_large(arena, run, size, zero))
1128 * arena_chunk_alloc() failed, but another thread may have made
1129 * sufficient memory available while this one dropped arena->lock in
1130 * arena_chunk_alloc(), so search one more time.
1132 return (arena_run_alloc_large_helper(arena, size, zero));
1135 static arena_run_t *
1136 arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
1138 arena_run_t *run = arena_run_first_best_fit(arena, size);
1140 if (arena_run_split_small(arena, run, size, binind))
1146 static arena_run_t *
1147 arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
1149 arena_chunk_t *chunk;
1152 assert(size <= arena_maxrun);
1153 assert(size == PAGE_CEILING(size));
1154 assert(binind != BININD_INVALID);
1156 /* Search the arena's chunks for the lowest best fit. */
1157 run = arena_run_alloc_small_helper(arena, size, binind);
1162 * No usable runs. Create a new chunk from which to allocate the run.
1164 chunk = arena_chunk_alloc(arena);
1165 if (chunk != NULL) {
1166 run = &arena_miscelm_get(chunk, map_bias)->run;
1167 if (arena_run_split_small(arena, run, size, binind))
1173 * arena_chunk_alloc() failed, but another thread may have made
1174 * sufficient memory available while this one dropped arena->lock in
1175 * arena_chunk_alloc(), so search one more time.
1177 return (arena_run_alloc_small_helper(arena, size, binind));
1181 arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
1184 return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
1189 arena_lg_dirty_mult_get(arena_t *arena)
1191 ssize_t lg_dirty_mult;
1193 malloc_mutex_lock(&arena->lock);
1194 lg_dirty_mult = arena->lg_dirty_mult;
1195 malloc_mutex_unlock(&arena->lock);
1197 return (lg_dirty_mult);
1201 arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
1204 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
1207 malloc_mutex_lock(&arena->lock);
1208 arena->lg_dirty_mult = lg_dirty_mult;
1209 arena_maybe_purge(arena);
1210 malloc_mutex_unlock(&arena->lock);
1216 arena_decay_deadline_init(arena_t *arena)
1219 assert(opt_purge == purge_mode_decay);
1222 * Generate a new deadline that is uniformly random within the next
1223 * epoch after the current one.
1225 nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
1226 nstime_add(&arena->decay_deadline, &arena->decay_interval);
1227 if (arena->decay_time > 0) {
1230 nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
1231 nstime_ns(&arena->decay_interval)));
1232 nstime_add(&arena->decay_deadline, &jitter);
1237 arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
1240 assert(opt_purge == purge_mode_decay);
1242 return (nstime_compare(&arena->decay_deadline, time) <= 0);
1246 arena_decay_backlog_npages_limit(const arena_t *arena)
1248 static const uint64_t h_steps[] = {
1249 #define STEP(step, h, x, y) \
1255 size_t npages_limit_backlog;
1258 assert(opt_purge == purge_mode_decay);
1261 * For each element of decay_backlog, multiply by the corresponding
1262 * fixed-point smoothstep decay factor. Sum the products, then divide
1263 * to round down to the nearest whole number of pages.
1266 for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
1267 sum += arena->decay_backlog[i] * h_steps[i];
1268 npages_limit_backlog = (sum >> SMOOTHSTEP_BFP);
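/*
 * Equivalently, npages_limit_backlog ~= sum_i(decay_backlog[i] *
 * h_steps[i]) / 2^SMOOTHSTEP_BFP: each epoch's count of newly dirtied
 * pages is scaled by a precomputed fixed-point smoothstep weight, and
 * the total is converted back from fixed point.  For instance, an epoch
 * that contributed 1000 dirty pages and whose weight is half of
 * 2^SMOOTHSTEP_BFP adds roughly 500 pages to the limit.
 */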
1270 return (npages_limit_backlog);
1274 arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
1278 size_t ndirty_delta;
1280 assert(opt_purge == purge_mode_decay);
1281 assert(arena_decay_deadline_reached(arena, time));
1283 nstime_copy(&delta, time);
1284 nstime_subtract(&delta, &arena->decay_epoch);
1285 nadvance = nstime_divide(&delta, &arena->decay_interval);
1286 assert(nadvance > 0);
1288 /* Add nadvance decay intervals to epoch. */
1289 nstime_copy(&delta, &arena->decay_interval);
1290 nstime_imultiply(&delta, nadvance);
1291 nstime_add(&arena->decay_epoch, &delta);
1293 /* Set a new deadline. */
1294 arena_decay_deadline_init(arena);
1296 /* Update the backlog. */
1297 if (nadvance >= SMOOTHSTEP_NSTEPS) {
1298 memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
1301 memmove(arena->decay_backlog, &arena->decay_backlog[nadvance],
1302 (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
1304 memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
1305 nadvance], 0, (nadvance-1) * sizeof(size_t));
1308 ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
1309 arena->decay_ndirty : 0;
1310 arena->decay_ndirty = arena->ndirty;
1311 arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
1312 arena->decay_backlog_npages_limit =
1313 arena_decay_backlog_npages_limit(arena);
1317 arena_decay_npages_limit(arena_t *arena)
1319 size_t npages_limit;
1321 assert(opt_purge == purge_mode_decay);
1323 npages_limit = arena->decay_backlog_npages_limit;
1325 /* Add in any dirty pages created during the current epoch. */
1326 if (arena->ndirty > arena->decay_ndirty)
1327 npages_limit += arena->ndirty - arena->decay_ndirty;
1329 return (npages_limit);
1333 arena_decay_init(arena_t *arena, ssize_t decay_time)
1336 arena->decay_time = decay_time;
1337 if (decay_time > 0) {
1338 nstime_init2(&arena->decay_interval, decay_time, 0);
1339 nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
1342 nstime_init(&arena->decay_epoch, 0);
1343 nstime_update(&arena->decay_epoch);
1344 arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
1345 arena_decay_deadline_init(arena);
1346 arena->decay_ndirty = arena->ndirty;
1347 arena->decay_backlog_npages_limit = 0;
1348 memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
1352 arena_decay_time_valid(ssize_t decay_time)
1355 if (decay_time < -1)
1357 if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
1363 arena_decay_time_get(arena_t *arena)
1367 malloc_mutex_lock(&arena->lock);
1368 decay_time = arena->decay_time;
1369 malloc_mutex_unlock(&arena->lock);
1371 return (decay_time);
1375 arena_decay_time_set(arena_t *arena, ssize_t decay_time)
1378 if (!arena_decay_time_valid(decay_time))
1381 malloc_mutex_lock(&arena->lock);
1383 * Restart decay backlog from scratch, which may cause many dirty pages
1384 * to be immediately purged. It would conceptually be possible to map
1385 * the old backlog onto the new backlog, but there is no justification
1386 * for such complexity since decay_time changes are intended to be
1387 * infrequent, either between the {-1, 0, >0} states, or a one-time
1388 * arbitrary change during initial arena configuration.
1390 arena_decay_init(arena, decay_time);
1391 arena_maybe_purge(arena);
1392 malloc_mutex_unlock(&arena->lock);
1398 arena_maybe_purge_ratio(arena_t *arena)
1401 assert(opt_purge == purge_mode_ratio);
1403 /* Don't purge if the option is disabled. */
1404 if (arena->lg_dirty_mult < 0)
1408 * Iterate, since preventing recursive purging could otherwise leave too
1409 * many dirty pages.
1410 */
1412 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1413 if (threshold < chunk_npages)
1414 threshold = chunk_npages;
1416 * Don't purge unless the number of purgeable pages exceeds the
1417 * threshold.
1418 */
1419 if (arena->ndirty <= threshold)
1421 arena_purge_to_limit(arena, threshold);
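/*
 * Numeric sketch, assuming the default 8:1 active:dirty ratio
 * (lg_dirty_mult == 3): with 80000 active pages the threshold is
 * max(80000 >> 3, chunk_npages) == 10000 pages, so purging starts only
 * once more than 10000 dirty pages have accumulated, and it stops at
 * that threshold.
 */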
1426 arena_maybe_purge_decay(arena_t *arena)
1429 size_t ndirty_limit;
1431 assert(opt_purge == purge_mode_decay);
1433 /* Purge all or nothing if the option is disabled. */
1434 if (arena->decay_time <= 0) {
1435 if (arena->decay_time == 0)
1436 arena_purge_to_limit(arena, 0);
1440 nstime_copy(&time, &arena->decay_epoch);
1441 if (unlikely(nstime_update(&time))) {
1442 /* Time went backwards. Force an epoch advance. */
1443 nstime_copy(&time, &arena->decay_deadline);
1446 if (arena_decay_deadline_reached(arena, &time))
1447 arena_decay_epoch_advance(arena, &time);
1449 ndirty_limit = arena_decay_npages_limit(arena);
1452 * Don't try to purge unless the number of purgeable pages exceeds the
1453 * threshold.
1454 */
1455 if (arena->ndirty <= ndirty_limit)
1457 arena_purge_to_limit(arena, ndirty_limit);
1461 arena_maybe_purge(arena_t *arena)
1464 /* Don't recursively purge. */
1468 if (opt_purge == purge_mode_ratio)
1469 arena_maybe_purge_ratio(arena);
1471 arena_maybe_purge_decay(arena);
1475 arena_dirty_count(arena_t *arena)
1478 arena_runs_dirty_link_t *rdelm;
1479 extent_node_t *chunkselm;
1481 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1482 chunkselm = qr_next(&arena->chunks_cache, cc_link);
1483 rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
1486 if (rdelm == &chunkselm->rd) {
1487 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
1488 chunkselm = qr_next(chunkselm, cc_link);
1490 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1491 rdelm);
1492 arena_chunk_map_misc_t *miscelm =
1493 arena_rd_to_miscelm(rdelm);
1494 size_t pageind = arena_miscelm_to_pageind(miscelm);
1495 assert(arena_mapbits_allocated_get(chunk, pageind) ==
1497 assert(arena_mapbits_large_get(chunk, pageind) == 0);
1498 assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1499 npages = arena_mapbits_unallocated_size_get(chunk,
1500 pageind) >> LG_PAGE;
1509 arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
1510 size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
1511 extent_node_t *purge_chunks_sentinel)
1513 arena_runs_dirty_link_t *rdelm, *rdelm_next;
1514 extent_node_t *chunkselm;
1515 size_t nstashed = 0;
1517 /* Stash runs/chunks according to ndirty_limit. */
1518 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1519 chunkselm = qr_next(&arena->chunks_cache, cc_link);
1520 rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
1522 rdelm_next = qr_next(rdelm, rd_link);
1524 if (rdelm == &chunkselm->rd) {
1525 extent_node_t *chunkselm_next;
1529 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
1530 if (opt_purge == purge_mode_decay && arena->ndirty -
1531 (nstashed + npages) < ndirty_limit)
1534 chunkselm_next = qr_next(chunkselm, cc_link);
1536 * Allocate. chunkselm remains valid due to the
1537 * dalloc_node=false argument to chunk_alloc_cache().
1540 chunk = chunk_alloc_cache(arena, chunk_hooks,
1541 extent_node_addr_get(chunkselm),
1542 extent_node_size_get(chunkselm), chunksize, &zero,
1544 assert(chunk == extent_node_addr_get(chunkselm));
1545 assert(zero == extent_node_zeroed_get(chunkselm));
1546 extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
1547 purge_chunks_sentinel);
1548 assert(npages == (extent_node_size_get(chunkselm) >>
1550 chunkselm = chunkselm_next;
1552 arena_chunk_t *chunk =
1553 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1554 arena_chunk_map_misc_t *miscelm =
1555 arena_rd_to_miscelm(rdelm);
1556 size_t pageind = arena_miscelm_to_pageind(miscelm);
1557 arena_run_t *run = &miscelm->run;
1558 size_t run_size =
1559 arena_mapbits_unallocated_size_get(chunk, pageind);
1561 npages = run_size >> LG_PAGE;
1562 if (opt_purge == purge_mode_decay && arena->ndirty -
1563 (nstashed + npages) < ndirty_limit)
1566 assert(pageind + npages <= chunk_npages);
1567 assert(arena_mapbits_dirty_get(chunk, pageind) ==
1568 arena_mapbits_dirty_get(chunk, pageind+npages-1));
1571 * If purging the spare chunk's run, make it available
1572 * prior to allocation.
1574 if (chunk == arena->spare)
1575 arena_chunk_alloc(arena);
1577 /* Temporarily allocate the free dirty run. */
1578 arena_run_split_large(arena, run, run_size, false);
1581 qr_new(rdelm, rd_link); /* Redundant. */
1583 assert(qr_next(rdelm, rd_link) == rdelm);
1584 assert(qr_prev(rdelm, rd_link) == rdelm);
1586 qr_meld(purge_runs_sentinel, rdelm, rd_link);
1590 if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
1599 arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
1600 arena_runs_dirty_link_t *purge_runs_sentinel,
1601 extent_node_t *purge_chunks_sentinel)
1603 size_t npurged, nmadvise;
1604 arena_runs_dirty_link_t *rdelm;
1605 extent_node_t *chunkselm;
1611 malloc_mutex_unlock(&arena->lock);
1612 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1613 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1614 rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
1617 if (rdelm == &chunkselm->rd) {
1619 * Don't actually purge the chunk here because 1)
1620 * chunkselm is embedded in the chunk and must remain
1621 * valid, and 2) we deallocate the chunk in
1622 * arena_unstash_purged(), where it is destroyed,
1623 * decommitted, or purged, depending on chunk
1624 * deallocation policy.
1626 size_t size = extent_node_size_get(chunkselm);
1627 npages = size >> LG_PAGE;
1628 chunkselm = qr_next(chunkselm, cc_link);
1630 size_t pageind, run_size, flag_unzeroed, flags, i;
1632 arena_chunk_t *chunk =
1633 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1634 arena_chunk_map_misc_t *miscelm =
1635 arena_rd_to_miscelm(rdelm);
1636 pageind = arena_miscelm_to_pageind(miscelm);
1637 run_size = arena_mapbits_large_size_get(chunk, pageind);
1638 npages = run_size >> LG_PAGE;
1640 assert(pageind + npages <= chunk_npages);
1641 assert(!arena_mapbits_decommitted_get(chunk, pageind));
1642 assert(!arena_mapbits_decommitted_get(chunk,
1644 decommitted = !chunk_hooks->decommit(chunk, chunksize,
1645 pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1648 flags = CHUNK_MAP_DECOMMITTED;
1650 flag_unzeroed = chunk_purge_wrapper(arena,
1651 chunk_hooks, chunk, chunksize, pageind <<
1652 LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1653 flags = flag_unzeroed;
1655 arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1656 flags);
1657 arena_mapbits_large_set(chunk, pageind, run_size,
1658 flags);
1661 * Set the unzeroed flag for internal pages, now that
1662 * chunk_purge_wrapper() has returned whether the pages
1663 * were zeroed as a side effect of purging. This chunk
1664 * map modification is safe even though the arena mutex
1665 * isn't currently owned by this thread, because the run
1666 * is marked as allocated, thus protecting it from being
1667 * modified by any other thread. As long as these
1668 * writes don't perturb the first and last elements'
1669 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1671 for (i = 1; i < npages-1; i++) {
1672 arena_mapbits_internal_set(chunk, pageind+i,
1681 malloc_mutex_lock(&arena->lock);
1684 arena->stats.nmadvise += nmadvise;
1685 arena->stats.purged += npurged;
1692 arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
1693 arena_runs_dirty_link_t *purge_runs_sentinel,
1694 extent_node_t *purge_chunks_sentinel)
1696 arena_runs_dirty_link_t *rdelm, *rdelm_next;
1697 extent_node_t *chunkselm;
1699 /* Deallocate chunks/runs. */
1700 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1701 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1702 rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1703 rdelm_next = qr_next(rdelm, rd_link);
1704 if (rdelm == &chunkselm->rd) {
1705 extent_node_t *chunkselm_next = qr_next(chunkselm,
1707 void *addr = extent_node_addr_get(chunkselm);
1708 size_t size = extent_node_size_get(chunkselm);
1709 bool zeroed = extent_node_zeroed_get(chunkselm);
1710 bool committed = extent_node_committed_get(chunkselm);
1711 extent_node_dirty_remove(chunkselm);
1712 arena_node_dalloc(arena, chunkselm);
1713 chunkselm = chunkselm_next;
1714 chunk_dalloc_arena(arena, chunk_hooks, addr, size,
1717 arena_chunk_t *chunk =
1718 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1719 arena_chunk_map_misc_t *miscelm =
1720 arena_rd_to_miscelm(rdelm);
1721 size_t pageind = arena_miscelm_to_pageind(miscelm);
1722 bool decommitted = (arena_mapbits_decommitted_get(chunk,
1724 arena_run_t *run = &miscelm->run;
1725 qr_remove(rdelm, rd_link);
1726 arena_run_dalloc(arena, run, false, true, decommitted);
1732 * NB: ndirty_limit is interpreted differently depending on opt_purge:
1733 * - purge_mode_ratio: Purge as few dirty runs/chunks as possible to reach the
1734 * threshold, while maintaining the invariant:
1735 * (arena->ndirty <= ndirty_limit)
1736 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1737 * violating the invariant:
1738 * (arena->ndirty >= ndirty_limit)
1741 arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
1743 chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
1744 size_t npurge, npurged;
1745 arena_runs_dirty_link_t purge_runs_sentinel;
1746 extent_node_t purge_chunks_sentinel;
1748 arena->purging = true;
1751 * Calls to arena_dirty_count() are disabled even for debug builds
1752 * because overhead grows nonlinearly as memory usage increases.
1754 if (false && config_debug) {
1755 size_t ndirty = arena_dirty_count(arena);
1756 assert(ndirty == arena->ndirty);
1758 assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1759 arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
1761 qr_new(&purge_runs_sentinel, rd_link);
1762 extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1764 npurge = arena_stash_dirty(arena, &chunk_hooks, ndirty_limit,
1765 &purge_runs_sentinel, &purge_chunks_sentinel);
1768 npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
1769 &purge_chunks_sentinel);
1770 assert(npurged == npurge);
1771 arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
1772 &purge_chunks_sentinel);
1775 arena->stats.npurge++;
1778 arena->purging = false;
1782 arena_purge(arena_t *arena, bool all)
1785 malloc_mutex_lock(&arena->lock);
1787 arena_purge_to_limit(arena, 0);
1789 arena_maybe_purge(arena);
1790 malloc_mutex_unlock(&arena->lock);
1794 arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
1795 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
1796 size_t flag_decommitted)
1798 size_t size = *p_size;
1799 size_t run_ind = *p_run_ind;
1800 size_t run_pages = *p_run_pages;
1802 /* Try to coalesce forward. */
1803 if (run_ind + run_pages < chunk_npages &&
1804 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
1805 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
1806 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
1808 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1810 size_t nrun_pages = nrun_size >> LG_PAGE;
1813 * Remove successor from runs_avail; the coalesced run is
1814 * inserted later.
1815 */
1816 assert(arena_mapbits_unallocated_size_get(chunk,
1817 run_ind+run_pages+nrun_pages-1) == nrun_size);
1818 assert(arena_mapbits_dirty_get(chunk,
1819 run_ind+run_pages+nrun_pages-1) == flag_dirty);
1820 assert(arena_mapbits_decommitted_get(chunk,
1821 run_ind+run_pages+nrun_pages-1) == flag_decommitted);
1822 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
1825 * If the successor is dirty, remove it from the set of dirty
1826 * runs.
1827 */
1828 if (flag_dirty != 0) {
1829 arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
1834 run_pages += nrun_pages;
1836 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1837 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1841 /* Try to coalesce backward. */
1842 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
1843 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
1844 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
1846 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
1848 size_t prun_pages = prun_size >> LG_PAGE;
1850 run_ind -= prun_pages;
1853 * Remove predecessor from runs_avail; the coalesced run is
1854 * inserted later.
1855 */
1856 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1858 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
1859 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
1861 arena_avail_remove(arena, chunk, run_ind, prun_pages);
1864 * If the predecessor is dirty, remove it from the set of dirty
1865 * runs.
1866 */
1867 if (flag_dirty != 0) {
1868 arena_run_dirty_remove(arena, chunk, run_ind,
1873 run_pages += prun_pages;
1875 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1876 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1881 *p_run_ind = run_ind;
1882 *p_run_pages = run_pages;
1886 arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1891 assert(run_ind >= map_bias);
1892 assert(run_ind < chunk_npages);
1894 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
1895 size = arena_mapbits_large_size_get(chunk, run_ind);
1896 assert(size == PAGE || arena_mapbits_large_size_get(chunk,
1897 run_ind+(size>>LG_PAGE)-1) == 0);
1899 arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
1900 size = bin_info->run_size;
1907 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
1910 arena_chunk_t *chunk;
1911 arena_chunk_map_misc_t *miscelm;
1912 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
1914 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1915 miscelm = arena_run_to_miscelm(run);
1916 run_ind = arena_miscelm_to_pageind(miscelm);
1917 assert(run_ind >= map_bias);
1918 assert(run_ind < chunk_npages);
1919 size = arena_run_size_get(arena, chunk, run, run_ind);
1920 run_pages = (size >> LG_PAGE);
1921 arena_nactive_sub(arena, run_pages);
1924 * The run is dirty if the caller claims to have dirtied it, as well as
1925 * if it was already dirty before being allocated and the caller
1926 * doesn't claim to have cleaned it.
1928 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1929 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
1930 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
1933 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
1934 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
1936 /* Mark pages as unallocated in the chunk map. */
1937 if (dirty || decommitted) {
1938 size_t flags = flag_dirty | flag_decommitted;
1939 arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
1940 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1943 arena_mapbits_unallocated_set(chunk, run_ind, size,
1944 arena_mapbits_unzeroed_get(chunk, run_ind));
1945 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1946 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
1949 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
1950 flag_dirty, flag_decommitted);
1952 /* Insert into runs_avail, now that coalescing is complete. */
1953 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1954 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
1955 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1956 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
1957 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
1958 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
1959 arena_avail_insert(arena, chunk, run_ind, run_pages);
1962 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
1964 /* Deallocate chunk if it is now completely unused. */
1965 if (size == arena_maxrun) {
1966 assert(run_ind == map_bias);
1967 assert(run_pages == (arena_maxrun >> LG_PAGE));
1968 arena_chunk_dalloc(arena, chunk);
1972 * It is okay to do dirty page processing here even if the chunk was
1973 * deallocated above, since in that case it is the spare. Waiting
1974 * until after possible chunk deallocation to do dirty processing
1975 * allows for an old spare to be fully deallocated, thus decreasing the
1976 * chances of spuriously crossing the dirty page purging threshold.
1979 arena_maybe_purge(arena);
1983 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1984 size_t oldsize, size_t newsize)
1986 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1987 size_t pageind = arena_miscelm_to_pageind(miscelm);
1988 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
1989 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
1990 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
1991 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
1992 CHUNK_MAP_UNZEROED : 0;
1994 assert(oldsize > newsize);
1997 * Update the chunk map so that arena_run_dalloc() can treat the
1998 * leading run as separately allocated. Set the last element of each
1999 * run first, in case of single-page runs.
2001 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
2002 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2003 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2004 pageind+head_npages-1)));
2005 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2006 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2009 UNUSED size_t tail_npages = newsize >> LG_PAGE;
2010 assert(arena_mapbits_large_size_get(chunk,
2011 pageind+head_npages+tail_npages-1) == 0);
2012 assert(arena_mapbits_dirty_get(chunk,
2013 pageind+head_npages+tail_npages-1) == flag_dirty);
2015 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
2016 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2017 pageind+head_npages)));
2019 arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
2023 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2024 size_t oldsize, size_t newsize, bool dirty)
2026 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2027 size_t pageind = arena_miscelm_to_pageind(miscelm);
2028 size_t head_npages = newsize >> LG_PAGE;
2029 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
2030 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2031 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2032 CHUNK_MAP_UNZEROED : 0;
2033 arena_chunk_map_misc_t *tail_miscelm;
2034 arena_run_t *tail_run;
2036 assert(oldsize > newsize);
2039 * Update the chunk map so that arena_run_dalloc() can treat the
2040 * trailing run as separately allocated. Set the last element of each
2041 * run first, in case of single-page runs.
2043 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
2044 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2045 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2046 pageind+head_npages-1)));
2047 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2048 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2051 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2052 assert(arena_mapbits_large_size_get(chunk,
2053 pageind+head_npages+tail_npages-1) == 0);
2054 assert(arena_mapbits_dirty_get(chunk,
2055 pageind+head_npages+tail_npages-1) == flag_dirty);
2057 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
2058 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2059 pageind+head_npages)));
2061 tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
2062 tail_run = &tail_miscelm->run;
2063 arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
2067 static arena_run_t *
2068 arena_bin_runs_first(arena_bin_t *bin)
2070 arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
2071 if (miscelm != NULL)
2072 return (&miscelm->run);
2078 arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2080 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2082 assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
2084 arena_run_tree_insert(&bin->runs, miscelm);
2088 arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
2090 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2092 assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
2094 arena_run_tree_remove(&bin->runs, miscelm);
2097 static arena_run_t *
2098 arena_bin_nonfull_run_tryget(arena_bin_t *bin)
2100 arena_run_t *run = arena_bin_runs_first(bin);
2102 arena_bin_runs_remove(bin, run);
2104 bin->stats.reruns++;
2109 static arena_run_t *
2110 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
2114 arena_bin_info_t *bin_info;
2116 /* Look for a usable run. */
2117 run = arena_bin_nonfull_run_tryget(bin);
2120 /* No existing runs have any space available. */
2122 binind = arena_bin_index(arena, bin);
2123 bin_info = &arena_bin_info[binind];
2125 /* Allocate a new run. */
2126 malloc_mutex_unlock(&bin->lock);
2127 /******************************/
2128 malloc_mutex_lock(&arena->lock);
2129 run = arena_run_alloc_small(arena, bin_info->run_size, binind);
2131 /* Initialize run internals. */
2132 run->binind = binind;
2133 run->nfree = bin_info->nregs;
2134 bitmap_init(run->bitmap, &bin_info->bitmap_info);
2136 malloc_mutex_unlock(&arena->lock);
2137 /********************************/
2138 malloc_mutex_lock(&bin->lock);
2142 bin->stats.curruns++;
2148 * arena_run_alloc_small() failed, but another thread may have made
2149 * sufficient memory available while this one dropped bin->lock above,
2150 * so search one more time.
2152 run = arena_bin_nonfull_run_tryget(bin);
2159 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
2161 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
2164 arena_bin_info_t *bin_info;
2167 binind = arena_bin_index(arena, bin);
2168 bin_info = &arena_bin_info[binind];
2170 run = arena_bin_nonfull_run_get(arena, bin);
2171 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2173 * Another thread updated runcur while this one ran without the
2174 * bin lock in arena_bin_nonfull_run_get().
2177 assert(bin->runcur->nfree > 0);
2178 ret = arena_run_reg_alloc(bin->runcur, bin_info);
2180 arena_chunk_t *chunk;
2183 * arena_run_alloc_small() may have allocated run, or
2184 * it may have pulled run from the bin's run tree.
2185 * Therefore it is unsafe to make any assumptions about
2186 * how run has previously been used, and
2187 * arena_bin_lower_run() must be called, as if a region
2188 * were just deallocated from the run.
2190 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2191 if (run->nfree == bin_info->nregs)
2192 arena_dalloc_bin_run(arena, chunk, run, bin);
2194 arena_bin_lower_run(arena, chunk, run, bin);
2204 assert(bin->runcur->nfree > 0);
2206 return (arena_run_reg_alloc(bin->runcur, bin_info));
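/*
 * Fill a tcache bin with regions from this arena.  The fill count is
 * ncached_max >> lg_fill_div, and regions are written starting at
 * tbin->avail - nfill so that the lowest-address regions are consumed first.
 */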
2210 arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
2211 szind_t binind, uint64_t prof_accumbytes)
2216 assert(tbin->ncached == 0);
2218 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
2220 bin = &arena->bins[binind];
2221 malloc_mutex_lock(&bin->lock);
2222 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2223 tbin->lg_fill_div); i < nfill; i++) {
2226 if ((run = bin->runcur) != NULL && run->nfree > 0)
2227 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2229 ptr = arena_bin_malloc_hard(arena, bin);
2232 * OOM. tbin->avail isn't yet filled down to its first
2233 * element, so the successful allocations (if any) must
2234 * be moved just before tbin->avail before bailing out.
2237 memmove(tbin->avail - i, tbin->avail - nfill,
2238 i * sizeof(void *));
2242 if (config_fill && unlikely(opt_junk_alloc)) {
2243 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2246 /* Insert such that low regions get used first. */
2247 *(tbin->avail - nfill + i) = ptr;
2250 bin->stats.nmalloc += i;
2251 bin->stats.nrequests += tbin->tstats.nrequests;
2252 bin->stats.curregs += i;
2253 bin->stats.nfills++;
2254 tbin->tstats.nrequests = 0;
2256 malloc_mutex_unlock(&bin->lock);
2258 arena_decay_tick(tsd, arena);
2262 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2266 size_t redzone_size = bin_info->redzone_size;
2267 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, redzone_size);
2269 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, redzone_size);
2272 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
2273 bin_info->reg_interval);
2278 #undef arena_redzone_corruption
2279 #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
2282 arena_redzone_corruption(void *ptr, size_t usize, bool after,
2283 size_t offset, uint8_t byte)
2286 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2287 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
2288 after ? "after" : "before", ptr, usize, byte);
2291 #undef arena_redzone_corruption
2292 #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2293 arena_redzone_corruption_t *arena_redzone_corruption =
2294 JEMALLOC_N(arena_redzone_corruption_impl);
2298 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
2302 if (opt_junk_alloc) {
2303 size_t size = bin_info->reg_size;
2304 size_t redzone_size = bin_info->redzone_size;
2307 for (i = 1; i <= redzone_size; i++) {
2308 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
2309 if (*byte != 0xa5) {
2311 arena_redzone_corruption(ptr, size, false, i,
2317 for (i = 0; i < redzone_size; i++) {
2318 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
2319 if (*byte != 0xa5) {
2321 arena_redzone_corruption(ptr, size, true, i,
2329 if (opt_abort && error)
2334 #undef arena_dalloc_junk_small
2335 #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
2338 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2340 size_t redzone_size = bin_info->redzone_size;
2342 arena_redzones_validate(ptr, bin_info, false);
2343 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
2344 bin_info->reg_interval);
2347 #undef arena_dalloc_junk_small
2348 #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2349 arena_dalloc_junk_small_t *arena_dalloc_junk_small =
2350 JEMALLOC_N(arena_dalloc_junk_small_impl);
2354 arena_quarantine_junk_small(void *ptr, size_t usize)
2357 arena_bin_info_t *bin_info;
2358 cassert(config_fill);
2359 assert(opt_junk_free);
2360 assert(opt_quarantine);
2361 assert(usize <= SMALL_MAXCLASS);
2363 binind = size2index(usize);
2364 bin_info = &arena_bin_info[binind];
2365 arena_redzones_validate(ptr, bin_info, true);
2369 arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
2376 assert(binind < NBINS);
2377 bin = &arena->bins[binind];
2378 usize = index2size(binind);
2380 malloc_mutex_lock(&bin->lock);
2381 if ((run = bin->runcur) != NULL && run->nfree > 0)
2382 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2384 ret = arena_bin_malloc_hard(arena, bin);
2387 malloc_mutex_unlock(&bin->lock);
2392 bin->stats.nmalloc++;
2393 bin->stats.nrequests++;
2394 bin->stats.curregs++;
2396 malloc_mutex_unlock(&bin->lock);
2397 if (config_prof && !isthreaded && arena_prof_accum(arena, usize))
2402 if (unlikely(opt_junk_alloc)) {
2403 arena_alloc_junk_small(ret,
2404 &arena_bin_info[binind], false);
2405 } else if (unlikely(opt_zero))
2406 memset(ret, 0, usize);
2408 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2410 if (config_fill && unlikely(opt_junk_alloc)) {
2411 arena_alloc_junk_small(ret, &arena_bin_info[binind],
2414 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2415 memset(ret, 0, usize);
2418 arena_decay_tick(tsd, arena);
2423 arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
2427 uintptr_t random_offset;
2429 arena_chunk_map_misc_t *miscelm;
2432 /* Large allocation. */
2433 usize = index2size(binind);
2434 malloc_mutex_lock(&arena->lock);
2435 if (config_cache_oblivious) {
2439 /* Compute a uniformly distributed offset within the first page
2440 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
2441 * for 4 KiB pages and 64-byte cachelines. */
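/*
 * For example, assuming 4 KiB pages and 64-byte cachelines, LG_PAGE -
 * LG_CACHELINE == 6, so r is in [0, 64) and random_offset is one of
 * {0, 64, 128, ..., 4032}.
 */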
2443 r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
2444 random_offset = ((uintptr_t)r) << LG_CACHELINE;
2447 run = arena_run_alloc_large(arena, usize + large_pad, zero);
2449 malloc_mutex_unlock(&arena->lock);
2452 miscelm = arena_run_to_miscelm(run);
2453 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + random_offset);
2456 szind_t index = binind - NBINS;
2458 arena->stats.nmalloc_large++;
2459 arena->stats.nrequests_large++;
2460 arena->stats.allocated_large += usize;
2461 arena->stats.lstats[index].nmalloc++;
2462 arena->stats.lstats[index].nrequests++;
2463 arena->stats.lstats[index].curruns++;
2466 idump = arena_prof_accum_locked(arena, usize);
2467 malloc_mutex_unlock(&arena->lock);
2468 if (config_prof && idump)
2473 if (unlikely(opt_junk_alloc))
2474 memset(ret, 0xa5, usize);
2475 else if (unlikely(opt_zero))
2476 memset(ret, 0, usize);
2480 arena_decay_tick(tsd, arena);
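/*
 * Slow-path allocation entry point: choose an arena, then dispatch on size
 * class (small bins, large runs, or huge chunks).
 */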
2485 arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
2486 bool zero, tcache_t *tcache)
2489 arena = arena_choose(tsd, arena);
2490 if (unlikely(arena == NULL))
2493 if (likely(size <= SMALL_MAXCLASS))
2494 return (arena_malloc_small(tsd, arena, ind, zero));
2495 if (likely(size <= large_maxclass))
2496 return (arena_malloc_large(tsd, arena, ind, zero));
2497 return (huge_malloc(tsd, arena, index2size(ind), zero, tcache));
2500 /* Only handles large allocations that require more than page alignment. */
2502 arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
2506 size_t alloc_size, leadsize, trailsize;
2508 arena_chunk_t *chunk;
2509 arena_chunk_map_misc_t *miscelm;
2512 assert(usize == PAGE_CEILING(usize));
2514 arena = arena_choose(tsd, arena);
2515 if (unlikely(arena == NULL))
2518 alignment = PAGE_CEILING(alignment);
2519 alloc_size = usize + large_pad + alignment - PAGE;
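/*
 * Over-allocating by (alignment - PAGE) guarantees that a page run satisfying
 * the requested alignment exists within the allocation; the leading and
 * trailing excess pages are trimmed off below.
 */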
2521 malloc_mutex_lock(&arena->lock);
2522 run = arena_run_alloc_large(arena, alloc_size, false);
2524 malloc_mutex_unlock(&arena->lock);
2527 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2528 miscelm = arena_run_to_miscelm(run);
2529 rpages = arena_miscelm_to_rpages(miscelm);
2531 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - (uintptr_t)rpages;
2533 assert(alloc_size >= leadsize + usize);
2534 trailsize = alloc_size - leadsize - usize - large_pad;
2535 if (leadsize != 0) {
2536 arena_chunk_map_misc_t *head_miscelm = miscelm;
2537 arena_run_t *head_run = run;
2539 miscelm = arena_miscelm_get(chunk,
2540 arena_miscelm_to_pageind(head_miscelm) + (leadsize >> LG_PAGE));
2542 run = &miscelm->run;
2544 arena_run_trim_head(arena, chunk, head_run, alloc_size,
2545 alloc_size - leadsize);
2547 if (trailsize != 0) {
2548 arena_run_trim_tail(arena, chunk, run, usize + large_pad +
2549 trailsize, usize + large_pad, false);
2551 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2553 size_t run_ind = arena_miscelm_to_pageind(arena_run_to_miscelm(run));
2554 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2555 bool decommitted = (arena_mapbits_decommitted_get(chunk, run_ind) != 0);
2558 assert(decommitted); /* Cause of OOM. */
2559 arena_run_dalloc(arena, run, dirty, false, decommitted);
2560 malloc_mutex_unlock(&arena->lock);
2563 ret = arena_miscelm_to_rpages(miscelm);
2566 szind_t index = size2index(usize) - NBINS;
2568 arena->stats.nmalloc_large++;
2569 arena->stats.nrequests_large++;
2570 arena->stats.allocated_large += usize;
2571 arena->stats.lstats[index].nmalloc++;
2572 arena->stats.lstats[index].nrequests++;
2573 arena->stats.lstats[index].curruns++;
2575 malloc_mutex_unlock(&arena->lock);
2577 if (config_fill && !zero) {
2578 if (unlikely(opt_junk_alloc))
2579 memset(ret, 0xa5, usize);
2580 else if (unlikely(opt_zero))
2581 memset(ret, 0, usize);
2583 arena_decay_tick(tsd, arena);
2588 arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
2589 bool zero, tcache_t *tcache)
2593 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
2594 && (usize & PAGE_MASK) == 0))) {
2595 /* Small; alignment doesn't require special run placement. */
2596 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, tcache);
2598 } else if (usize <= large_maxclass && alignment <= PAGE) {
2600 /* Large; alignment doesn't require special run placement.
2601 * However, the cached pointer may be at a random offset from
2602 * the base of the run, so do some bit manipulation to retrieve the base. */
2605 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, tcache);
2607 if (config_cache_oblivious)
2608 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
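/*
 * Clearing the low bits undoes the sub-page random offset that
 * cache-oblivious large allocation may have applied, recovering the run base.
 */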
2610 if (likely(usize <= large_maxclass)) {
2611 ret = arena_palloc_large(tsd, arena, usize, alignment, zero);
2613 } else if (likely(alignment <= chunksize))
2614 ret = huge_malloc(tsd, arena, usize, zero, tcache);
2616 ret = huge_palloc(tsd, arena, usize, alignment, zero, tcache);
2624 arena_prof_promoted(const void *ptr, size_t size)
2626 arena_chunk_t *chunk;
2630 cassert(config_prof);
2631 assert(ptr != NULL);
2632 assert(CHUNK_ADDR2BASE(ptr) != ptr);
2633 assert(isalloc(ptr, false) == LARGE_MINCLASS);
2634 assert(isalloc(ptr, true) == LARGE_MINCLASS);
2635 assert(size <= SMALL_MAXCLASS);
2637 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2638 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2639 binind = size2index(size);
2640 assert(binind < NBINS);
2641 arena_mapbits_large_binind_set(chunk, pageind, binind);
2643 assert(isalloc(ptr, false) == LARGE_MINCLASS);
2644 assert(isalloc(ptr, true) == size);
2648 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
2652 /* Dissociate run from bin. */
2653 if (run == bin->runcur)
2656 szind_t binind = arena_bin_index(extent_node_arena_get(
2657 &chunk->node), bin);
2658 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2660 if (bin_info->nregs != 1) {
2662 * This block's conditional is necessary because if the
2663 * run only contains one region, then it never gets
2664 * inserted into the non-full runs tree.
2666 arena_bin_runs_remove(bin, run);
2672 arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2676 assert(run != bin->runcur);
2677 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == NULL);
2680 malloc_mutex_unlock(&bin->lock);
2681 /******************************/
2682 malloc_mutex_lock(&arena->lock);
2683 arena_run_dalloc(arena, run, true, false, false);
2684 malloc_mutex_unlock(&arena->lock);
2685 /****************************/
2686 malloc_mutex_lock(&bin->lock);
2688 bin->stats.curruns--;
2692 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2697 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
2698 * non-full run. It is okay to NULL runcur out rather than proactively
2699 * keeping it pointing at the lowest non-full run.
2701 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
2702 /* Switch runcur. */
2703 if (bin->runcur->nfree > 0)
2704 arena_bin_runs_insert(bin, bin->runcur);
2707 bin->stats.reruns++;
2709 arena_bin_runs_insert(bin, run);
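/*
 * Deallocate a small region with the bin lock held.  If the run becomes
 * completely free it is dissociated from the bin and returned to the arena;
 * if it transitions from full to non-full it is lowered/reinserted so that
 * bin->runcur keeps pointing at the lowest-address non-full run.
 */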
2713 arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2714 arena_chunk_map_bits_t *bitselm, bool junked)
2716 size_t pageind, rpages_ind;
2719 arena_bin_info_t *bin_info;
2722 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2723 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2724 run = &arena_miscelm_get(chunk, rpages_ind)->run;
2725 binind = run->binind;
2726 bin = &arena->bins[binind];
2727 bin_info = &arena_bin_info[binind];
2729 if (!junked && config_fill && unlikely(opt_junk_free))
2730 arena_dalloc_junk_small(ptr, bin_info);
2732 arena_run_reg_dalloc(run, ptr);
2733 if (run->nfree == bin_info->nregs) {
2734 arena_dissociate_bin_run(chunk, run, bin);
2735 arena_dalloc_bin_run(arena, chunk, run, bin);
2736 } else if (run->nfree == 1 && run != bin->runcur)
2737 arena_bin_lower_run(arena, chunk, run, bin);
2740 bin->stats.ndalloc++;
2741 bin->stats.curregs--;
2746 arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2747 arena_chunk_map_bits_t *bitselm)
2750 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
2754 arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2755 size_t pageind, arena_chunk_map_bits_t *bitselm)
2761 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2762 run = &arena_miscelm_get(chunk, rpages_ind)->run;
2763 bin = &arena->bins[run->binind];
2764 malloc_mutex_lock(&bin->lock);
2765 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
2766 malloc_mutex_unlock(&bin->lock);
2770 arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
2773 arena_chunk_map_bits_t *bitselm;
2776 /* arena_ptr_small_binind_get() does extra sanity checking. */
2777 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2778 pageind)) != BININD_INVALID);
2780 bitselm = arena_bitselm_get(chunk, pageind);
2781 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
2782 arena_decay_tick(tsd, arena);
2786 #undef arena_dalloc_junk_large
2787 #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
2790 arena_dalloc_junk_large(void *ptr, size_t usize)
2793 if (config_fill && unlikely(opt_junk_free))
2794 memset(ptr, 0x5a, usize);
2797 #undef arena_dalloc_junk_large
2798 #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2799 arena_dalloc_junk_large_t *arena_dalloc_junk_large =
2800 JEMALLOC_N(arena_dalloc_junk_large_impl);
2804 arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
2805 void *ptr, bool junked)
2807 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2808 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2809 arena_run_t *run = &miscelm->run;
2811 if (config_fill || config_stats) {
2812 size_t usize = arena_mapbits_large_size_get(chunk, pageind) - large_pad;
2816 arena_dalloc_junk_large(ptr, usize);
2818 szind_t index = size2index(usize) - NBINS;
2820 arena->stats.ndalloc_large++;
2821 arena->stats.allocated_large -= usize;
2822 arena->stats.lstats[index].ndalloc++;
2823 arena->stats.lstats[index].curruns--;
2827 arena_run_dalloc(arena, run, true, false, false);
2831 arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
2835 arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
2839 arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr)
2842 malloc_mutex_lock(&arena->lock);
2843 arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
2844 malloc_mutex_unlock(&arena->lock);
2845 arena_decay_tick(tsd, arena);
2849 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2850 size_t oldsize, size_t size)
2852 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2853 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2854 arena_run_t *run = &miscelm->run;
2856 assert(size < oldsize);
2859 /* Shrink the run, and make trailing pages available for other allocations. */
2862 malloc_mutex_lock(&arena->lock);
2863 arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size + large_pad, true);
2866 szind_t oldindex = size2index(oldsize) - NBINS;
2867 szind_t index = size2index(size) - NBINS;
2869 arena->stats.ndalloc_large++;
2870 arena->stats.allocated_large -= oldsize;
2871 arena->stats.lstats[oldindex].ndalloc++;
2872 arena->stats.lstats[oldindex].curruns--;
2874 arena->stats.nmalloc_large++;
2875 arena->stats.nrequests_large++;
2876 arena->stats.allocated_large += size;
2877 arena->stats.lstats[index].nmalloc++;
2878 arena->stats.lstats[index].nrequests++;
2879 arena->stats.lstats[index].curruns++;
2881 malloc_mutex_unlock(&arena->lock);
2885 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2886 size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
2888 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2889 size_t npages = (oldsize + large_pad) >> LG_PAGE;
2892 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - large_pad);
2895 /* Try to extend the run. */
2896 malloc_mutex_lock(&arena->lock);
2897 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
2898 pageind+npages) != 0)
2900 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
2901 if (oldsize + followsize >= usize_min) {
2903 /* The next run is available and sufficiently large. Split the
2904 * following run, then merge the first part with the existing allocation. */
2908 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
2911 while (oldsize + followsize < usize)
2912 usize = index2size(size2index(usize)-1);
2913 assert(usize >= usize_min);
2914 assert(usize >= oldsize);
2915 splitsize = usize - oldsize;
2919 run = &arena_miscelm_get(chunk, pageind+npages)->run;
2920 if (arena_run_split_large(arena, run, splitsize, zero))
2923 if (config_cache_oblivious && zero) {
2925 * Zero the trailing bytes of the original allocation's
2926 * last page, since they are in an indeterminate state.
2927 * There will always be trailing bytes, because ptr's
2928 * offset from the beginning of the run is a multiple of
2929 * CACHELINE in [0 .. PAGE).
2931 void *zbase = (void *)((uintptr_t)ptr + oldsize);
2932 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + PAGE));
2934 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
2936 memset(zbase, 0, nzero);
2939 size = oldsize + splitsize;
2940 npages = (size + large_pad) >> LG_PAGE;
2943 * Mark the extended run as dirty if either portion of the run
2944 * was dirty before allocation. This is rather pedantic,
2945 * because there's not actually any sequence of events that
2946 * could cause the resulting run to be passed to
2947 * arena_run_dalloc() with the dirty argument set to false
2948 * (which is when dirty flag consistency would really matter).
2950 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
2951 arena_mapbits_dirty_get(chunk, pageind+npages-1);
2952 flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
2953 arena_mapbits_large_set(chunk, pageind, size + large_pad,
2954 flag_dirty | (flag_unzeroed_mask &
2955 arena_mapbits_unzeroed_get(chunk, pageind)));
2956 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
2957 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2958 pageind+npages-1)));
2961 szind_t oldindex = size2index(oldsize) - NBINS;
2962 szind_t index = size2index(size) - NBINS;
2964 arena->stats.ndalloc_large++;
2965 arena->stats.allocated_large -= oldsize;
2966 arena->stats.lstats[oldindex].ndalloc++;
2967 arena->stats.lstats[oldindex].curruns--;
2969 arena->stats.nmalloc_large++;
2970 arena->stats.nrequests_large++;
2971 arena->stats.allocated_large += size;
2972 arena->stats.lstats[index].nmalloc++;
2973 arena->stats.lstats[index].nrequests++;
2974 arena->stats.lstats[index].curruns++;
2976 malloc_mutex_unlock(&arena->lock);
2980 malloc_mutex_unlock(&arena->lock);
2985 #undef arena_ralloc_junk_large
2986 #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
2989 arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
2992 if (config_fill && unlikely(opt_junk_free)) {
2993 memset((void *)((uintptr_t)ptr + usize), 0x5a, old_usize - usize);
2998 #undef arena_ralloc_junk_large
2999 #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
3000 arena_ralloc_junk_large_t *arena_ralloc_junk_large =
3001 JEMALLOC_N(arena_ralloc_junk_large_impl);
3005 /* Try to resize a large allocation, in order to avoid copying. This will
3006 * always fail when growing an object whose following run is already in use. */
3009 arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
3010 size_t usize_max, bool zero)
3012 arena_chunk_t *chunk;
3015 if (oldsize == usize_max) {
3016 /* Current size class is compatible and maximal. */
3020 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3021 arena = extent_node_arena_get(&chunk->node);
3023 if (oldsize < usize_max) {
3024 bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
3025 usize_min, usize_max, zero);
3026 if (config_fill && !ret && !zero) {
3027 if (unlikely(opt_junk_alloc)) {
3028 memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
3029 isalloc(ptr, config_prof) - oldsize);
3030 } else if (unlikely(opt_zero)) {
3031 memset((void *)((uintptr_t)ptr + oldsize), 0,
3032 isalloc(ptr, config_prof) - oldsize);
3038 assert(oldsize > usize_max);
3039 /* Fill before shrinking in order to avoid a race. */
3040 arena_ralloc_junk_large(ptr, oldsize, usize_max);
3041 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
3046 arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
3047 size_t extra, bool zero)
3049 size_t usize_min, usize_max;
3051 /* Callers passing non-zero extra must have already clamped it so that size + extra cannot exceed HUGE_MAXCLASS. */
3052 assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
3054 if (unlikely(size > HUGE_MAXCLASS))
3057 usize_min = s2u(size);
3058 usize_max = s2u(size + extra);
3059 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
3060 arena_chunk_t *chunk;
3063 /* Avoid moving the allocation if the size class can be left the same. */
3066 if (oldsize <= SMALL_MAXCLASS) {
3067 assert(arena_bin_info[size2index(oldsize)].reg_size == oldsize);
3069 if ((usize_max > SMALL_MAXCLASS ||
3070 size2index(usize_max) != size2index(oldsize)) &&
3071 (size > oldsize || usize_max < oldsize))
3074 if (usize_max <= SMALL_MAXCLASS)
3076 if (arena_ralloc_large(ptr, oldsize, usize_min, usize_max, zero))
3081 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3082 arena_decay_tick(tsd, extent_node_arena_get(&chunk->node));
3085 return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min, usize_max, zero));
3091 arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
3092 size_t alignment, bool zero, tcache_t *tcache)
3096 return (arena_malloc(tsd, arena, usize, size2index(usize), zero, tcache));
3098 usize = sa2u(usize, alignment);
3099 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
3101 return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
3105 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
3106 size_t alignment, bool zero, tcache_t *tcache)
3112 if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
3115 if (likely(usize <= large_maxclass)) {
3118 /* Try to avoid moving the allocation. */
3119 if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero))
3123 /* size and oldsize are different enough that we need to move the
3124 * object. In that case, fall back to allocating new space and copying. */
3127 ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, zero, tcache);
3133 * Junk/zero-filling were already done by
3134 * ipalloc()/arena_malloc().
3137 copysize = (usize < oldsize) ? usize : oldsize;
3138 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
3139 memcpy(ret, ptr, copysize);
3140 isqalloc(tsd, ptr, oldsize, tcache);
3142 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, zero, tcache);
3149 arena_dss_prec_get(arena_t *arena)
3153 malloc_mutex_lock(&arena->lock);
3154 ret = arena->dss_prec;
3155 malloc_mutex_unlock(&arena->lock);
3160 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
3164 return (dss_prec != dss_prec_disabled);
3165 malloc_mutex_lock(&arena->lock);
3166 arena->dss_prec = dss_prec;
3167 malloc_mutex_unlock(&arena->lock);
3172 arena_lg_dirty_mult_default_get(void)
3175 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
3179 arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
3182 if (opt_purge != purge_mode_ratio)
3184 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
3186 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
3191 arena_decay_time_default_get(void)
3194 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
3198 arena_decay_time_default_set(ssize_t decay_time)
3201 if (opt_purge != purge_mode_decay)
3203 if (!arena_decay_time_valid(decay_time))
3205 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
3210 arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
3211 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3212 size_t *nactive, size_t *ndirty)
3215 *nthreads += arena_nthreads_get(arena);
3216 *dss = dss_prec_names[arena->dss_prec];
3217 *lg_dirty_mult = arena->lg_dirty_mult;
3218 *decay_time = arena->decay_time;
3219 *nactive += arena->nactive;
3220 *ndirty += arena->ndirty;
3224 arena_basic_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
3225 ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
3229 malloc_mutex_lock(&arena->lock);
3230 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3231 decay_time, nactive, ndirty);
3232 malloc_mutex_unlock(&arena->lock);
3236 arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
3237 ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
3238 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
3239 malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
3243 cassert(config_stats);
3245 malloc_mutex_lock(&arena->lock);
3246 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3247 decay_time, nactive, ndirty);
3249 astats->mapped += arena->stats.mapped;
3250 astats->npurge += arena->stats.npurge;
3251 astats->nmadvise += arena->stats.nmadvise;
3252 astats->purged += arena->stats.purged;
3253 astats->metadata_mapped += arena->stats.metadata_mapped;
3254 astats->metadata_allocated += arena_metadata_allocated_get(arena);
3255 astats->allocated_large += arena->stats.allocated_large;
3256 astats->nmalloc_large += arena->stats.nmalloc_large;
3257 astats->ndalloc_large += arena->stats.ndalloc_large;
3258 astats->nrequests_large += arena->stats.nrequests_large;
3259 astats->allocated_huge += arena->stats.allocated_huge;
3260 astats->nmalloc_huge += arena->stats.nmalloc_huge;
3261 astats->ndalloc_huge += arena->stats.ndalloc_huge;
3263 for (i = 0; i < nlclasses; i++) {
3264 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
3265 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
3266 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
3267 lstats[i].curruns += arena->stats.lstats[i].curruns;
3270 for (i = 0; i < nhclasses; i++) {
3271 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
3272 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
3273 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
3275 malloc_mutex_unlock(&arena->lock);
3277 for (i = 0; i < NBINS; i++) {
3278 arena_bin_t *bin = &arena->bins[i];
3280 malloc_mutex_lock(&bin->lock);
3281 bstats[i].nmalloc += bin->stats.nmalloc;
3282 bstats[i].ndalloc += bin->stats.ndalloc;
3283 bstats[i].nrequests += bin->stats.nrequests;
3284 bstats[i].curregs += bin->stats.curregs;
3285 if (config_tcache) {
3286 bstats[i].nfills += bin->stats.nfills;
3287 bstats[i].nflushes += bin->stats.nflushes;
3289 bstats[i].nruns += bin->stats.nruns;
3290 bstats[i].reruns += bin->stats.reruns;
3291 bstats[i].curruns += bin->stats.curruns;
3292 malloc_mutex_unlock(&bin->lock);
3297 arena_nthreads_get(arena_t *arena)
3300 return (atomic_read_u(&arena->nthreads));
3304 arena_nthreads_inc(arena_t *arena)
3307 atomic_add_u(&arena->nthreads, 1);
3311 arena_nthreads_dec(arena_t *arena)
3314 atomic_sub_u(&arena->nthreads, 1);
3318 arena_new(unsigned ind)
3325 /* Compute arena size to incorporate sufficient runs_avail elements. */
3326 arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_tree_t) *
3327 runs_avail_nclasses);
3329 /* Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
3330 * because there is no way to clean up if base_alloc() OOMs. */
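/*
 * Resulting layout when stats are enabled, as set up below:
 *   [arena_t incl. runs_avail trees][pad to CACHELINE][lstats][pad to QUANTUM][hstats]
 */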
3333 arena = (arena_t *)base_alloc(CACHELINE_CEILING(arena_size) +
3334 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
3335 nhclasses) * sizeof(malloc_huge_stats_t));
3337 arena = (arena_t *)base_alloc(arena_size);
3342 arena->nthreads = 0;
3343 if (malloc_mutex_init(&arena->lock))
3347 memset(&arena->stats, 0, sizeof(arena_stats_t));
3348 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
3349 + CACHELINE_CEILING(arena_size));
3350 memset(arena->stats.lstats, 0, nlclasses *
3351 sizeof(malloc_large_stats_t));
3352 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
3353 + CACHELINE_CEILING(arena_size) +
3354 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
3355 memset(arena->stats.hstats, 0, nhclasses *
3356 sizeof(malloc_huge_stats_t));
3358 ql_new(&arena->tcache_ql);
3362 arena->prof_accumbytes = 0;
3364 if (config_cache_oblivious) {
3366 * A nondeterministic seed based on the address of arena reduces
3367 * the likelihood of lockstep non-uniform cache index
3368 * utilization among identical concurrent processes, but at the
3369 * cost of test repeatability. For debug builds, instead use a
3370 * deterministic seed.
3372 arena->offset_state = config_debug ? ind :
3373 (uint64_t)(uintptr_t)arena;
3376 arena->dss_prec = chunk_dss_prec_get();
3378 arena->spare = NULL;
3380 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
3381 arena->purging = false;
3385 for (i = 0; i < runs_avail_nclasses; i++)
3386 arena_run_tree_new(&arena->runs_avail[i]);
3387 qr_new(&arena->runs_dirty, rd_link);
3388 qr_new(&arena->chunks_cache, cc_link);
3390 if (opt_purge == purge_mode_decay)
3391 arena_decay_init(arena, arena_decay_time_default_get());
3393 ql_new(&arena->huge);
3394 if (malloc_mutex_init(&arena->huge_mtx))
3397 extent_tree_szad_new(&arena->chunks_szad_cached);
3398 extent_tree_ad_new(&arena->chunks_ad_cached);
3399 extent_tree_szad_new(&arena->chunks_szad_retained);
3400 extent_tree_ad_new(&arena->chunks_ad_retained);
3401 if (malloc_mutex_init(&arena->chunks_mtx))
3403 ql_new(&arena->node_cache);
3404 if (malloc_mutex_init(&arena->node_cache_mtx))
3407 arena->chunk_hooks = chunk_hooks_default;
3409 /* Initialize bins. */
3410 for (i = 0; i < NBINS; i++) {
3411 bin = &arena->bins[i];
3412 if (malloc_mutex_init(&bin->lock))
3415 arena_run_tree_new(&bin->runs);
3417 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
3424 * Calculate bin_info->run_size such that it meets the following constraints:
3426 * *) bin_info->run_size <= arena_maxrun
3427 * *) bin_info->nregs <= RUN_MAXREGS
3429 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
3430 * these settings are all interdependent.
3433 bin_info_run_size_calc(arena_bin_info_t *bin_info)
3436 size_t try_run_size, perfect_run_size, actual_run_size;
3437 uint32_t try_nregs, perfect_nregs, actual_nregs;
3440 * Determine redzone size based on minimum alignment and minimum
3441 * redzone size. Add padding to the end of the run if it is needed to
3442 * align the regions. The padding allows each redzone to be half the
3443 * minimum alignment; without the padding, each redzone would have to
3444 * be twice as large in order to maintain alignment.
3446 if (config_fill && unlikely(opt_redzone)) {
3447 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
3448 if (align_min <= REDZONE_MINSIZE) {
3449 bin_info->redzone_size = REDZONE_MINSIZE;
3452 bin_info->redzone_size = align_min >> 1;
3453 pad_size = bin_info->redzone_size;
3456 bin_info->redzone_size = 0;
3459 bin_info->reg_interval = bin_info->reg_size +
3460 (bin_info->redzone_size << 1);
3463 /* Compute run size under ideal conditions (no redzones, no limit on run size). */
3466 try_run_size = PAGE;
3467 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
3469 perfect_run_size = try_run_size;
3470 perfect_nregs = try_nregs;
3472 try_run_size += PAGE;
3473 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
3474 } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
3475 assert(perfect_nregs <= RUN_MAXREGS);
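/*
 * For example, assuming 4 KiB pages and reg_size == 96, the loop above stops
 * at perfect_run_size == 12288 (3 pages) and perfect_nregs == 128: the
 * smallest page multiple that is also an exact multiple of reg_size.
 */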
3477 actual_run_size = perfect_run_size;
3478 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3479 bin_info->reg_interval);
3482 /* Redzones can require enough padding that not even a single region can
3483 * fit within the number of pages that would normally be dedicated to a
3484 * run for this size class. Increase the run size until at least one region fits. */
3487 while (actual_nregs == 0) {
3488 assert(config_fill && unlikely(opt_redzone));
3490 actual_run_size += PAGE;
3491 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3492 bin_info->reg_interval);
3496 * Make sure that the run will fit within an arena chunk.
3498 while (actual_run_size > arena_maxrun) {
3499 actual_run_size -= PAGE;
3500 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3501 bin_info->reg_interval);
3503 assert(actual_nregs > 0);
3504 assert(actual_run_size == s2u(actual_run_size));
3506 /* Copy final settings. */
3507 bin_info->run_size = actual_run_size;
3508 bin_info->nregs = actual_nregs;
3509 bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
3510 bin_info->reg_interval) - pad_size + bin_info->redzone_size);
3512 if (actual_run_size > small_maxrun)
3513 small_maxrun = actual_run_size;
3515 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
3516 * bin_info->reg_interval) + pad_size == bin_info->run_size);
3522 arena_bin_info_t *bin_info;
3524 #define BIN_INFO_INIT_bin_yes(index, size) \
3525 bin_info = &arena_bin_info[index]; \
3526 bin_info->reg_size = size; \
3527 bin_info_run_size_calc(bin_info); \
3528 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
3529 #define BIN_INFO_INIT_bin_no(index, size)
3530 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
3531 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
3533 #undef BIN_INFO_INIT_bin_yes
3534 #undef BIN_INFO_INIT_bin_no
3539 small_run_size_init(void)
3542 assert(small_maxrun != 0);
3544 small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
3546 if (small_run_tab == NULL)
3549 #define TAB_INIT_bin_yes(index, size) { \
3550 arena_bin_info_t *bin_info = &arena_bin_info[index]; \
3551 small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
3553 #define TAB_INIT_bin_no(index, size)
3554 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
3555 TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
3557 #undef TAB_INIT_bin_yes
3558 #undef TAB_INIT_bin_no
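/*
 * run_quantize_init() builds memoization tables indexed by (run size in
 * pages) - 1: entry i-1 caches the floor/ceil quantized sizes for runs of i
 * pages, up to run_quantize_max.
 */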
3565 run_quantize_init(void)
3569 run_quantize_max = chunksize + large_pad;
3571 run_quantize_floor_tab = (size_t *)base_alloc(sizeof(size_t) *
3572 (run_quantize_max >> LG_PAGE));
3573 if (run_quantize_floor_tab == NULL)
3576 run_quantize_ceil_tab = (size_t *)base_alloc(sizeof(size_t) *
3577 (run_quantize_max >> LG_PAGE));
3578 if (run_quantize_ceil_tab == NULL)
3581 for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) {
3582 size_t run_size = i << LG_PAGE;
3584 run_quantize_floor_tab[i-1] =
3585 run_quantize_floor_compute(run_size);
3586 run_quantize_ceil_tab[i-1] =
3587 run_quantize_ceil_compute(run_size);
3598 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
3599 arena_decay_time_default_set(opt_decay_time);
3602 * Compute the header size such that it is large enough to contain the
3603 * page map. The page map is biased to omit entries for the header
3604 * itself, so some iteration is necessary to compute the map bias.
3606 * 1) Compute safe header_size and map_bias values that include enough
3607 * space for an unbiased page map.
3608 * 2) Refine map_bias based on (1) to omit the header pages in the page
3609 * map. The resulting map_bias may be one too small.
3610 * 3) Refine map_bias based on (2). The result will be >= the result
3611 * from (2), and will always be correct.
3614 for (i = 0; i < 3; i++) {
3615 size_t header_size = offsetof(arena_chunk_t, map_bits) +
3616 ((sizeof(arena_chunk_map_bits_t) +
3617 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
3618 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
3620 assert(map_bias > 0);
3622 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3623 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3625 arena_maxrun = chunksize - (map_bias << LG_PAGE);
3626 assert(arena_maxrun > 0);
3627 large_maxclass = index2size(size2index(chunksize)-1);
3628 if (large_maxclass > arena_maxrun) {
3630 * For small chunk sizes it's possible for there to be fewer
3631 * non-header pages available than are necessary to serve the
3632 * size classes just below chunksize.
3634 large_maxclass = arena_maxrun;
3636 assert(large_maxclass > 0);
3637 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
3638 nhclasses = NSIZES - nlclasses - NBINS;
3641 if (small_run_size_init())
3643 if (run_quantize_init())
3646 runs_avail_bias = size2index(PAGE);
3647 runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias;
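/*
 * The fork handlers below acquire the arena's mutexes in arena_prefork() and
 * release them in the postfork functions in the reverse order of acquisition.
 */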
3653 arena_prefork(arena_t *arena)
3657 malloc_mutex_prefork(&arena->lock);
3658 malloc_mutex_prefork(&arena->huge_mtx);
3659 malloc_mutex_prefork(&arena->chunks_mtx);
3660 malloc_mutex_prefork(&arena->node_cache_mtx);
3661 for (i = 0; i < NBINS; i++)
3662 malloc_mutex_prefork(&arena->bins[i].lock);
3666 arena_postfork_parent(arena_t *arena)
3670 for (i = 0; i < NBINS; i++)
3671 malloc_mutex_postfork_parent(&arena->bins[i].lock);
3672 malloc_mutex_postfork_parent(&arena->node_cache_mtx);
3673 malloc_mutex_postfork_parent(&arena->chunks_mtx);
3674 malloc_mutex_postfork_parent(&arena->huge_mtx);
3675 malloc_mutex_postfork_parent(&arena->lock);
3679 arena_postfork_child(arena_t *arena)
3683 for (i = 0; i < NBINS; i++)
3684 malloc_mutex_postfork_child(&arena->bins[i].lock);
3685 malloc_mutex_postfork_child(&arena->node_cache_mtx);
3686 malloc_mutex_postfork_child(&arena->chunks_mtx);
3687 malloc_mutex_postfork_child(&arena->huge_mtx);
3688 malloc_mutex_postfork_child(&arena->lock);