#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t arena_bin_info[NBINS];
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t small_size2bin[] = {
#define S2B_8(i) i,
#define S2B_16(i) S2B_8(i) S2B_8(i)
#define S2B_32(i) S2B_16(i) S2B_16(i)
#define S2B_64(i) S2B_32(i) S2B_32(i)
#define S2B_128(i) S2B_64(i) S2B_64(i)
#define S2B_256(i) S2B_128(i) S2B_128(i)
#define S2B_512(i) S2B_256(i) S2B_256(i)
#define S2B_1024(i) S2B_512(i) S2B_512(i)
#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
#define SIZE_CLASS(bin, delta, size) \
    S2B_##delta(bin)
    SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_512
#undef S2B_1024
#undef S2B_2048
#undef S2B_4096
#undef S2B_8192
#undef SIZE_CLASS
};
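
/*
 * Lookup example (illustrative): SMALL_SIZE2BIN() maps a request size to a
 * bin index by reading small_size2bin[(size-1) >> LG_TINY_MIN], so every
 * size sharing one table entry rounds up to the same bin and the lookup is
 * a single load instead of a loop over size classes.
 */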
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
    bool large, size_t binind, bool zero);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
    size_t binind, bool zero);
static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
    arena_chunk_t *chunk, void *arg);
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static void bin_info_init(void);

/******************************************************************************/
static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
    uintptr_t a_mapelm = (uintptr_t)a;
    uintptr_t b_mapelm = (uintptr_t)b;

    assert(a != NULL);
    assert(b != NULL);

    return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)

static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
    int ret;
    size_t a_size = a->bits & ~PAGE_MASK;
    size_t b_size = b->bits & ~PAGE_MASK;

    ret = (a_size > b_size) - (a_size < b_size);
    if (ret == 0) {
        uintptr_t a_mapelm, b_mapelm;

        if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
            a_mapelm = (uintptr_t)a;
        else {
            /*
             * Treat keys as though they are lower than anything
             * else.
             */
            a_mapelm = 0;
        }
        b_mapelm = (uintptr_t)b;

        ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
    }

    return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)

static inline int
arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
{

    assert(a != NULL);
    assert(b != NULL);

    /*
     * Short-circuit for self comparison.  The following comparison code
     * would come to the same result, but at the cost of executing the slow
     * path.
     */
    if (a == b)
        return (0);

    /*
     * Order such that chunks with higher fragmentation are "less than"
     * those with lower fragmentation -- purging order is from "least" to
     * "greatest".  Fragmentation is measured as:
     *
     *     mean current avail run size
     *   --------------------------------
     *   mean defragmented avail run size
     *
     *            navail
     *         -----------
     *         nruns_avail           nruns_avail-nruns_adjac
     * = ========================= = -----------------------
     *            navail                  nruns_avail
     *    -----------------------
     *    nruns_avail-nruns_adjac
     *
     * The following code multiplies away the denominator prior to
     * comparison, in order to avoid division.
     */
    {
        size_t a_val = (a->nruns_avail - a->nruns_adjac) *
            b->nruns_avail;
        size_t b_val = (b->nruns_avail - b->nruns_adjac) *
            a->nruns_avail;

        if (a_val < b_val)
            return (1);
        if (a_val > b_val)
            return (-1);
    }
    /*
     * Break ties by chunk address.  For fragmented chunks, report lower
     * addresses as "lower", so that fragmentation reduction happens first
     * at lower addresses.  However, use the opposite ordering for
     * unfragmented chunks, in order to increase the chances of
     * re-allocating dirty runs.
     */
    {
        uintptr_t a_chunk = (uintptr_t)a;
        uintptr_t b_chunk = (uintptr_t)b;
        int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
        if (a->nruns_adjac == 0) {
            assert(b->nruns_adjac == 0);
            ret = -ret;
        }
        return (ret);
    }
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
    dirty_link, arena_chunk_dirty_comp)
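
/*
 * Worked example (hypothetical counts): chunk A with nruns_avail=4 and
 * nruns_adjac=3 has fragmentation ratio (4-3)/4 = 0.25, while chunk B with
 * nruns_avail=4 and nruns_adjac=1 has (4-1)/4 = 0.75.  After the
 * cross-multiplication above, a_val = (4-3)*4 = 4 and b_val = (4-1)*4 = 12,
 * so the more fragmented chunk A compares as "less than" B and is purged
 * first.
 */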
static inline bool
arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
{
    bool ret;

    if (pageind-1 < map_bias)
        ret = false;
    else {
        ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
        assert(ret == false || arena_mapbits_dirty_get(chunk,
            pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
    }
    return (ret);
}

static inline bool
arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
{
    bool ret;

    if (pageind+npages == chunk_npages)
        ret = false;
    else {
        assert(pageind+npages < chunk_npages);
        ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
        assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
            != arena_mapbits_dirty_get(chunk, pageind+npages));
    }
    return (ret);
}

static inline bool
arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
{

    return (arena_avail_adjac_pred(chunk, pageind) ||
        arena_avail_adjac_succ(chunk, pageind, npages));
}
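
/*
 * Note that two unallocated neighbors can only differ in their dirty state
 * (same-state neighbors would already have been coalesced), so an "adjacent"
 * run marks a clean/dirty boundary: purging the dirty side lets the pair
 * coalesce into one clean run, which is why such runs are preferred purge
 * candidates.
 */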
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));

    /*
     * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
     * removed and reinserted even if the run to be inserted is clean.
     */
    if (chunk->ndirty != 0)
        arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

    if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
        chunk->nruns_adjac++;
    if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
        chunk->nruns_adjac++;
    chunk->nruns_avail++;
    assert(chunk->nruns_avail > chunk->nruns_adjac);

    if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
        arena->ndirty += npages;
        chunk->ndirty += npages;
    }
    if (chunk->ndirty != 0)
        arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

    arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
        pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));

    /*
     * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
     * removed and reinserted even if the run to be removed is clean.
     */
    if (chunk->ndirty != 0)
        arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

    if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
        chunk->nruns_adjac--;
    if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
        chunk->nruns_adjac--;
    chunk->nruns_avail--;
    assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
        == 0 && chunk->nruns_adjac == 0));

    if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
        arena->ndirty -= npages;
        chunk->ndirty -= npages;
    }
    if (chunk->ndirty != 0)
        arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

    arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
        pageind));
}
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
    void *ret;
    unsigned regind;
    bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
        (uintptr_t)bin_info->bitmap_offset);

    assert(run->nfree > 0);
    assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

    regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
    ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
        (uintptr_t)(bin_info->reg_interval * regind));
    run->nfree--;
    if (regind == run->nextind)
        run->nextind++;
    assert(regind < run->nextind);
    return (ret);
}
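
/*
 * Addressing example (hypothetical bin geometry): with reg0_offset == 32
 * and reg_interval == 64, regind 3 yields run + 32 + 3*64 == run + 224.
 * Because bitmap_sfu() returns the lowest unset bit, allocation always
 * reuses the lowest-addressed free region in the run.
 */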
static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    size_t mapbits = arena_mapbits_get(chunk, pageind);
    size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
    arena_bin_info_t *bin_info = &arena_bin_info[binind];
    unsigned regind = arena_run_regind(run, bin_info, ptr);
    bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
        (uintptr_t)bin_info->bitmap_offset);

    assert(run->nfree < bin_info->nregs);
    /* Freeing an interior pointer can cause assertion failure. */
    assert(((uintptr_t)ptr - ((uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset)) %
        (uintptr_t)bin_info->reg_interval == 0);
    assert((uintptr_t)ptr >= (uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset);
    /* Freeing an unallocated pointer can cause assertion failure. */
    assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

    bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
    run->nfree++;
}

static inline void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

    VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
        LG_PAGE)), (npages << LG_PAGE));
    memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
        (npages << LG_PAGE));
}

static void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
    size_t i;
    UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

    VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
        LG_PAGE)), PAGE);
    for (i = 0; i < PAGE / sizeof(size_t); i++)
        assert(p[i] == 0);
}
static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    size_t binind, bool zero)
{
    arena_chunk_t *chunk;
    size_t run_ind, total_pages, need_pages, rem_pages, i;
    size_t flag_dirty;

    assert((large && binind == BININD_INVALID) || (large == false && binind
        != BININD_INVALID));

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
        LG_PAGE;
    assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
        flag_dirty);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
    if (config_stats) {
        /*
         * Update stats_cactive if nactive is crossing a chunk
         * multiple.
         */
        size_t cactive_diff = CHUNK_CEILING((arena->nactive +
            need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
            LG_PAGE);
        if (cactive_diff != 0)
            stats_cactive_add(cactive_diff);
    }
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        if (flag_dirty != 0) {
            arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
                (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                CHUNK_MAP_DIRTY);
        } else {
            arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
                (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+need_pages));
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+total_pages-1));
        }
        arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
            false, true);
    }

    /*
     * Update the page map separately for large vs. small runs, since it is
     * possible to avoid iteration for large mallocs.
     */
    if (large) {
        if (zero) {
            if (flag_dirty == 0) {
                /*
                 * The run is clean, so some pages may be
                 * zeroed (i.e. never before touched).
                 */
                for (i = 0; i < need_pages; i++) {
                    if (arena_mapbits_unzeroed_get(chunk,
                        run_ind+i) != 0) {
                        arena_run_zero(chunk, run_ind+i,
                            1);
                    } else if (config_debug) {
                        arena_run_page_validate_zeroed(
                            chunk, run_ind+i);
                    }
                }
            } else {
                /*
                 * The run is dirty, so all pages must be
                 * zeroed.
                 */
                arena_run_zero(chunk, run_ind, need_pages);
            }
        }

        /*
         * Set the last element first, in case the run only contains one
         * page (i.e. both statements set the same element).
         */
        arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
            flag_dirty);
        arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
    } else {
        assert(zero == false);
        /*
         * Propagate the dirty and unzeroed flags to the allocated
         * small run, so that arena_dalloc_bin_run() has the ability to
         * conditionally trim clean pages.
         */
        arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
        /*
         * The first page will always be dirtied during small run
         * initialization, so a validation failure here would not
         * actually cause an observable failure.
         */
        if (config_debug && flag_dirty == 0 &&
            arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
            arena_run_page_validate_zeroed(chunk, run_ind);
        for (i = 1; i < need_pages - 1; i++) {
            arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
            if (config_debug && flag_dirty == 0 &&
                arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) {
                arena_run_page_validate_zeroed(chunk,
                    run_ind+i);
            }
        }
        arena_mapbits_small_set(chunk, run_ind+need_pages-1,
            need_pages-1, binind, flag_dirty);
        if (config_debug && flag_dirty == 0 &&
            arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
            0) {
            arena_run_page_validate_zeroed(chunk,
                run_ind+need_pages-1);
        }
    }
    VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
        LG_PAGE)), (need_pages << LG_PAGE));
}
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
    arena_chunk_t *chunk;
    size_t i;

    if (arena->spare != NULL) {
        chunk = arena->spare;
        arena->spare = NULL;

        assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
        assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
        assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
            arena_maxclass);
        assert(arena_mapbits_unallocated_size_get(chunk,
            chunk_npages-1) == arena_maxclass);
        assert(arena_mapbits_dirty_get(chunk, map_bias) ==
            arena_mapbits_dirty_get(chunk, chunk_npages-1));
    } else {
        bool zero;
        size_t unzeroed;

        zero = false;
        malloc_mutex_unlock(&arena->lock);
        chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
            false, &zero, arena->dss_prec);
        malloc_mutex_lock(&arena->lock);
        if (chunk == NULL)
            return (NULL);
        if (config_stats)
            arena->stats.mapped += chunksize;

        chunk->arena = arena;

        /*
         * Claim that no pages are in use, since the header is merely
         * overhead.
         */
        chunk->ndirty = 0;

        chunk->nruns_avail = 0;
        chunk->nruns_adjac = 0;

        /*
         * Initialize the map to contain one maximal free untouched run.
         * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
         * chunk.
         */
        unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
        arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
            unzeroed);
        /*
         * There is no need to initialize the internal page map entries
         * unless the chunk is not zeroed.
         */
        if (zero == false) {
            for (i = map_bias+1; i < chunk_npages-1; i++)
                arena_mapbits_unzeroed_set(chunk, i, unzeroed);
        } else if (config_debug) {
            VALGRIND_MAKE_MEM_DEFINED(
                (void *)arena_mapp_get(chunk, map_bias+1),
                (size_t)((uintptr_t)
                arena_mapp_get(chunk, chunk_npages-1)
                - (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
            for (i = map_bias+1; i < chunk_npages-1; i++) {
                assert(arena_mapbits_unzeroed_get(chunk, i) ==
                    unzeroed);
            }
        }
        arena_mapbits_unallocated_set(chunk, chunk_npages-1,
            arena_maxclass, unzeroed);
    }

    /* Insert the run into the runs_avail tree. */
    arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
        false, false);

    return (chunk);
}

static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxclass);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxclass);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));

    /*
     * Remove run from the runs_avail tree, so that the arena does not use
     * it.
     */
    arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
        false, false);

    if (arena->spare != NULL) {
        arena_chunk_t *spare = arena->spare;

        arena->spare = chunk;
        malloc_mutex_unlock(&arena->lock);
        chunk_dealloc((void *)spare, chunksize, true);
        malloc_mutex_lock(&arena->lock);
        if (config_stats)
            arena->stats.mapped -= chunksize;
    } else
        arena->spare = chunk;
}
static arena_run_t *
arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
    bool zero)
{
    arena_run_t *run;
    arena_chunk_map_t *mapelm, key;

    key.bits = size | CHUNK_MAP_KEY;
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            LG_PAGE));
        arena_run_split(arena, run, size, large, binind, zero);
        return (run);
    }

    return (NULL);
}

static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
    bool zero)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxclass);
    assert((size & PAGE_MASK) == 0);
    assert((large && binind == BININD_INVALID) || (large == false && binind
        != BININD_INVALID));

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_helper(arena, size, large, binind, zero);
    if (run != NULL)
        return (run);

    /*
     * No usable runs.  Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
        arena_run_split(arena, run, size, large, binind, zero);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_helper(arena, size, large, binind, zero));
}
static inline void
arena_maybe_purge(arena_t *arena)
{
    size_t npurgeable, threshold;

    /* Don't purge if the option is disabled. */
    if (opt_lg_dirty_mult < 0)
        return;
    /* Don't purge if all dirty pages are already being purged. */
    if (arena->ndirty <= arena->npurgatory)
        return;
    npurgeable = arena->ndirty - arena->npurgatory;
    threshold = (arena->nactive >> opt_lg_dirty_mult);
    /*
     * Don't purge unless the number of purgeable pages exceeds the
     * threshold.
     */
    if (npurgeable <= threshold)
        return;

    arena_purge(arena, false);
}
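
/*
 * Threshold example: with opt_lg_dirty_mult == 3 (an 8:1 active:dirty
 * ratio), an arena with nactive == 8192 pages tolerates up to
 * 8192 >> 3 == 1024 purgeable dirty pages; arena_purge() runs only once
 * npurgeable exceeds that bound.
 */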
static size_t
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
    size_t npurged;
    ql_head(arena_chunk_map_t) mapelms;
    arena_chunk_map_t *mapelm;
    size_t pageind, npages;
    size_t nmadvise;

    ql_new(&mapelms);

    /*
     * If chunk is the spare, temporarily re-allocate it, 1) so that its
     * run is reinserted into runs_avail, and 2) so that it cannot be
     * completely discarded by another thread while arena->lock is dropped
     * by this thread.  Note that the arena_run_dalloc() call will
     * implicitly deallocate the chunk, so no explicit action is required
     * in this function to deallocate the chunk.
     *
     * Note that once a chunk contains dirty pages, it cannot again contain
     * a single run unless 1) it is a dirty run, or 2) this function purges
     * dirty pages and causes the transition to a single clean run.  Thus
     * (chunk == arena->spare) is possible, but it is not possible for
     * this function to be called on the spare unless it contains a dirty
     * run.
     */
    if (chunk == arena->spare) {
        assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
        assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);

        arena_chunk_alloc(arena);
    }

    if (config_stats)
        arena->stats.purged += chunk->ndirty;

    /*
     * Operate on all dirty runs if there is no clean/dirty run
     * fragmentation.
     */
    if (chunk->nruns_adjac == 0)
        all = true;

    /*
     * Temporarily allocate free dirty runs within chunk.  If all is false,
     * only operate on dirty runs that are fragments; otherwise operate on
     * all dirty runs.
     */
    for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
        mapelm = arena_mapp_get(chunk, pageind);
        if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
            size_t run_size =
                arena_mapbits_unallocated_size_get(chunk, pageind);

            npages = run_size >> LG_PAGE;
            assert(pageind + npages <= chunk_npages);
            assert(arena_mapbits_dirty_get(chunk, pageind) ==
                arena_mapbits_dirty_get(chunk, pageind+npages-1));

            if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
                (all || arena_avail_adjac(chunk, pageind,
                npages))) {
                arena_run_t *run = (arena_run_t *)((uintptr_t)
                    chunk + (uintptr_t)(pageind << LG_PAGE));

                arena_run_split(arena, run, run_size, true,
                    BININD_INVALID, false);
                /* Append to list for later processing. */
                ql_elm_new(mapelm, u.ql_link);
                ql_tail_insert(&mapelms, mapelm, u.ql_link);
            }
        } else {
            /* Skip allocated run. */
            if (arena_mapbits_large_get(chunk, pageind) != 0) {
                npages = arena_mapbits_large_size_get(chunk,
                    pageind) >> LG_PAGE;
            } else {
                size_t binind;
                arena_bin_info_t *bin_info;
                arena_run_t *run = (arena_run_t *)((uintptr_t)
                    chunk + (uintptr_t)(pageind << LG_PAGE));

                assert(arena_mapbits_small_runind_get(chunk,
                    pageind) == 0);
                binind = arena_bin_index(arena, run->bin);
                bin_info = &arena_bin_info[binind];
                npages = bin_info->run_size >> LG_PAGE;
            }
        }
    }
    assert(pageind == chunk_npages);
    assert(chunk->ndirty == 0 || all == false);
    assert(chunk->nruns_adjac == 0);

    malloc_mutex_unlock(&arena->lock);
    if (config_stats)
        nmadvise = 0;
    npurged = 0;
    ql_foreach(mapelm, &mapelms, u.ql_link) {
        bool unzeroed;
        size_t flag_unzeroed, i;

        pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t)) + map_bias;
        npages = arena_mapbits_large_size_get(chunk, pageind) >>
            LG_PAGE;
        assert(pageind + npages <= chunk_npages);
        unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
            LG_PAGE)), (npages << LG_PAGE));
        flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
        /*
         * Set the unzeroed flag for all pages, now that pages_purge()
         * has returned whether the pages were zeroed as a side effect
         * of purging.  This chunk map modification is safe even though
         * the arena mutex isn't currently owned by this thread,
         * because the run is marked as allocated, thus protecting it
         * from being modified by any other thread.  As long as these
         * writes don't perturb the first and last elements'
         * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
         */
        for (i = 0; i < npages; i++) {
            arena_mapbits_unzeroed_set(chunk, pageind+i,
                flag_unzeroed);
        }
        npurged += npages;
        if (config_stats)
            nmadvise++;
    }
    malloc_mutex_lock(&arena->lock);
    if (config_stats)
        arena->stats.nmadvise += nmadvise;

    /* Deallocate runs. */
    for (mapelm = ql_first(&mapelms); mapelm != NULL;
        mapelm = ql_first(&mapelms)) {
        arena_run_t *run;

        pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t)) + map_bias;
        run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
            LG_PAGE));
        ql_remove(&mapelms, mapelm, u.ql_link);
        arena_run_dalloc(arena, run, false, true);
    }

    return (npurged);
}
static arena_chunk_t *
chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
{
    size_t *ndirty = (size_t *)arg;

    assert(chunk->ndirty != 0);
    *ndirty += chunk->ndirty;

    return (NULL);
}

static void
arena_purge(arena_t *arena, bool all)
{
    arena_chunk_t *chunk;
    size_t npurgatory;
    if (config_debug) {
        size_t ndirty = 0;

        arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
            chunks_dirty_iter_cb, (void *)&ndirty);
        assert(ndirty == arena->ndirty);
    }
    assert(arena->ndirty > arena->npurgatory || all);
    assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
        arena->npurgatory) || all);

    if (config_stats)
        arena->stats.npurge++;

    /*
     * Compute the minimum number of pages that this thread should try to
     * purge, and add the result to arena->npurgatory.  This will keep
     * multiple threads from racing to reduce ndirty below the threshold.
     */
    {
        size_t npurgeable = arena->ndirty - arena->npurgatory;

        if (all == false) {
            size_t threshold = (arena->nactive >>
                opt_lg_dirty_mult);

            npurgatory = npurgeable - threshold;
        } else
            npurgatory = npurgeable;
    }
    arena->npurgatory += npurgatory;

    while (npurgatory > 0) {
        size_t npurgeable, npurged, nunpurged;

        /* Get next chunk with dirty pages. */
        chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
        if (chunk == NULL) {
            /*
             * This thread was unable to purge as many pages as
             * originally intended, due to races with other threads
             * that either did some of the purging work, or re-used
             * dirty pages.
             */
            arena->npurgatory -= npurgatory;
            return;
        }
        npurgeable = chunk->ndirty;
        assert(npurgeable != 0);

        if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
            /*
             * This thread will purge all the dirty pages in chunk,
             * so set npurgatory to reflect this thread's intent to
             * purge the pages.  This tends to reduce the chances
             * of the following scenario:
             *
             * 1) This thread sets arena->npurgatory such that
             *    (arena->ndirty - arena->npurgatory) is at the
             *    threshold.
             * 2) This thread drops arena->lock.
             * 3) Another thread causes one or more pages to be
             *    dirtied, and immediately determines that it must
             *    purge dirty pages.
             *
             * If this scenario *does* play out, that's okay,
             * because all of the purging work being done really
             * needs to happen.
             */
            arena->npurgatory += npurgeable - npurgatory;
            npurgatory = npurgeable;
        }

        /*
         * Keep track of how many pages are purgeable, versus how many
         * actually get purged, and adjust counters accordingly.
         */
        arena->npurgatory -= npurgeable;
        npurgatory -= npurgeable;
        npurged = arena_chunk_purge(arena, chunk, all);
        nunpurged = npurgeable - npurged;
        arena->npurgatory += nunpurged;
        npurgatory += nunpurged;
    }
}

void
arena_purge_all(arena_t *arena)
{

    malloc_mutex_lock(&arena->lock);
    arena_purge(arena, true);
    malloc_mutex_unlock(&arena->lock);
}
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
    arena_chunk_t *chunk;
    size_t size, run_ind, run_pages, flag_dirty;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
    assert(run_ind >= map_bias);
    assert(run_ind < chunk_npages);
    if (arena_mapbits_large_get(chunk, run_ind) != 0) {
        size = arena_mapbits_large_size_get(chunk, run_ind);
        assert(size == PAGE ||
            arena_mapbits_large_size_get(chunk,
            run_ind+(size>>LG_PAGE)-1) == 0);
    } else {
        size_t binind = arena_bin_index(arena, run->bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];
        size = bin_info->run_size;
    }
    run_pages = (size >> LG_PAGE);
    if (config_stats) {
        /*
         * Update stats_cactive if nactive is crossing a chunk
         * multiple.
         */
        size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
            CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
        if (cactive_diff != 0)
            stats_cactive_sub(cactive_diff);
    }
    arena->nactive -= run_pages;

    /*
     * The run is dirty if the caller claims to have dirtied it, as well as
     * if it was already dirty before being allocated and the caller
     * doesn't claim to have cleaned it.
     */
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
        arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
    if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
        dirty = true;
    flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;

    /* Mark pages as unallocated in the chunk map. */
    if (dirty) {
        arena_mapbits_unallocated_set(chunk, run_ind, size,
            CHUNK_MAP_DIRTY);
        arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
            CHUNK_MAP_DIRTY);
    } else {
        arena_mapbits_unallocated_set(chunk, run_ind, size,
            arena_mapbits_unzeroed_get(chunk, run_ind));
        arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
            arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
    }

    /* Try to coalesce forward. */
    if (run_ind + run_pages < chunk_npages &&
        arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
        arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
        size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
            run_ind+run_pages);
        size_t nrun_pages = nrun_size >> LG_PAGE;

        /*
         * Remove successor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert(arena_mapbits_unallocated_size_get(chunk,
            run_ind+run_pages+nrun_pages-1) == nrun_size);
        assert(arena_mapbits_dirty_get(chunk,
            run_ind+run_pages+nrun_pages-1) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
            false, true);

        size += nrun_size;
        run_pages += nrun_pages;

        arena_mapbits_unallocated_size_set(chunk, run_ind, size);
        arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
            size);
    }

    /* Try to coalesce backward. */
    if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
        == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
        size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
            run_ind-1);
        size_t prun_pages = prun_size >> LG_PAGE;

        run_ind -= prun_pages;

        /*
         * Remove predecessor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
            prun_size);
        assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
            false);

        size += prun_size;
        run_pages += prun_pages;

        arena_mapbits_unallocated_size_set(chunk, run_ind, size);
        arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
            size);
    }

    /* Insert into runs_avail, now that coalescing is complete. */
    assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
        arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
        arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
    arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);

    /* Deallocate chunk if it is now completely unused. */
    if (size == arena_maxclass) {
        assert(run_ind == map_bias);
        assert(run_pages == (arena_maxclass >> LG_PAGE));
        arena_chunk_dealloc(arena, chunk);
    }

    /*
     * It is okay to do dirty page processing here even if the chunk was
     * deallocated above, since in that case it is the spare.  Waiting
     * until after possible chunk deallocation to do dirty processing
     * allows for an old spare to be fully deallocated, thus decreasing the
     * chances of spuriously crossing the dirty page purging threshold.
     */
    if (dirty)
        arena_maybe_purge(arena);
}
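
/*
 * Coalescing example (hypothetical layout): freeing a 2-page run at pages
 * [5..6] while pages [7..10] hold a free run with the same dirty state
 * yields one 6-page run at [5..10]; the successor is removed from
 * runs_avail first and the merged run is inserted exactly once, so the
 * tree never contains both fragments.
 */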
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
    size_t head_npages = (oldsize - newsize) >> LG_PAGE;
    size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * leading run as separately allocated.  Set the last element of each
     * run first, in case of single-page runs.
     */
    assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
    arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
    arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);

    if (config_debug) {
        UNUSED size_t tail_npages = newsize >> LG_PAGE;
        assert(arena_mapbits_large_size_get(chunk,
            pageind+head_npages+tail_npages-1) == 0);
        assert(arena_mapbits_dirty_get(chunk,
            pageind+head_npages+tail_npages-1) == flag_dirty);
    }
    arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
        flag_dirty);

    arena_run_dalloc(arena, run, false, false);
}

static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
    size_t head_npages = newsize >> LG_PAGE;
    size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * trailing run as separately allocated.  Set the last element of each
     * run first, in case of single-page runs.
     */
    assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
    arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
    arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);

    if (config_debug) {
        UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
        assert(arena_mapbits_large_size_get(chunk,
            pageind+head_npages+tail_npages-1) == 0);
        assert(arena_mapbits_dirty_get(chunk,
            pageind+head_npages+tail_npages-1) == flag_dirty);
    }
    arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
        flag_dirty);

    arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
        dirty, false);
}
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
    arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
    if (mapelm != NULL) {
        arena_chunk_t *chunk;
        size_t pageind;
        arena_run_t *run;

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
        pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t))) + map_bias;
        run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
            arena_mapbits_small_runind_get(chunk, pageind)) <<
            LG_PAGE));
        return (run);
    }

    return (NULL);
}

static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
    arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

    assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);

    arena_run_tree_insert(&bin->runs, mapelm);
}

static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
    arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

    assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);

    arena_run_tree_remove(&bin->runs, mapelm);
}

static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
    arena_run_t *run = arena_bin_runs_first(bin);
    if (run != NULL) {
        arena_bin_runs_remove(bin, run);
        if (config_stats)
            bin->stats.reruns++;
    }
    return (run);
}
static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
    arena_run_t *run;
    size_t binind;
    arena_bin_info_t *bin_info;

    /* Look for a usable run. */
    run = arena_bin_nonfull_run_tryget(bin);
    if (run != NULL)
        return (run);
    /* No existing runs have any space available. */

    binind = arena_bin_index(arena, bin);
    bin_info = &arena_bin_info[binind];

    /* Allocate a new run. */
    malloc_mutex_unlock(&bin->lock);
    /******************************/
    malloc_mutex_lock(&arena->lock);
    run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
    if (run != NULL) {
        bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
            (uintptr_t)bin_info->bitmap_offset);

        /* Initialize run internals. */
        run->bin = bin;
        run->nextind = 0;
        run->nfree = bin_info->nregs;
        bitmap_init(bitmap, &bin_info->bitmap_info);
    }
    malloc_mutex_unlock(&arena->lock);
    /********************************/
    malloc_mutex_lock(&bin->lock);
    if (run != NULL) {
        if (config_stats) {
            bin->stats.nruns++;
            bin->stats.curruns++;
        }
        return (run);
    }

    /*
     * arena_run_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped bin->lock above,
     * so search one more time.
     */
    run = arena_bin_nonfull_run_tryget(bin);
    if (run != NULL)
        return (run);

    return (NULL);
}

/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
    void *ret;
    size_t binind;
    arena_bin_info_t *bin_info;
    arena_run_t *run;

    binind = arena_bin_index(arena, bin);
    bin_info = &arena_bin_info[binind];
    bin->runcur = NULL;
    run = arena_bin_nonfull_run_get(arena, bin);
    if (bin->runcur != NULL && bin->runcur->nfree > 0) {
        /*
         * Another thread updated runcur while this one ran without the
         * bin lock in arena_bin_nonfull_run_get().
         */
        assert(bin->runcur->nfree > 0);
        ret = arena_run_reg_alloc(bin->runcur, bin_info);
        if (run != NULL) {
            arena_chunk_t *chunk;

            /*
             * arena_run_alloc() may have allocated run, or it may
             * have pulled run from the bin's run tree.  Therefore
             * it is unsafe to make any assumptions about how run
             * has previously been used, and arena_bin_lower_run()
             * must be called, as if a region were just deallocated
             * from the run.
             */
            chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
            if (run->nfree == bin_info->nregs)
                arena_dalloc_bin_run(arena, chunk, run, bin);
            else
                arena_bin_lower_run(arena, chunk, run, bin);
        }
        return (ret);
    }

    if (run == NULL)
        return (NULL);

    bin->runcur = run;

    assert(bin->runcur->nfree > 0);

    return (arena_run_reg_alloc(bin->runcur, bin_info));
}
void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
    uint64_t prof_accumbytes)
{
    unsigned i, nfill;
    arena_bin_t *bin;
    arena_run_t *run;
    void *ptr;

    assert(tbin->ncached == 0);

    if (config_prof && arena_prof_accum(arena, prof_accumbytes))
        prof_idump();
    bin = &arena->bins[binind];
    malloc_mutex_lock(&bin->lock);
    for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
        tbin->lg_fill_div); i < nfill; i++) {
        if ((run = bin->runcur) != NULL && run->nfree > 0)
            ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
        else
            ptr = arena_bin_malloc_hard(arena, bin);
        if (ptr == NULL)
            break;
        if (config_fill && opt_junk) {
            arena_alloc_junk_small(ptr, &arena_bin_info[binind],
                true);
        }
        /* Insert such that low regions get used first. */
        tbin->avail[nfill - 1 - i] = ptr;
    }
    if (config_stats) {
        bin->stats.allocated += i * arena_bin_info[binind].reg_size;
        bin->stats.nmalloc += i;
        bin->stats.nrequests += tbin->tstats.nrequests;
        bin->stats.nfills++;
        tbin->tstats.nrequests = 0;
    }
    malloc_mutex_unlock(&bin->lock);
    tbin->ncached = i;
}
void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{

    if (zero == false) {
        size_t redzone_size = bin_info->redzone_size;
        memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
            redzone_size);
        memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
            redzone_size);
    } else {
        memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
            bin_info->reg_interval);
    }
}

void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
{
    size_t size = bin_info->reg_size;
    size_t redzone_size = bin_info->redzone_size;
    size_t i;
    bool error = false;

    for (i = 1; i <= redzone_size; i++) {
        unsigned byte;
        if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
            error = true;
            malloc_printf("<jemalloc>: Corrupt redzone "
                "%zu byte%s before %p (size %zu), byte=%#x\n", i,
                (i == 1) ? "" : "s", ptr, size, byte);
        }
    }
    for (i = 0; i < redzone_size; i++) {
        unsigned byte;
        if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
            error = true;
            malloc_printf("<jemalloc>: Corrupt redzone "
                "%zu byte%s after end of %p (size %zu), byte=%#x\n",
                i, (i == 1) ? "" : "s", ptr, size, byte);
        }
    }
    if (opt_abort && error)
        abort();

    memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
        bin_info->reg_interval);
}
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
    void *ret;
    arena_bin_t *bin;
    arena_run_t *run;
    size_t binind;

    binind = SMALL_SIZE2BIN(size);
    assert(binind < NBINS);
    bin = &arena->bins[binind];
    size = arena_bin_info[binind].reg_size;

    malloc_mutex_lock(&bin->lock);
    if ((run = bin->runcur) != NULL && run->nfree > 0)
        ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
    else
        ret = arena_bin_malloc_hard(arena, bin);

    if (ret == NULL) {
        malloc_mutex_unlock(&bin->lock);
        return (NULL);
    }

    if (config_stats) {
        bin->stats.allocated += size;
        bin->stats.nmalloc++;
        bin->stats.nrequests++;
    }
    malloc_mutex_unlock(&bin->lock);
    if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
        prof_idump();

    if (zero == false) {
        if (config_fill) {
            if (opt_junk) {
                arena_alloc_junk_small(ret,
                    &arena_bin_info[binind], false);
            } else if (opt_zero)
                memset(ret, 0, size);
        }
    } else {
        if (config_fill && opt_junk) {
            arena_alloc_junk_small(ret, &arena_bin_info[binind],
                true);
        }
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        memset(ret, 0, size);
    }
    VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}
void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
    void *ret;
    UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

    /* Large allocation. */
    size = PAGE_CEILING(size);
    malloc_mutex_lock(&arena->lock);
    ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
    if (ret == NULL) {
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }
    if (config_stats) {
        arena->stats.nmalloc_large++;
        arena->stats.nrequests_large++;
        arena->stats.allocated_large += size;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
    }
    if (config_prof)
        idump = arena_prof_accum_locked(arena, size);
    malloc_mutex_unlock(&arena->lock);
    if (config_prof && idump)
        prof_idump();

    if (zero == false) {
        if (config_fill) {
            if (opt_junk)
                memset(ret, 0xa5, size);
            else if (opt_zero)
                memset(ret, 0, size);
        }
    }

    return (ret);
}
/* Only handles large allocations that require more than page alignment. */
void *
arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
    void *ret;
    size_t alloc_size, leadsize, trailsize;
    arena_run_t *run;
    arena_chunk_t *chunk;

    assert((size & PAGE_MASK) == 0);

    alignment = PAGE_CEILING(alignment);
    alloc_size = size + alignment - PAGE;

    malloc_mutex_lock(&arena->lock);
    run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
    if (run == NULL) {
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }
    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);

    leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
        (uintptr_t)run;
    assert(alloc_size >= leadsize + size);
    trailsize = alloc_size - leadsize - size;
    ret = (void *)((uintptr_t)run + leadsize);
    if (leadsize != 0) {
        arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
            leadsize);
    }
    if (trailsize != 0) {
        arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
            false);
    }

    if (config_stats) {
        arena->stats.nmalloc_large++;
        arena->stats.nrequests_large++;
        arena->stats.allocated_large += size;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
    }
    malloc_mutex_unlock(&arena->lock);

    if (config_fill && zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, size);
        else if (opt_zero)
            memset(ret, 0, size);
    }
    return (ret);
}
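
/*
 * Alignment example: for size == 4 pages and alignment == 16 pages,
 * alloc_size is 4 + 16 - 1 == 19 pages.  If the run happens to start 3
 * pages past a 16-page boundary, leadsize == 13 pages and trailsize ==
 * 19 - 13 - 4 == 2 pages; both ends are trimmed back to the arena, leaving
 * a 4-page run at the requested alignment.
 */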
void
arena_prof_promoted(const void *ptr, size_t size)
{
    arena_chunk_t *chunk;
    size_t pageind, binind;

    cassert(config_prof);
    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);
    assert(isalloc(ptr, false) == PAGE);
    assert(isalloc(ptr, true) == PAGE);
    assert(size <= SMALL_MAXCLASS);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    binind = SMALL_SIZE2BIN(size);
    assert(binind < NBINS);
    arena_mapbits_large_binind_set(chunk, pageind, binind);

    assert(isalloc(ptr, false) == PAGE);
    assert(isalloc(ptr, true) == size);
}
static void
arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

    /* Dissociate run from bin. */
    if (run == bin->runcur)
        bin->runcur = NULL;
    else {
        size_t binind = arena_bin_index(chunk->arena, bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];

        if (bin_info->nregs != 1) {
            /*
             * This block's conditional is necessary because if the
             * run only contains one region, then it never gets
             * inserted into the non-full runs tree.
             */
            arena_bin_runs_remove(bin, run);
        }
    }
}

static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{
    size_t binind;
    arena_bin_info_t *bin_info;
    size_t npages, run_ind, past;

    assert(run != bin->runcur);
    assert(arena_run_tree_search(&bin->runs,
        arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
        == NULL);

    binind = arena_bin_index(chunk->arena, run->bin);
    bin_info = &arena_bin_info[binind];

    malloc_mutex_unlock(&bin->lock);
    /******************************/
    npages = bin_info->run_size >> LG_PAGE;
    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
    past = (size_t)(PAGE_CEILING((uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
        bin_info->reg_interval - bin_info->redzone_size) -
        (uintptr_t)chunk) >> LG_PAGE);
    malloc_mutex_lock(&arena->lock);

    /*
     * If the run was originally clean, and some pages were never touched,
     * trim the clean pages before deallocating the dirty portion of the
     * run.
     */
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
        arena_mapbits_dirty_get(chunk, run_ind+npages-1));
    if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
        npages) {
        /* Trim clean pages.  Convert to large run beforehand. */
        assert(npages > 0);
        arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
        arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
        arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
            ((past - run_ind) << LG_PAGE), false);
        /* npages = past - run_ind; */
    }
    arena_run_dalloc(arena, run, true, false);
    malloc_mutex_unlock(&arena->lock);
    /****************************/
    malloc_mutex_lock(&bin->lock);
    if (config_stats)
        bin->stats.curruns--;
}
static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

    /*
     * Make sure that if bin->runcur is non-NULL, it refers to the lowest
     * non-full run.  It is okay to NULL runcur out rather than proactively
     * keeping it pointing at the lowest non-full run.
     */
    if ((uintptr_t)run < (uintptr_t)bin->runcur) {
        /* Switch runcur. */
        if (bin->runcur->nfree > 0)
            arena_bin_runs_insert(bin, bin->runcur);
        bin->runcur = run;
        if (config_stats)
            bin->stats.reruns++;
    } else
        arena_bin_runs_insert(bin, run);
}
void
arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm)
{
    size_t pageind;
    arena_run_t *run;
    arena_bin_t *bin;
    arena_bin_info_t *bin_info;
    size_t size, binind;

    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
        arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
    bin = run->bin;
    binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
    bin_info = &arena_bin_info[binind];
    if (config_fill || config_stats)
        size = bin_info->reg_size;

    if (config_fill && opt_junk)
        arena_dalloc_junk_small(ptr, bin_info);

    arena_run_reg_dalloc(run, ptr);
    if (run->nfree == bin_info->nregs) {
        arena_dissociate_bin_run(chunk, run, bin);
        arena_dalloc_bin_run(arena, chunk, run, bin);
    } else if (run->nfree == 1 && run != bin->runcur)
        arena_bin_lower_run(arena, chunk, run, bin);

    if (config_stats) {
        bin->stats.allocated -= size;
        bin->stats.ndalloc++;
    }
}

void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_t *mapelm)
{
    arena_run_t *run;
    arena_bin_t *bin;

    run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
        arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
    bin = run->bin;
    malloc_mutex_lock(&bin->lock);
    arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
    malloc_mutex_unlock(&bin->lock);
}

void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind)
{
    arena_chunk_map_t *mapelm;

    if (config_debug) {
        /* arena_ptr_small_binind_get() does extra sanity checking. */
        assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
            pageind)) != BININD_INVALID);
    }
    mapelm = arena_mapp_get(chunk, pageind);
    arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}
void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

    if (config_fill || config_stats) {
        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        size_t size = arena_mapbits_large_size_get(chunk, pageind);

        if (config_fill && config_stats && opt_junk)
            memset(ptr, 0x5a, size);
        if (config_stats) {
            arena->stats.ndalloc_large++;
            arena->stats.allocated_large -= size;
            arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
            arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
        }
    }

    arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
}

void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

    malloc_mutex_lock(&arena->lock);
    arena_dalloc_large_locked(arena, chunk, ptr);
    malloc_mutex_unlock(&arena->lock);
}
static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size)
{

    assert(size < oldsize);

    /*
     * Shrink the run, and make trailing pages available for other
     * allocations.
     */
    malloc_mutex_lock(&arena->lock);
    arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
        true);
    if (config_stats) {
        arena->stats.ndalloc_large++;
        arena->stats.allocated_large -= oldsize;
        arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
        arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

        arena->stats.nmalloc_large++;
        arena->stats.nrequests_large++;
        arena->stats.allocated_large += size;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
    }
    malloc_mutex_unlock(&arena->lock);
}

static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size, size_t extra, bool zero)
{
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    size_t npages = oldsize >> LG_PAGE;
    size_t followsize;

    assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));

    /* Try to extend the run. */
    assert(size + extra > oldsize);
    malloc_mutex_lock(&arena->lock);
    if (pageind + npages < chunk_npages &&
        arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
        (followsize = arena_mapbits_unallocated_size_get(chunk,
        pageind+npages)) >= size - oldsize) {
        /*
         * The next run is available and sufficiently large.  Split the
         * following run, then merge the first part with the existing
         * allocation.
         */
        size_t flag_dirty;
        size_t splitsize = (oldsize + followsize <= size + extra)
            ? followsize : size + extra - oldsize;
        arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
            ((pageind+npages) << LG_PAGE)), splitsize, true,
            BININD_INVALID, zero);

        size = oldsize + splitsize;
        npages = size >> LG_PAGE;

        /*
         * Mark the extended run as dirty if either portion of the run
         * was dirty before allocation.  This is rather pedantic,
         * because there's not actually any sequence of events that
         * could cause the resulting run to be passed to
         * arena_run_dalloc() with the dirty argument set to false
         * (which is when dirty flag consistency would really matter).
         */
        flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
            arena_mapbits_dirty_get(chunk, pageind+npages-1);
        arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
        arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);

        if (config_stats) {
            arena->stats.ndalloc_large++;
            arena->stats.allocated_large -= oldsize;
            arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
            arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

            arena->stats.nmalloc_large++;
            arena->stats.nrequests_large++;
            arena->stats.allocated_large += size;
            arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
            arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
            arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
        }
        malloc_mutex_unlock(&arena->lock);
        return (false);
    }
    malloc_mutex_unlock(&arena->lock);

    return (true);
}
/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail if growing an object when the following run is already in use.
 */
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
    size_t psize;

    psize = PAGE_CEILING(size + extra);
    if (psize == oldsize) {
        /* Same size class. */
        if (config_fill && opt_junk && size < oldsize) {
            memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
                size);
        }
        return (false);
    } else {
        arena_chunk_t *chunk;
        arena_t *arena;

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        arena = chunk->arena;

        if (psize < oldsize) {
            /* Fill before shrinking in order to avoid a race. */
            if (config_fill && opt_junk) {
                memset((void *)((uintptr_t)ptr + size), 0x5a,
                    oldsize - size);
            }
            arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
                psize);
            return (false);
        } else {
            bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
                oldsize, PAGE_CEILING(size),
                psize - PAGE_CEILING(size), zero);
            if (config_fill && ret == false && zero == false &&
                opt_zero) {
                memset((void *)((uintptr_t)ptr + oldsize), 0,
                    size - oldsize);
            }
            return (ret);
        }
    }
}
void *
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{

    /*
     * Avoid moving the allocation if the size class can be left the same.
     */
    if (oldsize <= arena_maxclass) {
        if (oldsize <= SMALL_MAXCLASS) {
            assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
                == oldsize);
            if ((size + extra <= SMALL_MAXCLASS &&
                SMALL_SIZE2BIN(size + extra) ==
                SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
                size + extra >= oldsize)) {
                if (config_fill && opt_junk && size < oldsize) {
                    memset((void *)((uintptr_t)ptr + size),
                        0x5a, oldsize - size);
                }
                return (ptr);
            }
        } else {
            assert(size <= arena_maxclass);
            if (size + extra > SMALL_MAXCLASS) {
                if (arena_ralloc_large(ptr, oldsize, size,
                    extra, zero) == false)
                    return (ptr);
            }
        }
    }

    /* Reallocation would require a move. */
    return (NULL);
}
void *
arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
    bool try_tcache_dalloc)
{
    void *ret;
    size_t copysize;

    /* Try to avoid moving the allocation. */
    ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
    if (ret != NULL)
        return (ret);

    /*
     * size and oldsize are different enough that we need to move the
     * object.  In that case, fall back to allocating new space and
     * copying.
     */
    if (alignment != 0) {
        size_t usize = sa2u(size + extra, alignment);
        if (usize == 0)
            return (NULL);
        ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
    } else
        ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);

    if (ret == NULL) {
        if (extra == 0)
            return (NULL);
        /* Try again, this time without extra. */
        if (alignment != 0) {
            size_t usize = sa2u(size, alignment);
            if (usize == 0)
                return (NULL);
            ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
                arena);
        } else
            ret = arena_malloc(arena, size, zero, try_tcache_alloc);

        if (ret == NULL)
            return (NULL);
    }

    /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */

    /*
     * Copy at most size bytes (not size+extra), since the caller has no
     * expectation that the extra bytes will be reliably preserved.
     */
    copysize = (size < oldsize) ? size : oldsize;
    VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
    memcpy(ret, ptr, copysize);
    iqallocx(ptr, try_tcache_dalloc);
    return (ret);
}
dss_prec_t
arena_dss_prec_get(arena_t *arena)
{
    dss_prec_t ret;

    malloc_mutex_lock(&arena->lock);
    ret = arena->dss_prec;
    malloc_mutex_unlock(&arena->lock);
    return (ret);
}

void
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{

    malloc_mutex_lock(&arena->lock);
    arena->dss_prec = dss_prec;
    malloc_mutex_unlock(&arena->lock);
}
void
arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats)
{
    unsigned i;

    malloc_mutex_lock(&arena->lock);
    *dss = dss_prec_names[arena->dss_prec];
    *nactive += arena->nactive;
    *ndirty += arena->ndirty;

    astats->mapped += arena->stats.mapped;
    astats->npurge += arena->stats.npurge;
    astats->nmadvise += arena->stats.nmadvise;
    astats->purged += arena->stats.purged;
    astats->allocated_large += arena->stats.allocated_large;
    astats->nmalloc_large += arena->stats.nmalloc_large;
    astats->ndalloc_large += arena->stats.ndalloc_large;
    astats->nrequests_large += arena->stats.nrequests_large;

    for (i = 0; i < nlclasses; i++) {
        lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
        lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
        lstats[i].nrequests += arena->stats.lstats[i].nrequests;
        lstats[i].curruns += arena->stats.lstats[i].curruns;
    }
    malloc_mutex_unlock(&arena->lock);

    for (i = 0; i < NBINS; i++) {
        arena_bin_t *bin = &arena->bins[i];

        malloc_mutex_lock(&bin->lock);
        bstats[i].allocated += bin->stats.allocated;
        bstats[i].nmalloc += bin->stats.nmalloc;
        bstats[i].ndalloc += bin->stats.ndalloc;
        bstats[i].nrequests += bin->stats.nrequests;
        if (config_tcache) {
            bstats[i].nfills += bin->stats.nfills;
            bstats[i].nflushes += bin->stats.nflushes;
        }
        bstats[i].nruns += bin->stats.nruns;
        bstats[i].reruns += bin->stats.reruns;
        bstats[i].curruns += bin->stats.curruns;
        malloc_mutex_unlock(&bin->lock);
    }
}
bool
arena_new(arena_t *arena, unsigned ind)
{
    unsigned i;
    arena_bin_t *bin;

    arena->ind = ind;
    arena->nthreads = 0;

    if (malloc_mutex_init(&arena->lock))
        return (true);

    if (config_stats) {
        memset(&arena->stats, 0, sizeof(arena_stats_t));
        arena->stats.lstats =
            (malloc_large_stats_t *)base_alloc(nlclasses *
            sizeof(malloc_large_stats_t));
        if (arena->stats.lstats == NULL)
            return (true);
        memset(arena->stats.lstats, 0, nlclasses *
            sizeof(malloc_large_stats_t));
        if (config_tcache)
            ql_new(&arena->tcache_ql);
    }

    if (config_prof)
        arena->prof_accumbytes = 0;

    arena->dss_prec = chunk_dss_prec_get();

    /* Initialize chunks. */
    arena_chunk_dirty_new(&arena->chunks_dirty);
    arena->spare = NULL;

    arena->nactive = 0;
    arena->ndirty = 0;
    arena->npurgatory = 0;

    arena_avail_tree_new(&arena->runs_avail);

    /* Initialize bins. */
    for (i = 0; i < NBINS; i++) {
        bin = &arena->bins[i];
        if (malloc_mutex_init(&bin->lock))
            return (true);
        bin->runcur = NULL;
        arena_run_tree_new(&bin->runs);
        if (config_stats)
            memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
    }

    return (false);
}
/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size >= min_run_size
 *   *) bin_info->run_size <= arena_maxclass
 *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
 * calculated here, since these settings are all interdependent.
 */
static size_t
bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
{
    size_t pad_size;
    size_t try_run_size, good_run_size;
    uint32_t try_nregs, good_nregs;
    uint32_t try_hdr_size, good_hdr_size;
    uint32_t try_bitmap_offset, good_bitmap_offset;
    uint32_t try_ctx0_offset, good_ctx0_offset;
    uint32_t try_redzone0_offset, good_redzone0_offset;

    assert(min_run_size >= PAGE);
    assert(min_run_size <= arena_maxclass);

    /*
     * Determine redzone size based on minimum alignment and minimum
     * redzone size.  Add padding to the end of the run if it is needed to
     * align the regions.  The padding allows each redzone to be half the
     * minimum alignment; without the padding, each redzone would have to
     * be twice as large in order to maintain alignment.
     */
    if (config_fill && opt_redzone) {
        size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
        if (align_min <= REDZONE_MINSIZE) {
            bin_info->redzone_size = REDZONE_MINSIZE;
            pad_size = 0;
        } else {
            bin_info->redzone_size = align_min >> 1;
            pad_size = bin_info->redzone_size;
        }
    } else {
        bin_info->redzone_size = 0;
        pad_size = 0;
    }
    bin_info->reg_interval = bin_info->reg_size +
        (bin_info->redzone_size << 1);

    /*
     * Calculate known-valid settings before entering the run_size
     * expansion loop, so that the first part of the loop always copies
     * valid settings.
     *
     * The do..while loop iteratively reduces the number of regions until
     * the run header and the regions no longer overlap.  A closed formula
     * would be quite messy, since there is an interdependency between the
     * header's mask length and the number of regions.
     */
    try_run_size = min_run_size;
    try_nregs = ((try_run_size - sizeof(arena_run_t)) /
        bin_info->reg_interval)
        + 1; /* Counter-act try_nregs-- in loop. */
    if (try_nregs > RUN_MAXREGS) {
        try_nregs = RUN_MAXREGS
            + 1; /* Counter-act try_nregs-- in loop. */
    }
    do {
        try_nregs--;
        try_hdr_size = sizeof(arena_run_t);
        /* Pad to a long boundary. */
        try_hdr_size = LONG_CEILING(try_hdr_size);
        try_bitmap_offset = try_hdr_size;
        /* Add space for bitmap. */
        try_hdr_size += bitmap_size(try_nregs);
        if (config_prof && opt_prof && prof_promote == false) {
            /* Pad to a quantum boundary. */
            try_hdr_size = QUANTUM_CEILING(try_hdr_size);
            try_ctx0_offset = try_hdr_size;
            /* Add space for one (prof_ctx_t *) per region. */
            try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
        } else
            try_ctx0_offset = 0;
        try_redzone0_offset = try_run_size - (try_nregs *
            bin_info->reg_interval) - pad_size;
    } while (try_hdr_size > try_redzone0_offset);

    /* run_size expansion loop. */
    do {
        /*
         * Copy valid settings before trying more aggressive settings.
         */
        good_run_size = try_run_size;
        good_nregs = try_nregs;
        good_hdr_size = try_hdr_size;
        good_bitmap_offset = try_bitmap_offset;
        good_ctx0_offset = try_ctx0_offset;
        good_redzone0_offset = try_redzone0_offset;

        /* Try more aggressive settings. */
        try_run_size += PAGE;
        try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
            bin_info->reg_interval)
            + 1; /* Counter-act try_nregs-- in loop. */
        if (try_nregs > RUN_MAXREGS) {
            try_nregs = RUN_MAXREGS
                + 1; /* Counter-act try_nregs-- in loop. */
        }
        do {
            try_nregs--;
            try_hdr_size = sizeof(arena_run_t);
            /* Pad to a long boundary. */
            try_hdr_size = LONG_CEILING(try_hdr_size);
            try_bitmap_offset = try_hdr_size;
            /* Add space for bitmap. */
            try_hdr_size += bitmap_size(try_nregs);
            if (config_prof && opt_prof && prof_promote == false) {
                /* Pad to a quantum boundary. */
                try_hdr_size = QUANTUM_CEILING(try_hdr_size);
                try_ctx0_offset = try_hdr_size;
                /*
                 * Add space for one (prof_ctx_t *) per region.
                 */
                try_hdr_size += try_nregs *
                    sizeof(prof_ctx_t *);
            }
            try_redzone0_offset = try_run_size - (try_nregs *
                bin_info->reg_interval) - pad_size;
        } while (try_hdr_size > try_redzone0_offset);
    } while (try_run_size <= arena_maxclass
        && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
        RUN_MAX_OVRHD_RELAX
        && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
        && try_nregs < RUN_MAXREGS);

    assert(good_hdr_size <= good_redzone0_offset);

    /* Copy final settings. */
    bin_info->run_size = good_run_size;
    bin_info->nregs = good_nregs;
    bin_info->bitmap_offset = good_bitmap_offset;
    bin_info->ctx0_offset = good_ctx0_offset;
    bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;

    assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
        * bin_info->reg_interval) + pad_size == bin_info->run_size);

    return (good_run_size);
}
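
/*
 * Overhead example: the (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD *
 * try_run_size condition is a binary fixed-point comparison of
 * redzone0_offset/run_size against RUN_MAX_OVRHD/2^RUN_BFP, i.e. run_size
 * keeps growing while the space preceding region 0 (header plus padding)
 * still exceeds the maximum tolerated fraction of the run.
 */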
static void
bin_info_init(void)
{
    arena_bin_info_t *bin_info;
    size_t prev_run_size = PAGE;

#define SIZE_CLASS(bin, delta, size) \
    bin_info = &arena_bin_info[bin]; \
    bin_info->reg_size = size; \
    prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size); \
    bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
    SIZE_CLASSES
#undef SIZE_CLASS
}
void
arena_boot(void)
{
    size_t header_size;
    unsigned i;

    /*
     * Compute the header size such that it is large enough to contain the
     * page map.  The page map is biased to omit entries for the header
     * itself, so some iteration is necessary to compute the map bias.
     *
     * 1) Compute safe header_size and map_bias values that include enough
     *    space for an unbiased page map.
     * 2) Refine map_bias based on (1) to omit the header pages in the page
     *    map.  The resulting map_bias may be one too small.
     * 3) Refine map_bias based on (2).  The result will be >= the result
     *    from (2), and will always be correct.
     */
    map_bias = 0;
    for (i = 0; i < 3; i++) {
        header_size = offsetof(arena_chunk_t, map) +
            (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
        map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
            != 0);
    }
    assert(map_bias > 0);

    arena_maxclass = chunksize - (map_bias << LG_PAGE);

    bin_info_init();
}
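
/*
 * map_bias example (hypothetical geometry): if an unbiased page map would
 * need roughly 9.5 pages of header, pass 1 sets map_bias = 10; pass 2
 * recomputes the header for chunk_npages-10 map entries and may shrink
 * map_bias to 9; pass 3 re-expands it if pass 2 undershot, so the final
 * value is always correct.
 */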
void
arena_prefork(arena_t *arena)
{
    unsigned i;

    malloc_mutex_prefork(&arena->lock);
    for (i = 0; i < NBINS; i++)
        malloc_mutex_prefork(&arena->bins[i].lock);
}

void
arena_postfork_parent(arena_t *arena)
{
    unsigned i;

    for (i = 0; i < NBINS; i++)
        malloc_mutex_postfork_parent(&arena->bins[i].lock);
    malloc_mutex_postfork_parent(&arena->lock);
}

void
arena_postfork_child(arena_t *arena)
{
    unsigned i;

    for (i = 0; i < NBINS; i++)
        malloc_mutex_postfork_child(&arena->bins[i].lock);
    malloc_mutex_postfork_child(&arena->lock);
}