/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#define	LARGE_MINCLASS		(ZU(1) << LG_LARGE_MINCLASS)

/* Maximum number of regions in one run. */
#define	LG_RUN_MAXREGS		(LG_PAGE - LG_TINY_MIN)
#define	RUN_MAXREGS		(1U << LG_RUN_MAXREGS)
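/*
 * Worked example (typical configuration, illustrative): with LG_PAGE == 12
 * and LG_TINY_MIN == 3, LG_RUN_MAXREGS == 9, so a single run holds at most
 * RUN_MAXREGS == 512 regions.
 */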
/*
 * Minimum redzone size.  Redzones may be larger than this if necessary to
 * preserve region alignment.
 */
#define	REDZONE_MINSIZE		16
/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> lg_dirty_mult) >= ndirty
 *
 * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
 * many active pages as dirty pages.
 */
#define	LG_DIRTY_MULT_DEFAULT	3
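/*
 * Illustrative sketch (not part of the API): how the ratio above gates
 * purging.  With lg_dirty_mult == 3 and nactive == 4096, up to
 * 4096 >> 3 == 512 dirty pages are tolerated before purging; a negative
 * lg_dirty_mult disables ratio-based purging.  The arena_t fields referenced
 * here are declared later in this header.
 */
#if 0
static bool
example_ratio_should_purge(arena_t *arena)
{

    return (arena->lg_dirty_mult >= 0 &&
        arena->ndirty > (arena->nactive >> arena->lg_dirty_mult));
}
#endif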
typedef enum {
    purge_mode_ratio = 0,
    purge_mode_decay = 1,

    purge_mode_limit = 2
} purge_mode_t;
#define	PURGE_DEFAULT		purge_mode_ratio
/* Default decay time in seconds. */
#define	DECAY_TIME_DEFAULT	10
/* Number of event ticks between time checks. */
#define	DECAY_NTICKS_PER_UPDATE	1000
typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#ifdef JEMALLOC_ARENA_STRUCTS_A
struct arena_run_s {
    /* Index of bin this run is associated with. */
    szind_t         binind;

    /* Number of free regions in run. */
    unsigned        nfree;

    /* Per region allocated/deallocated bitmap. */
    bitmap_t        bitmap[BITMAP_GROUPS_MAX];
};

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_bits_s {
    /*
     * Run address (or size) and various flags are stored together.  The bit
     * layout looks like (assuming 32-bit system):
     *
     *   ???????? ???????? ???nnnnn nnndumla
     *
     * ? : Unallocated: Run address for first/last pages, unset for internal
     *                  pages.
     *     Small: Run page offset.
     *     Large: Run page count for first page, unset for trailing pages.
     * n : binind for small size class, BININD_INVALID for large size class.
     * d : dirty?
     * u : unzeroed?
     * m : decommitted?
     * l : large?
     * a : allocated?
     *
     * Following are example bit patterns for the three types of runs.
     *
     * p : run page offset
     * s : run size
     * n : binind for size class; large objects set these to BININD_INVALID
     * x : don't care
     * - : 0
     * + : 1
     * [DUMLA] : bit set
     * [dumla] : bit unset
     *
     *   Unallocated (clean):
     *     ssssssss ssssssss sss+++++ +++dum-a
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
     *     ssssssss ssssssss sss+++++ +++dUm-a
     *
     *   Unallocated (dirty):
     *     ssssssss ssssssss sss+++++ +++D-m-a
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     ssssssss ssssssss sss+++++ +++D-m-a
     *
     *   Small:
     *     pppppppp pppppppp pppnnnnn nnnd---A
     *     pppppppp pppppppp pppnnnnn nnn----A
     *     pppppppp pppppppp pppnnnnn nnnd---A
     *
     *   Large:
     *     ssssssss ssssssss sss+++++ +++D--LA
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     -------- -------- ---+++++ +++D--LA
     *
     *   Large (sampled, size <= LARGE_MINCLASS):
     *     ssssssss ssssssss sssnnnnn nnnD--LA
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     -------- -------- ---+++++ +++D--LA
     *
     *   Large (not sampled, size == LARGE_MINCLASS):
     *     ssssssss ssssssss sss+++++ +++D--LA
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     -------- -------- ---+++++ +++D--LA
     */
    size_t              bits;
#define	CHUNK_MAP_ALLOCATED	((size_t)0x01U)
#define	CHUNK_MAP_LARGE		((size_t)0x02U)
#define	CHUNK_MAP_STATE_MASK	((size_t)0x3U)

#define	CHUNK_MAP_DECOMMITTED	((size_t)0x04U)
#define	CHUNK_MAP_UNZEROED	((size_t)0x08U)
#define	CHUNK_MAP_DIRTY		((size_t)0x10U)
#define	CHUNK_MAP_FLAGS_MASK	((size_t)0x1cU)
#define	CHUNK_MAP_BININD_SHIFT	5
#define	BININD_INVALID		((size_t)0xffU)
#define	CHUNK_MAP_BININD_MASK	(BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
#define	CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK

#define	CHUNK_MAP_RUNIND_SHIFT	(CHUNK_MAP_BININD_SHIFT + 8)
#define	CHUNK_MAP_SIZE_SHIFT	(CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
#define	CHUNK_MAP_SIZE_MASK						\
    (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
};
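/*
 * Worked example (illustrative): on a 32-bit system with LG_PAGE == 12,
 * CHUNK_MAP_RUNIND_SHIFT == 13 and CHUNK_MAP_SIZE_SHIFT == 1, so a 3-page
 * (0x3000 byte) dirty unallocated run is encoded as
 * (0x3000 << 1) | CHUNK_MAP_BININD_INVALID | CHUNK_MAP_DIRTY == 0x7ff0.
 */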
struct arena_runs_dirty_link_s {
    qr(arena_runs_dirty_link_t) rd_link;
};

/*
 * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
 * like arena_chunk_map_bits_t.  Two separate arrays are stored within each
 * chunk header in order to improve cache locality.
 */
struct arena_chunk_map_misc_s {
    /*
     * Linkage for run trees.  There are two disjoint uses:
     *
     * 1) arena_t's runs_avail tree.
     * 2) arena_run_t conceptually uses this linkage for in-use non-full
     *    runs, rather than directly embedding linkage.
     */
    rb_node(arena_chunk_map_misc_t) rb_link;
    union {
        /* Linkage for list of dirty runs. */
        arena_runs_dirty_link_t rd;

        /* Profile counters, used for large object runs. */
        union {
            void        *prof_tctx_pun;
            prof_tctx_t *prof_tctx;
        };

        /* Small region run metadata. */
        arena_run_t     run;
    };
};
typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;

#endif /* JEMALLOC_ARENA_STRUCTS_A */

#ifdef JEMALLOC_ARENA_STRUCTS_B
/* Arena chunk header. */
struct arena_chunk_s {
    /*
     * A pointer to the arena that owns the chunk is stored within the node.
     * This field as a whole is used by chunks_rtree to support both
     * ivsalloc() and core-based debugging.
     */
    extent_node_t       node;
    /*
     * Map of pages within chunk that keeps track of free/large/small.  The
     * first map_bias entries are omitted, since the chunk header does not
     * need to be tracked in the map.  This omission saves a header page
     * for common chunk sizes (e.g. 4 MiB).
     */
    arena_chunk_map_bits_t  map_bits[1]; /* Dynamically sized. */
};

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each run has the following layout:
 *
 *               /--------------------\
 *               | pad?               |
 *               |--------------------|
 *               | redzone            |
 *   reg0_offset | region 0           |
 *               | redzone            |
 *               |--------------------| \
 *               | redzone            | |
 *               | region 1           |  > reg_interval
 *               | redzone            | /
 *               |--------------------|
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *               | region nregs-1     |
 *               | redzone            |
 *               \--------------------/
 *
 * reg_interval has at least the same minimum alignment as reg_size; this
 * preserves the alignment constraint that sa2u() depends on.  Alignment pad is
 * either 0 or redzone_size; it is present only if needed to align reg0_offset.
 */
struct arena_bin_info_s {
    /* Size of regions in a run for this bin's size class. */
    size_t          reg_size;
    /* Redzone size. */
    size_t          redzone_size;
    /* Interval between regions (reg_size + (redzone_size << 1)). */
    size_t          reg_interval;
    /* Total size of a run for this bin's size class. */
    size_t          run_size;
    /* Total number of regions in a run for this bin's size class. */
    uint32_t        nregs;
    /*
     * Metadata used to manipulate bitmaps for runs associated with this
     * bin.
     */
    bitmap_info_t   bitmap_info;
    /* Offset of first region in a run for this bin's size class. */
    uint32_t        reg0_offset;
};
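/*
 * Illustrative sketch (not part of the API): the address of region i within a
 * run follows directly from the layout above, given the run's first page
 * address rpages:
 *
 *   reg = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
 *       (size_t)i * bin_info->reg_interval);
 *
 * arena_run_regind() (defined below) performs the inverse mapping.
 */
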
struct arena_bin_s {
    /*
     * All operations on runcur, runs, and stats require that lock be
     * locked.  Run allocation/deallocation are protected by the arena lock,
     * which may be acquired while holding one or more bin locks, but not
     * vice versa.
     */
    malloc_mutex_t      lock;
    /*
     * Current run being used to service allocations of this bin's size
     * class.
     */
    arena_run_t         *runcur;
    /*
     * Tree of non-full runs.  This tree is used when looking for an
     * existing run when runcur is no longer usable.  We choose the
     * non-full run that is lowest in memory; this policy tends to keep
     * objects packed well, and it can also help reduce the number of
     * almost-empty chunks.
     */
    arena_run_tree_t    runs;
    /* Bin statistics. */
    malloc_bin_stats_t  stats;
};

struct arena_s {
    /* This arena's index within the arenas array. */
    unsigned        ind;

    /*
     * Number of threads currently assigned to this arena.  This field is
     * synchronized via atomic operations.
     */
    unsigned        nthreads;

    /*
     * There are three classes of arena operations from a locking
     * perspective:
     * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
     * 2) Bin-related operations are protected by bin locks.
     * 3) Chunk- and run-related operations are protected by this mutex.
     */
    malloc_mutex_t  lock;

    arena_stats_t   stats;
    /*
     * List of tcaches for extant threads associated with this arena.
     * Stats from these are merged incrementally, and at exit if
     * opt_stats_print is enabled.
     */
    ql_head(tcache_t)   tcache_ql;
    uint64_t        prof_accumbytes;

    /*
     * PRNG state for cache index randomization of large allocation base
     * pointers.
     */
    uint64_t        offset_state;

    dss_prec_t      dss_prec;

    /*
     * In order to avoid rapid chunk allocation/deallocation when an arena
     * oscillates right on the cusp of needing a new chunk, cache the most
     * recently freed chunk.  The spare is left in the arena's chunk trees
     * until it is deleted.
     *
     * There is one spare chunk per arena, rather than one spare total, in
     * order to avoid interactions between multiple threads that could make
     * a single spare inadequate.
     */
    arena_chunk_t   *spare;

    /* Minimum ratio (log base 2) of nactive:ndirty. */
    ssize_t         lg_dirty_mult;

    /* True if a thread is currently executing arena_purge_to_limit(). */
    bool            purging;

    /* Number of pages in active runs and huge regions. */
    size_t          nactive;

    /*
     * Current count of pages within unused runs that are potentially
     * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
     * By tracking this, we can institute a limit on how much dirty unused
     * memory is mapped for each arena.
     */
    size_t          ndirty;

    /*
     * Unused dirty memory this arena manages.  Dirty memory is conceptually
     * tracked as an arbitrarily interleaved LRU of dirty runs and cached
     * chunks, but the list linkage is actually semi-duplicated in order to
     * avoid extra arena_chunk_map_misc_t space overhead.
     *
     *   LRU-----------------------------------------------------------MRU
     *
     *        /-- arena ---\
     *        |            |
     *        |            |
     *        |------------|                             /- chunk -\
     *   ...->|chunks_cache|<--------------------------->|  /----\ |<--...
     *        |------------|                             |  |node| |
     *        |            |                             |  |    | |
     *        |            |    /- run -\    /- run -\   |  |    | |
     *        |            |    |       |    |       |   |  |    | |
     *        |            |    |       |    |       |   |  |    | |
     *        |------------|    |-------|    |-------|   |  |----| |
     *   ...->|runs_dirty  |<-->|rd     |<-->|rd     |<---->|rd  |<----...
     *        |------------|    |-------|    |-------|   |  |----| |
     *        |            |    |       |    |       |   |  |    | |
     *        |            |    |       |    |       |   |  \----/ |
     *        |            |    \-------/    \-------/   |         |
     *        |            |                             |         |
     *        |            |                             |         |
     *        \------------/                             \---------/
     */
    arena_runs_dirty_link_t runs_dirty;
    extent_node_t           chunks_cache;

    /*
     * Approximate time in seconds from the creation of a set of unused
     * dirty pages until an equivalent set of unused dirty pages is purged
     * and/or reused.
     */
    ssize_t         decay_time;
    /* decay_time / SMOOTHSTEP_NSTEPS. */
    nstime_t        decay_interval;
    /*
     * Time at which the current decay interval logically started.  We do
     * not actually advance to a new epoch until sometime after it starts
     * because of scheduling and computation delays, and it is even possible
     * to completely skip epochs.  In all cases, during epoch advancement we
     * merge all relevant activity into the most recently recorded epoch.
     */
    nstime_t        decay_epoch;
    /* decay_deadline randomness generator. */
    uint64_t        decay_jitter_state;
    /*
     * Deadline for current epoch.  This is the sum of decay_interval and
     * per epoch jitter which is a uniform random variable in
     * [0..decay_interval).  Epochs always advance by precise multiples of
     * decay_interval, but we randomize the deadline to reduce the
     * likelihood of arenas purging in lockstep.
     */
    nstime_t        decay_deadline;
    /*
     * Number of dirty pages at beginning of current epoch.  During epoch
     * advancement we use the delta between decay_ndirty and ndirty to
     * determine how many dirty pages, if any, were generated, and record
     * the result in decay_backlog.
     */
    size_t          decay_ndirty;
    /*
     * Memoized result of arena_decay_backlog_npages_limit() corresponding
     * to the current contents of decay_backlog, i.e. the limit on how many
     * pages are allowed to exist for the decay epochs.
     */
    size_t          decay_backlog_npages_limit;
    /*
     * Trailing log of how many unused dirty pages were generated during
     * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
     * element is the most recent epoch.  Corresponding epoch times are
     * relative to decay_epoch.
     */
    size_t          decay_backlog[SMOOTHSTEP_NSTEPS];
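    /*
     * Worked example (illustrative): with decay_time == 10 (the default)
     * and SMOOTHSTEP_NSTEPS == 200, decay_interval is 50 ms.  If an epoch
     * advance observes ndirty == 700 while decay_ndirty == 500, then 200
     * newly dirtied pages are recorded in the final decay_backlog element,
     * and the smoothstep-weighted sum of the backlog becomes the new
     * decay_backlog_npages_limit.
     */
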
    /* Extant huge allocations. */
    ql_head(extent_node_t)  huge;
    /* Synchronizes all huge allocation/update/deallocation. */
    malloc_mutex_t          huge_mtx;

    /*
     * Trees of chunks that were previously allocated (trees differ only in
     * node ordering).  These are used when allocating chunks, in an attempt
     * to re-use address space.  Depending on function, different tree
     * orderings are needed, which is why there are two trees with the same
     * contents.
     */
    extent_tree_t   chunks_szad_cached;
    extent_tree_t   chunks_ad_cached;
    extent_tree_t   chunks_szad_retained;
    extent_tree_t   chunks_ad_retained;

    malloc_mutex_t          chunks_mtx;
    /* Cache of nodes that were allocated via base_alloc(). */
    ql_head(extent_node_t)  node_cache;
    malloc_mutex_t          node_cache_mtx;

    /* User-configurable chunk hook functions. */
    chunk_hooks_t   chunk_hooks;

    /* bins is used to store trees of free regions. */
    arena_bin_t     bins[NBINS];

    /*
     * Quantized address-ordered trees of this arena's available runs.  The
     * trees are used for first-best-fit run allocation.
     */
    arena_run_tree_t    runs_avail[1]; /* Dynamically sized. */
};

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
    ticker_t        decay_ticker;
};
#endif /* JEMALLOC_ARENA_STRUCTS_B */

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

static const size_t large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    PAGE
#else
    0
#endif
    ;
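/*
 * Illustrative note: with cache index randomization enabled
 * (JEMALLOC_CACHE_OBLIVIOUS), each large run carries one page of padding
 * (large_pad == PAGE) so that the returned base pointer can be offset by a
 * random number of cache lines, driven by the arena's offset_state PRNG.
 */
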
extern purge_mode_t opt_purge;
extern const char   *purge_mode_names[];
extern ssize_t      opt_lg_dirty_mult;
extern ssize_t      opt_decay_time;

extern arena_bin_info_t arena_bin_info[NBINS];

extern size_t       map_bias; /* Number of arena chunk header pages. */
extern size_t       map_misc_offset;
extern size_t       arena_maxrun; /* Max run size for arenas. */
extern size_t       large_maxclass; /* Max large size class. */
extern size_t       run_quantize_max; /* Max run_quantize_*() input. */
extern unsigned     nlclasses; /* Number of large size classes. */
extern unsigned     nhclasses; /* Number of huge size classes. */

#ifdef JEMALLOC_JET
typedef size_t (run_quantize_t)(size_t);
extern run_quantize_t *run_quantize_floor;
extern run_quantize_t *run_quantize_ceil;
#endif
void	arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
    bool cache);
void	arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
    bool cache);
extent_node_t	*arena_node_alloc(arena_t *arena);
void	arena_node_dalloc(arena_t *arena, extent_node_t *node);
void	*arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero);
void	arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
void	arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
    size_t oldsize, size_t usize);
void	arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
    size_t oldsize, size_t usize);
bool	arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, bool *zero);
ssize_t	arena_lg_dirty_mult_get(arena_t *arena);
bool	arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
ssize_t	arena_decay_time_get(arena_t *arena);
bool	arena_decay_time_set(arena_t *arena, ssize_t decay_time);
void	arena_maybe_purge(arena_t *arena);
void	arena_purge(arena_t *arena, bool all);
void	arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
    szind_t binind, uint64_t prof_accumbytes);
void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
    bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
    size_t);
extern arena_redzone_corruption_t *arena_redzone_corruption;
typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void	arena_quarantine_junk_small(void *ptr, size_t usize);
void	*arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t ind, bool zero);
void	*arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
    bool zero, tcache_t *tcache);
void	*arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
void	arena_prof_promoted(const void *ptr, size_t size);
void	arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, arena_chunk_map_bits_t *bitselm);
void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_bits_t *bitselm);
void	arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void	arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
void	arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
    void *ptr);
void	arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
    void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool	arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
void	*arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t	arena_dss_prec_get(arena_t *arena);
bool	arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t	arena_lg_dirty_mult_default_get(void);
bool	arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
ssize_t	arena_decay_time_default_get(void);
bool	arena_decay_time_default_set(ssize_t decay_time);
void	arena_basic_stats_merge(arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty);
void	arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
unsigned	arena_nthreads_get(arena_t *arena);
void	arena_nthreads_inc(arena_t *arena);
void	arena_nthreads_dec(arena_t *arena);
arena_t	*arena_new(unsigned ind);
bool	arena_boot(void);
void	arena_prefork(arena_t *arena);
void	arena_postfork_parent(arena_t *arena);
void	arena_postfork_child(arena_t *arena);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_bits_t	*arena_bitselm_get(arena_chunk_t *chunk,
    size_t pageind);
arena_chunk_map_misc_t	*arena_miscelm_get(arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
void	*arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t	*arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t	*arena_run_to_miscelm(arena_run_t *run);
size_t	*arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbitsp_read(size_t *mapbitsp);
size_t	arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_size_decode(size_t mapbits);
size_t	arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
szind_t	arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void	arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t	arena_mapbits_size_encode(size_t size);
void	arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void	arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size);
void	arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
    size_t flags);
void	arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void	arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    szind_t binind);
void	arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
    size_t runind, szind_t binind, size_t flags);
void	arena_metadata_allocated_add(arena_t *arena, size_t size);
void	arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t	arena_metadata_allocated_get(arena_t *arena);
bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
szind_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
size_t	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
prof_tctx_t	*arena_prof_tctx_get(const void *ptr);
void	arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
void	arena_prof_tctx_reset(const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx);
void	arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks);
void	arena_decay_tick(tsd_t *tsd, arena_t *arena);
void	*arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
    bool zero, tcache_t *tcache, bool slow_path);
arena_t	*arena_aalloc(const void *ptr);
size_t	arena_salloc(const void *ptr, bool demote);
void	arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
void	arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
{

    assert(pageind >= map_bias);
    assert(pageind < chunk_npages);

    return (&chunk->map_bits[pageind-map_bias]);
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
{

    assert(pageind >= map_bias);
    assert(pageind < chunk_npages);

    return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
        (uintptr_t)map_misc_offset) + pageind-map_bias);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
    size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
        map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;

    assert(pageind >= map_bias);
    assert(pageind < chunk_npages);

    return (pageind);
}

JEMALLOC_ALWAYS_INLINE void *
arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
    size_t pageind = arena_miscelm_to_pageind(miscelm);

    return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
{
    arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
        *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));

    assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
    assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);

    return (miscelm);
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_run_to_miscelm(arena_run_t *run)
{
    arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
        *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));

    assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
    assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);

    return (miscelm);
}

JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{

    return (&arena_bitselm_get(chunk, pageind)->bits);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(size_t *mapbitsp)
{

    return (*mapbitsp);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{

    return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
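
/*
 * Usage sketch (illustrative): mapping an interior pointer to its map bits;
 * this same pattern appears in the inline functions later in this header.
 */
#if 0
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    size_t mapbits = arena_mapbits_get(chunk, pageind);
#endif
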
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_decode(size_t mapbits)
{
    size_t size;

#if CHUNK_MAP_SIZE_SHIFT > 0
    size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
    size = mapbits & CHUNK_MAP_SIZE_MASK;
#else
    size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
#endif

    return (size);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
    return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
        (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
    return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
        CHUNK_MAP_ALLOCATED);
    return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
}

JEMALLOC_ALWAYS_INLINE szind_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;
    szind_t binind;

    mapbits = arena_mapbits_get(chunk, pageind);
    binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
    assert(binind < NBINS || binind == BININD_INVALID);
    return (binind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
        (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
    return (mapbits & CHUNK_MAP_DIRTY);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
        (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
    return (mapbits & CHUNK_MAP_UNZEROED);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
        (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
    return (mapbits & CHUNK_MAP_DECOMMITTED);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_LARGE);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
{

    *mapbitsp = mapbits;
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_encode(size_t size)
{
    size_t mapbits;

#if CHUNK_MAP_SIZE_SHIFT > 0
    mapbits = size << CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
    mapbits = size;
#else
    mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
#endif

    assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
    return (mapbits);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);

    assert((size & PAGE_MASK) == 0);
    assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
    assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
        (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
    arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
        CHUNK_MAP_BININD_INVALID | flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
    size_t mapbits = arena_mapbitsp_read(mapbitsp);

    assert((size & PAGE_MASK) == 0);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
    arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
        (mapbits & ~CHUNK_MAP_SIZE_MASK));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);

    assert((flags & CHUNK_MAP_UNZEROED) == flags);
    arena_mapbitsp_write(mapbitsp, flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);

    assert((size & PAGE_MASK) == 0);
    assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
    assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
        (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
    arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
        CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
        CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    szind_t binind)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
    size_t mapbits = arena_mapbitsp_read(mapbitsp);

    assert(binind <= BININD_INVALID);
    assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
        large_pad);
    arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
        (binind << CHUNK_MAP_BININD_SHIFT));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    szind_t binind, size_t flags)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);

    assert(binind < BININD_INVALID);
    assert(pageind - runind >= map_bias);
    assert((flags & CHUNK_MAP_UNZEROED) == flags);
    arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
        (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
}
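
/*
 * Usage sketch (illustrative): when a small run spanning npages is carved out,
 * the caller marks each of its pages with the page's offset within the run,
 * e.g.:
 */
#if 0
    for (i = 0; i < npages; i++)
        arena_mapbits_small_set(chunk, pageind+i, i, binind, flags);
#endif
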
JEMALLOC_INLINE void
arena_metadata_allocated_add(arena_t *arena, size_t size)
{

    atomic_add_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE void
arena_metadata_allocated_sub(arena_t *arena, size_t size)
{

    atomic_sub_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE size_t
arena_metadata_allocated_get(arena_t *arena)
{

    return (atomic_read_z(&arena->stats.metadata_allocated));
}

JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{

    cassert(config_prof);
    assert(prof_interval != 0);

    arena->prof_accumbytes += accumbytes;
    if (arena->prof_accumbytes >= prof_interval) {
        arena->prof_accumbytes -= prof_interval;
        return (true);
    }
    return (false);
}

JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{

    cassert(config_prof);

    if (likely(prof_interval == 0))
        return (false);
    return (arena_prof_accum_impl(arena, accumbytes));
}

JEMALLOC_INLINE bool
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

    cassert(config_prof);

    if (likely(prof_interval == 0))
        return (false);

    {
        bool ret;

        malloc_mutex_lock(&arena->lock);
        ret = arena_prof_accum_impl(arena, accumbytes);
        malloc_mutex_unlock(&arena->lock);
        return (ret);
    }
}

JEMALLOC_ALWAYS_INLINE szind_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
    szind_t binind;

    binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

    if (config_debug) {
        arena_chunk_t *chunk;
        arena_t *arena;
        size_t pageind;
        size_t actual_mapbits;
        size_t rpages_ind;
        arena_run_t *run;
        arena_bin_t *bin;
        szind_t run_binind, actual_binind;
        arena_bin_info_t *bin_info;
        arena_chunk_map_misc_t *miscelm;
        void *rpages;

        assert(binind != BININD_INVALID);
        assert(binind < NBINS);
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        arena = extent_node_arena_get(&chunk->node);
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        actual_mapbits = arena_mapbits_get(chunk, pageind);
        assert(mapbits == actual_mapbits);
        assert(arena_mapbits_large_get(chunk, pageind) == 0);
        assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
        rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
            pageind);
        miscelm = arena_miscelm_get(chunk, rpages_ind);
        run = &miscelm->run;
        run_binind = run->binind;
        bin = &arena->bins[run_binind];
        actual_binind = (szind_t)(bin - arena->bins);
        assert(run_binind == actual_binind);
        bin_info = &arena_bin_info[actual_binind];
        rpages = arena_miscelm_to_rpages(miscelm);
        assert(((uintptr_t)ptr - ((uintptr_t)rpages +
            (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
            == 0);
    }

    return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
    szind_t binind = (szind_t)(bin - arena->bins);
    assert(binind < NBINS);
    return (binind);
}

JEMALLOC_INLINE size_t
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
    size_t diff, interval, shift, regind;
    arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
    void *rpages = arena_miscelm_to_rpages(miscelm);

    /*
     * Freeing a pointer lower than region zero can cause assertion
     * failure.
     */
    assert((uintptr_t)ptr >= (uintptr_t)rpages +
        (uintptr_t)bin_info->reg0_offset);

    /*
     * Avoid doing division with a variable divisor if possible.  Using
     * actual division here can reduce allocator throughput by over 20%!
     */
    diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
        bin_info->reg0_offset);

    /* Rescale (factor powers of 2 out of the numerator and denominator). */
    interval = bin_info->reg_interval;
    shift = ffs_zu(interval) - 1;
    diff >>= shift;
    interval >>= shift;

    if (interval == 1) {
        /* The divisor was a power of 2. */
        regind = diff;
    } else {
        /*
         * To divide by a number D that is not a power of two we
         * multiply by (2^21 / D) and then right shift by 21 positions.
         *
         *   X / D
         *
         * becomes
         *
         *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
         *
         * We can omit the first three elements, because we never
         * divide by 0, and 1 and 2 are both powers of two, which are
         * handled above.
         */
#define	SIZE_INV_SHIFT	((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
#define	SIZE_INV(s)	(((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
        static const size_t interval_invs[] = {
            SIZE_INV(3),
            SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
            SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
            SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
            SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
            SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
            SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
            SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
        };

        if (likely(interval <= ((sizeof(interval_invs) /
            sizeof(size_t)) + 2))) {
            regind = (diff * interval_invs[interval - 3]) >>
                SIZE_INV_SHIFT;
        } else
            regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
    }
    assert(diff == regind * interval);
    assert(regind < bin_info->nregs);

    return (regind);
}
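
/*
 * Worked example (illustrative): for a bin with reg_interval == 48 and a
 * pointer 480 bytes past region zero, diff == 480.  ffs_zu(48) - 1 == 4, so
 * diff and interval rescale to 30 and 3; then
 * (30 * SIZE_INV(3)) >> SIZE_INV_SHIFT == 10, matching 480 / 48 == 10.
 */
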
JEMALLOC_INLINE prof_tctx_t *
arena_prof_tctx_get(const void *ptr)
{
    prof_tctx_t *ret;
    arena_chunk_t *chunk;

    cassert(config_prof);
    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr)) {
        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        size_t mapbits = arena_mapbits_get(chunk, pageind);
        assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
        if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
            ret = (prof_tctx_t *)(uintptr_t)1U;
        else {
            arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
                pageind);
            ret = atomic_read_p(&elm->prof_tctx_pun);
        }
    } else
        ret = huge_prof_tctx_get(ptr);

    return (ret);
}

JEMALLOC_INLINE void
arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
    arena_chunk_t *chunk;

    cassert(config_prof);
    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr)) {
        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;

        assert(arena_mapbits_allocated_get(chunk, pageind) != 0);

        if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
            (uintptr_t)1U)) {
            arena_chunk_map_misc_t *elm;

            assert(arena_mapbits_large_get(chunk, pageind) != 0);

            elm = arena_miscelm_get(chunk, pageind);
            atomic_write_p(&elm->prof_tctx_pun, tctx);
        } else {
            /*
             * tctx must always be initialized for large runs.
             * Assert that the surrounding conditional logic is
             * equivalent to checking whether ptr refers to a large
             * run.
             */
            assert(arena_mapbits_large_get(chunk, pageind) == 0);
        }
    } else
        huge_prof_tctx_set(ptr, tctx);
}

JEMALLOC_INLINE void
arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
    prof_tctx_t *old_tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);

    if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
        (uintptr_t)old_tctx > (uintptr_t)1U))) {
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        if (likely(chunk != ptr)) {
            size_t pageind;
            arena_chunk_map_misc_t *elm;

            pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
                LG_PAGE;
            assert(arena_mapbits_allocated_get(chunk, pageind) !=
                0);
            assert(arena_mapbits_large_get(chunk, pageind) != 0);

            elm = arena_miscelm_get(chunk, pageind);
            atomic_write_p(&elm->prof_tctx_pun,
                (prof_tctx_t *)(uintptr_t)1U);
        } else
            huge_prof_tctx_reset(ptr);
    }
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks)
{
    ticker_t *decay_ticker;

    if (unlikely(tsd == NULL))
        return;
    decay_ticker = decay_ticker_get(tsd, arena->ind);
    if (unlikely(decay_ticker == NULL))
        return;
    if (unlikely(ticker_ticks(decay_ticker, nticks)))
        arena_purge(arena, false);
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsd_t *tsd, arena_t *arena)
{

    arena_decay_ticks(tsd, arena, 1);
}
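
/*
 * Usage note (illustrative): allocation and deallocation paths tick once per
 * event, e.g. arena_decay_tick(tsd, arena), so that decay-based purging makes
 * progress even when an arena's dirty page count is otherwise stable.
 */
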
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool slow_path)
{

    assert(size != 0);

    if (likely(tcache != NULL)) {
        if (likely(size <= SMALL_MAXCLASS)) {
            return (tcache_alloc_small(tsd, arena, tcache, size,
                ind, zero, slow_path));
        }
        if (likely(size <= tcache_maxclass)) {
            return (tcache_alloc_large(tsd, arena, tcache, size,
                ind, zero, slow_path));
        }
        /* (size > tcache_maxclass) case falls through. */
        assert(size > tcache_maxclass);
    }

    return (arena_malloc_hard(tsd, arena, size, ind, zero, tcache));
}

JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(const void *ptr)
{
    arena_chunk_t *chunk;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr))
        return (extent_node_arena_get(&chunk->node));
    else
        return (huge_aalloc(ptr));
}

/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
    size_t ret;
    arena_chunk_t *chunk;
    size_t pageind;
    szind_t binind;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr)) {
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
        binind = arena_mapbits_binind_get(chunk, pageind);
        if (unlikely(binind == BININD_INVALID || (config_prof && !demote
            && arena_mapbits_large_get(chunk, pageind) != 0))) {
            /*
             * Large allocation.  In the common case (demote), and
             * as this is an inline function, most callers will only
             * end up looking at binind to determine that ptr is a
             * small allocation.
             */
            assert(config_cache_oblivious || ((uintptr_t)ptr &
                PAGE_MASK) == 0);
            ret = arena_mapbits_large_size_get(chunk, pageind) -
                large_pad;
            assert(ret != 0);
            assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
                chunk_npages);
            assert(arena_mapbits_dirty_get(chunk, pageind) ==
                arena_mapbits_dirty_get(chunk,
                pageind+((ret+large_pad)>>LG_PAGE)-1));
        } else {
            /*
             * Small allocation (possibly promoted to a large
             * object).
             */
            assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
                arena_ptr_small_binind_get(ptr,
                arena_mapbits_get(chunk, pageind)) == binind);
            ret = index2size(binind);
        }
    } else
        ret = huge_salloc(ptr);

    return (ret);
}

JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{
    arena_chunk_t *chunk;
    size_t pageind, mapbits;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr)) {
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        mapbits = arena_mapbits_get(chunk, pageind);
        assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
        if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
            /* Small allocation. */
            if (likely(tcache != NULL)) {
                szind_t binind = arena_ptr_small_binind_get(ptr,
                    mapbits);
                tcache_dalloc_small(tsd, tcache, ptr, binind,
                    slow_path);
            } else {
                arena_dalloc_small(tsd, extent_node_arena_get(
                    &chunk->node), chunk, ptr, pageind);
            }
        } else {
            size_t size = arena_mapbits_large_size_get(chunk,
                pageind);

            assert(config_cache_oblivious || ((uintptr_t)ptr &
                PAGE_MASK) == 0);

            if (likely(tcache != NULL) && size - large_pad <=
                tcache_maxclass) {
                tcache_dalloc_large(tsd, tcache, ptr, size -
                    large_pad, slow_path);
            } else {
                arena_dalloc_large(tsd, extent_node_arena_get(
                    &chunk->node), chunk, ptr);
            }
        }
    } else
        huge_dalloc(tsd, ptr, tcache);
}

JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
    arena_chunk_t *chunk;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr)) {
        if (config_prof && opt_prof) {
            size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
                LG_PAGE;
            assert(arena_mapbits_allocated_get(chunk, pageind) !=
                0);
            if (arena_mapbits_large_get(chunk, pageind) != 0) {
                /*
                 * Make sure to use promoted size, not request
                 * size.
                 */
                size = arena_mapbits_large_size_get(chunk,
                    pageind) - large_pad;
            }
        }
        assert(s2u(size) == s2u(arena_salloc(ptr, false)));

        if (likely(size <= SMALL_MAXCLASS)) {
            /* Small allocation. */
            if (likely(tcache != NULL)) {
                szind_t binind = size2index(size);
                tcache_dalloc_small(tsd, tcache, ptr, binind,
                    true);
            } else {
                size_t pageind = ((uintptr_t)ptr -
                    (uintptr_t)chunk) >> LG_PAGE;
                arena_dalloc_small(tsd, extent_node_arena_get(
                    &chunk->node), chunk, ptr, pageind);
            }
        } else {
            assert(config_cache_oblivious || ((uintptr_t)ptr &
                PAGE_MASK) == 0);

            if (likely(tcache != NULL) && size <= tcache_maxclass) {
                tcache_dalloc_large(tsd, tcache, ptr, size,
                    true);
            } else {
                arena_dalloc_large(tsd, extent_node_arena_get(
                    &chunk->node), chunk, ptr);
            }
        }
    } else
        huge_dalloc(tsd, ptr, tcache);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/