#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"

/******************************************************************************/
/* Data. */

static base_t *b0;

metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;

const char *metadata_thp_mode_names[] = {
	"disabled",
	"auto",
	"always"
};

/******************************************************************************/
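
/*
 * Metadata blocks are explicitly madvise()d as huge pages only when
 * metadata_thp is enabled and the system THP mode recorded at init is
 * thp_mode_default.
 */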
static inline bool
metadata_thp_madvise(void) {
	return (metadata_thp_enabled() &&
	    (init_system_thp_mode == thp_mode_default));
}
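
/*
 * Map a HUGEPAGE-aligned block of virtual memory for metadata, either through
 * the default mmap-based path or through user-supplied extent hooks.
 */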
static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;

	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
	assert(size == HUGEPAGE_CEILING(size));
	size_t alignment = HUGEPAGE;
	if (extent_hooks == &extent_hooks_default) {
		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
	} else {
		/* No arena context as we are creating new arenas. */
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
		    &zero, &commit, ind);
		post_reentrancy(tsd);
	}

	return addr;
}
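
/*
 * Return memory obtained from base_map() to the OS, or to the custom extent
 * hooks that provided it.
 */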
static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
    size_t size) {
	/*
	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
	 * stopping at first success. This cascade is performed for consistency
	 * with the cascade in extent_dalloc_wrapper() because an application's
	 * custom hooks may not support e.g. dalloc. This function is only ever
	 * called as a side effect of arena destruction, so although it might
	 * seem pointless to do anything besides dalloc here, the application
	 * may in fact want the end state of all associated virtual memory to be
	 * in some consistent-but-unmapped state.
	 */
	if (extent_hooks == &extent_hooks_default) {
		if (!extent_dalloc_mmap(addr, size)) {
			goto label_done;
		}
		if (!pages_decommit(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_forced(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_lazy(addr, size)) {
			goto label_done;
		}
		/* Nothing worked. This should never happen. */
		not_reached();
	} else {
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		if (extent_hooks->dalloc != NULL &&
		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->decommit != NULL &&
		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_forced != NULL &&
		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
		    size, ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_lazy != NULL &&
		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		/* Nothing worked. That's the application's problem. */
	label_post_reentrancy:
		post_reentrancy(tsd);
	}
label_done:
	if (metadata_thp_madvise()) {
		/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (size & HUGEPAGE_MASK) == 0);
		pages_nohuge(addr, size);
	}
}
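
/*
 * Initialize an extent to track the given address range, assigning it the
 * next extent serial number.
 */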
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
    size_t size) {
	size_t sn;

	sn = *extent_sn_next;
	(*extent_sn_next)++;

	extent_binit(extent, addr, size, sn);
}
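
/*
 * Number of blocks currently owned by the base, optionally counting a block
 * that is about to be added.
 */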
static size_t
base_get_num_blocks(base_t *base, bool with_new_block) {
	base_block_t *b = base->blocks;
	assert(b != NULL);

	size_t n_blocks = with_new_block ? 2 : 1;
	while (b->next != NULL) {
		n_blocks++;
		b = b->next;
	}

	return n_blocks;
}
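
/*
 * With metadata_thp:auto, defer enabling huge pages until a base has grown to
 * the threshold number of blocks; at that point all existing blocks are
 * madvise()d at once, and subsequent blocks are madvise()d as they are added.
 */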
static void
base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
	assert(opt_metadata_thp == metadata_thp_auto);
	malloc_mutex_assert_owner(tsdn, &base->mtx);
	if (base->auto_thp_switched) {
		return;
	}
	/* Called when adding a new block. */
	bool should_switch;
	if (base_ind_get(base) != 0) {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD);
	} else {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD_A0);
	}
	if (!should_switch) {
		return;
	}

	base->auto_thp_switched = true;
	assert(!config_stats || base->n_thp == 0);
	/* Make the initial blocks THP lazily. */
	base_block_t *block = base->blocks;
	while (block != NULL) {
		assert((block->size & HUGEPAGE_MASK) == 0);
		pages_huge(block, block->size);
		if (config_stats) {
			base->n_thp += HUGEPAGE_CEILING(block->size -
			    extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
		}
		block = block->next;
		assert(block == NULL || (base_ind_get(base) == 0));
	}
}
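
/*
 * Bump-allocate a size-byte region with the requested alignment from the
 * front of extent. Any alignment gap is reported via *gap_size, and extent is
 * updated in place to describe the remaining trailing space.
 */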
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
    size_t alignment) {
	void *ret;

	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
	assert(size == ALIGNMENT_CEILING(size, alignment));

	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
	    alignment) - (uintptr_t)extent_addr_get(extent);
	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
	assert(extent_bsize_get(extent) >= *gap_size + size);
	extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
	    *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
	    extent_sn_get(extent));
	return ret;
}
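
/*
 * After a bump allocation, reinsert whatever space remains in extent into the
 * availability heaps and update the base's statistics.
 */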
static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
    void *addr, size_t size) {
	if (extent_bsize_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
		 * exceed extent's size.
		 */
		szind_t index_floor =
		    sz_size2index(extent_bsize_get(extent) + 1) - 1;
		extent_heap_insert(&base->avail[index_floor], extent);
	}

	if (config_stats) {
		base->allocated += size;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation. Adjust n_thp similarly when
		 * metadata_thp is enabled.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		if (metadata_thp_madvise() && (opt_metadata_thp ==
		    metadata_thp_always || base->auto_thp_switched)) {
			base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
			    - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
			    LG_HUGEPAGE;
			assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
		}
	}
}

static void *
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
    size_t alignment) {
	void *ret;
	size_t gap_size;

	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
	base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
	return ret;
}

/*
 * Allocate a block of virtual memory that is large enough to start with a
 * base_block_t header, followed by an object of specified size and alignment.
 * On success a pointer to the initialized base_block_t header is returned.
 */
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
    unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
    size_t alignment) {
	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t header_size = sizeof(base_block_t);
	size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
	    header_size;
	/*
	 * Create increasingly larger blocks in order to limit the total number
	 * of disjoint virtual memory ranges. Choose the next size in the page
	 * size class series (skipping size classes that are not a multiple of
	 * HUGEPAGE), or a size large enough to satisfy the requested size and
	 * alignment, whichever is larger.
	 */
	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
	    + usize));
	pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
	    *pind_last;
	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
	size_t block_size = (min_block_size > next_block_size) ? min_block_size
	    : next_block_size;
	base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
	    block_size);
	if (block == NULL) {
		return NULL;
	}

	if (metadata_thp_madvise()) {
		void *addr = (void *)block;
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (block_size & HUGEPAGE_MASK) == 0);
		if (opt_metadata_thp == metadata_thp_always) {
			pages_huge(addr, block_size);
		} else if (opt_metadata_thp == metadata_thp_auto &&
		    base != NULL) {
			/* base != NULL indicates this is not a new base. */
			malloc_mutex_lock(tsdn, &base->mtx);
			base_auto_thp_switch(tsdn, base);
			if (base->auto_thp_switched) {
				pages_huge(addr, block_size);
			}
			malloc_mutex_unlock(tsdn, &base->mtx);
		}
	}

	*pind_last = sz_psz2ind(block_size);
	block->size = block_size;
	block->next = NULL;
	assert(block_size >= header_size);
	base_extent_init(extent_sn_next, &block->extent,
	    (void *)((uintptr_t)block + header_size), block_size - header_size);
	return block;
}

/*
 * Allocate an extent that is at least as large as specified size, with
 * specified alignment.
 */
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &base->mtx);

	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	/*
	 * Drop mutex during base_block_alloc(), because an extent hook will be
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
	base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
	    alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
	if (block == NULL) {
		return NULL;
	}
	block->next = base->blocks;
	base->blocks = block;
	if (config_stats) {
		base->allocated += sizeof(base_block_t);
		base->resident += PAGE_CEILING(sizeof(base_block_t));
		base->mapped += block->size;
		if (metadata_thp_madvise() &&
		    !(opt_metadata_thp == metadata_thp_auto
		    && !base->auto_thp_switched)) {
			assert(base->n_thp > 0);
			base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
			    LG_HUGEPAGE;
		}
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	return &block->extent;
}

base_t *
b0get(void) {
	return b0;
}
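
/*
 * Create a new base; the base_t itself is bump-allocated out of the first
 * block, so destroying the base only requires unmapping its blocks.
 */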
base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	pszind_t pind_last = 0;
	size_t extent_sn_next = 0;
	base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
	}

	size_t gap_size;
	size_t base_alignment = CACHELINE;
	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
	    malloc_mutex_rank_exclusive)) {
		base_unmap(tsdn, extent_hooks, ind, block, block->size);
		return NULL;
	}
	base->pind_last = pind_last;
	base->extent_sn_next = extent_sn_next;
	base->blocks = block;
	base->auto_thp_switched = false;
	for (szind_t i = 0; i < NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
	}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
		base->resident = PAGE_CEILING(sizeof(base_block_t));
		base->mapped = block->size;
		base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
		    metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
		    >> LG_HUGEPAGE : 0;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
	    base_size);

	return base;
}
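
/*
 * Destroy a base and unmap all of its blocks; the base_t itself lives inside
 * one of the blocks, so it is released along with them.
 */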
void
base_delete(tsdn_t *tsdn, base_t *base) {
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	base_block_t *next = base->blocks;
	do {
		base_block_t *block = next;
		next = block->next;
		base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
		    block->size);
	} while (next != NULL);
}

extent_hooks_t *
base_extent_hooks_get(base_t *base) {
	return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
	    ATOMIC_ACQUIRE);
}

extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
	extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
	return old_extent_hooks;
}
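
/*
 * Serve an allocation from the existing available space if possible, and
 * otherwise from a newly allocated block. If esn is non-NULL, it receives the
 * serial number of the extent the space was carved from.
 */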
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
    size_t *esn) {
	alignment = QUANTUM_CEILING(alignment);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t asize = usize + alignment - QUANTUM;

	extent_t *extent = NULL;
	malloc_mutex_lock(tsdn, &base->mtx);
	for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
		extent = extent_heap_remove_first(&base->avail[i]);
		if (extent != NULL) {
			/* Use existing space. */
			break;
		}
	}
	if (extent == NULL) {
		/* Try to allocate more space. */
		extent = base_extent_alloc(tsdn, base, usize, alignment);
	}
	void *ret;
	if (extent == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = base_extent_bump_alloc(base, extent, usize, alignment);
	if (esn != NULL) {
		*esn = extent_sn_get(extent);
	}
label_return:
	malloc_mutex_unlock(tsdn, &base->mtx);
	return ret;
}

/*
 * base_alloc() returns zeroed memory, which is always demand-zeroed for the
 * auto arenas, in order to make multi-page sparse data structures such as radix
 * tree nodes efficient with respect to physical memory usage. Upon success a
 * pointer to at least size bytes with specified alignment is returned. Note
 * that size is rounded up to the nearest multiple of alignment to avoid false
 * sharing.
 */
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
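
/*
 * Allocate an extent_t structure from base, recording the serial number of
 * the space it was carved from.
 */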
extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
	size_t esn;
	extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
	    CACHELINE, &esn);
	if (extent == NULL) {
		return NULL;
	}
	extent_esn_set(extent, esn);
	return extent;
}

void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
    size_t *mapped, size_t *n_thp) {
	cassert(config_stats);

	malloc_mutex_lock(tsdn, &base->mtx);
	assert(base->allocated <= base->resident);
	assert(base->resident <= base->mapped);
	*allocated = base->allocated;
	*resident = base->resident;
	*mapped = base->mapped;
	*n_thp = base->n_thp;
	malloc_mutex_unlock(tsdn, &base->mtx);
}

void
base_prefork(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_prefork(tsdn, &base->mtx);
}

void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_parent(tsdn, &base->mtx);
}

void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_child(tsdn, &base->mtx);
}
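
/*
 * Create the global base (b0) during bootstrap, using the default extent
 * hooks; returns true on error.
 */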
bool
base_boot(tsdn_t *tsdn) {
	b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
	return (b0 == NULL);
}