#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"

/******************************************************************************/
/* Data. */

static base_t *b0;

/******************************************************************************/

static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;

	assert(size == HUGEPAGE_CEILING(size));

	if (extent_hooks == &extent_hooks_default) {
		addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
	} else {
		/* No arena context as we are creating new arenas. */
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
		    &zero, &commit, ind);
		post_reentrancy(tsd);
	}

	return addr;
}

static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
    size_t size) {
	/*
	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
	 * stopping at first success.  This cascade is performed for consistency
	 * with the cascade in extent_dalloc_wrapper() because an application's
	 * custom hooks may not support e.g. dalloc.  This function is only ever
	 * called as a side effect of arena destruction, so although it might
	 * seem pointless to do anything besides dalloc here, the application
	 * may in fact want the end state of all associated virtual memory to be
	 * in some consistent-but-allocated state.
	 */
	if (extent_hooks == &extent_hooks_default) {
		if (!extent_dalloc_mmap(addr, size)) {
			return;
		}
		if (!pages_decommit(addr, size)) {
			return;
		}
		if (!pages_purge_forced(addr, size)) {
			return;
		}
		if (!pages_purge_lazy(addr, size)) {
			return;
		}
		/* Nothing worked.  This should never happen. */
		not_reached();
	} else {
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		if (extent_hooks->dalloc != NULL &&
		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
		    ind)) {
			goto label_done;
		}
		if (extent_hooks->decommit != NULL &&
		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_done;
		}
		if (extent_hooks->purge_forced != NULL &&
		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
		    size, ind)) {
			goto label_done;
		}
		if (extent_hooks->purge_lazy != NULL &&
		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_done;
		}
		/* Nothing worked.  That's the application's problem. */
	label_done:
		post_reentrancy(tsd);
	}
}
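
/*
 * Illustrative sketch, not part of jemalloc: a custom hook set whose dalloc
 * "opts out" by returning true, e.g. because the memory comes from a
 * pre-reserved region that must stay mapped.  With hooks like these, the
 * cascade in base_unmap() above falls through to decommit, so arena
 * destruction still leaves the region in a predictable state.  The example_*
 * names are hypothetical; the signatures are those of the public
 * extent_hooks_t members, and the designated initializers leave the remaining
 * hooks NULL purely for brevity.
 */
#if 0
static void *
example_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	/* Defer to the mmap-based allocator used by the default hooks. */
	return extent_alloc_mmap(new_addr, size, alignment, zero, commit);
}

static bool
example_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	/* Returning true declines deallocation; the mapping stays in place. */
	return true;
}

static bool
example_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	/* false means success, matching the hook convention used above. */
	return pages_decommit((void *)((uintptr_t)addr + offset), length);
}

static extent_hooks_t example_hooks = {
	.alloc = example_alloc,
	.dalloc = example_dalloc,
	.decommit = example_decommit
};
#endif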

static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
    size_t size) {
	size_t sn;

	sn = *extent_sn_next;
	(*extent_sn_next)++;

	extent_binit(extent, addr, size, sn);
}

static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
    size_t alignment) {
	void *ret;

	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
	assert(size == ALIGNMENT_CEILING(size, alignment));

	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
	    alignment) - (uintptr_t)extent_addr_get(extent);
	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
	assert(extent_bsize_get(extent) >= *gap_size + size);
	extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
	    *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
	    extent_sn_get(extent));
	return ret;
}

static void
base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
    size_t gap_size, void *addr, size_t size) {
	if (extent_bsize_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
		 * exceed extent's size.
		 */
		szind_t index_floor =
		    sz_size2index(extent_bsize_get(extent) + 1) - 1;
		extent_heap_insert(&base->avail[index_floor], extent);
	}

	if (config_stats) {
		base->allocated += size;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
}

static void *
base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
    size_t size, size_t alignment) {
	void *ret;
	size_t gap_size;

	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
	base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
	return ret;
}
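
/*
 * Illustrative sketch, not part of jemalloc: the bump arithmetic above on
 * concrete (made-up) numbers.  For an extent spanning [0x1008, 0x1008 + 4096)
 * and a 64-byte request with 64-byte alignment, the gap is the padding needed
 * to reach the next aligned address, and the trimmed extent that goes back
 * into the avail heaps starts right after the returned object.
 */
#if 0
static void
base_bump_arith_example(void) {
	uintptr_t addr = 0x1008;	/* hypothetical extent base address */
	size_t bsize = 4096;		/* hypothetical extent size */
	size_t size = 64, alignment = 64;

	size_t gap_size = ALIGNMENT_CEILING(addr, alignment) - addr; /* 56 */
	uintptr_t ret = addr + gap_size;			/* 0x1040 */
	uintptr_t trimmed_addr = ret + size;			/* 0x1080 */
	size_t trimmed_bsize = bsize - gap_size - size;		/* 3976 */

	/*
	 * sz_size2index(3977) - 1 is the largest size class that still fits in
	 * 3976 bytes, which is where the trimmed extent is reinserted.
	 */
	szind_t index_floor = sz_size2index(trimmed_bsize + 1) - 1;

	/* Resident accounting: pages newly crossed by [addr, ret + size). */
	size_t resident_delta = PAGE_CEILING(ret + size) - PAGE_CEILING(addr);

	(void)trimmed_addr; (void)index_floor; (void)resident_delta;
}
#endif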

/*
 * Allocate a block of virtual memory that is large enough to start with a
 * base_block_t header, followed by an object of specified size and alignment.
 * On success a pointer to the initialized base_block_t header is returned.
 */
static base_block_t *
base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind,
    pszind_t *pind_last, size_t *extent_sn_next, size_t size,
    size_t alignment) {
	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t header_size = sizeof(base_block_t);
	size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
	    header_size;
	/*
	 * Create increasingly larger blocks in order to limit the total number
	 * of disjoint virtual memory ranges.  Choose the next size in the page
	 * size class series (skipping size classes that are not a multiple of
	 * HUGEPAGE), or a size large enough to satisfy the requested size and
	 * alignment, whichever is larger.
	 */
	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
	    + usize));
	pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
	    *pind_last;
	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
	size_t block_size = (min_block_size > next_block_size) ? min_block_size
	    : next_block_size;
	base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
	    block_size);
	if (block == NULL) {
		return NULL;
	}
	*pind_last = sz_psz2ind(block_size);
	block->size = block_size;
	block->next = NULL;
	assert(block_size >= header_size);
	base_extent_init(extent_sn_next, &block->extent,
	    (void *)((uintptr_t)block + header_size), block_size - header_size);
	return block;
}
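
/*
 * Illustrative sketch, not part of jemalloc: how the sizing policy above plays
 * out, assuming 2 MiB huge pages.  A first, small metadata request still maps
 * a full HUGEPAGE-sized block; as *pind_last advances through the page size
 * class series, each later block is at least as large as the previous one, so
 * the number of disjoint mappings grows much more slowly than the number of
 * allocations.
 */
#if 0
static void
base_block_size_example(void) {
	size_t header_size = sizeof(base_block_t);
	size_t usize = 40 * 1024;	/* hypothetical first request */

	/* Round to a page size class, then to a huge page boundary. */
	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + usize));
	assert(min_block_size == HUGEPAGE);	/* 2 MiB on common configs */
}
#endif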

/*
 * Allocate an extent that is at least as large as specified size, with
 * specified alignment.
 */
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &base->mtx);

	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	/*
	 * Drop mutex during base_block_alloc(), because an extent hook will be
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
	base_block_t *block = base_block_alloc(tsdn, extent_hooks,
	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
	    alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
	if (block == NULL) {
		return NULL;
	}
	block->next = base->blocks;
	base->blocks = block;
	if (config_stats) {
		base->allocated += sizeof(base_block_t);
		base->resident += PAGE_CEILING(sizeof(base_block_t));
		base->mapped += block->size;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
	return &block->extent;
}

base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	pszind_t pind_last = 0;
	size_t extent_sn_next = 0;
	base_block_t *block = base_block_alloc(tsdn, extent_hooks, ind,
	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
	}

	size_t gap_size;
	size_t base_alignment = CACHELINE;
	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
	    malloc_mutex_rank_exclusive)) {
		base_unmap(tsdn, extent_hooks, ind, block, block->size);
		return NULL;
	}
	base->pind_last = pind_last;
	base->extent_sn_next = extent_sn_next;
	base->blocks = block;
	for (szind_t i = 0; i < NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
	}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
		base->resident = PAGE_CEILING(sizeof(base_block_t));
		base->mapped = block->size;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
	base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
	    base_size);

	return base;
}

void
base_delete(tsdn_t *tsdn, base_t *base) {
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	base_block_t *next = base->blocks;
	do {
		base_block_t *block = next;
		next = block->next;
		base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
		    block->size);
	} while (next != NULL);
}

extent_hooks_t *
base_extent_hooks_get(base_t *base) {
	return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
	    ATOMIC_ACQUIRE);
}

extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
	extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
	return old_extent_hooks;
}
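
/*
 * Illustrative sketch, not part of jemalloc: swapping in a custom hook set
 * (e.g. the hypothetical example_hooks sketched after base_unmap() above).
 * The swap is a single atomic store; blocks mapped or unmapped after this
 * point go through the new hooks, and the previous hooks are returned to the
 * caller.
 */
#if 0
static void
base_hooks_swap_example(base_t *base) {
	extent_hooks_t *old_hooks = base_extent_hooks_set(base, &example_hooks);
	(void)old_hooks;
}
#endif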

static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
    size_t *esn) {
	alignment = QUANTUM_CEILING(alignment);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t asize = usize + alignment - QUANTUM;

	extent_t *extent = NULL;
	malloc_mutex_lock(tsdn, &base->mtx);
	for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
		extent = extent_heap_remove_first(&base->avail[i]);
		if (extent != NULL) {
			/* Use existing space. */
			break;
		}
	}
	if (extent == NULL) {
		/* Try to allocate more space. */
		extent = base_extent_alloc(tsdn, base, usize, alignment);
	}
	void *ret;
	if (extent == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
	if (esn != NULL) {
		*esn = extent_sn_get(extent);
	}
label_return:
	malloc_mutex_unlock(tsdn, &base->mtx);
	return ret;
}

/*
 * base_alloc() returns zeroed memory, which is always demand-zeroed for the
 * auto arenas, in order to make multi-page sparse data structures such as radix
 * tree nodes efficient with respect to physical memory usage.  Upon success a
 * pointer to at least size bytes with specified alignment is returned.  Note
 * that size is rounded up to the nearest multiple of alignment to avoid false
 * sharing.
 */
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
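
/*
 * Illustrative sketch, not part of jemalloc: the typical lifecycle of a base
 * allocator as seen by arena bootstrap code.  The arena index and request size
 * are made up for the example.  Note that there is no per-object free; base
 * memory is only released wholesale by base_delete().
 */
#if 0
static void
base_usage_example(void) {
	tsdn_t *tsdn = tsdn_fetch();
	base_t *base = base_new(tsdn, 42,
	    (extent_hooks_t *)&extent_hooks_default);
	if (base == NULL) {
		return;
	}

	/* Zeroed, cacheline-aligned metadata memory. */
	void *meta = base_alloc(tsdn, base, 128, CACHELINE);
	if (meta != NULL) {
		assert(((uintptr_t)meta & (CACHELINE - 1)) == 0);
	}

	/* Unmap every block owned by this base in one shot. */
	base_delete(tsdn, base);
}
#endif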

extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
	size_t esn;
	extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
	    CACHELINE, &esn);
	if (extent == NULL) {
		return NULL;
	}
	extent_esn_set(extent, esn);
	return extent;
}

void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
    size_t *mapped) {
	cassert(config_stats);

	malloc_mutex_lock(tsdn, &base->mtx);
	assert(base->allocated <= base->resident);
	assert(base->resident <= base->mapped);
	*allocated = base->allocated;
	*resident = base->resident;
	*mapped = base->mapped;
	malloc_mutex_unlock(tsdn, &base->mtx);
}
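
/*
 * Illustrative sketch, not part of jemalloc: reading the three counters.
 * allocated counts bytes handed out (plus block headers), resident counts the
 * pages those bytes touch, and mapped counts whole blocks, so the invariant
 * allocated <= resident <= mapped always holds.
 */
#if 0
static void
base_stats_example(tsdn_t *tsdn, base_t *base) {
	size_t allocated, resident, mapped;
	base_stats_get(tsdn, base, &allocated, &resident, &mapped);
	assert(allocated <= resident && resident <= mapped);
}
#endif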

void
base_prefork(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_prefork(tsdn, &base->mtx);
}

void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_parent(tsdn, &base->mtx);
}

void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_child(tsdn, &base->mtx);
}

bool
base_boot(tsdn_t *tsdn) {
	b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
	return (b0 == NULL);
}