#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = LG_CHUNK_DEFAULT;
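
/*
 * chunks_mtx protects stats_chunks, and also serializes all accesses to the
 * chunk recycling trees declared below.
 */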
malloc_mutex_t	chunks_mtx;
chunk_stats_t	stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t	chunks_szad_mmap;
static extent_tree_t	chunks_ad_mmap;
static extent_tree_t	chunks_szad_dss;
static extent_tree_t	chunks_ad_dss;
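
/*
 * Radix tree of currently allocated chunks, maintained only when
 * config_ivsalloc is enabled so that ivsalloc() can validate pointers.
 */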
rtree_t		*chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;
size_t		map_bias;
size_t		arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*chunk_recycle(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
    bool *zero);
static void	chunk_record(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, void *chunk, size_t size);

/******************************************************************************/
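
/*
 * Attempt to satisfy this allocation request by splitting a suitable extent
 * off of the recycle trees; leading and trailing space left over from
 * alignment and size constraints is reinserted as smaller extents.
 */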
static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
    size_t alignment, bool base, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed;

	if (base) {
		/*
		 * This function may need to call base_node_{,de}alloc(), but
		 * the current chunk allocation request is on behalf of the
		 * base allocator.  Avoid deadlock (and if that weren't an
		 * issue, potential for infinite recursion) by returning NULL.
		 */
		return (NULL);
	}

	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	key.addr = NULL;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(chunks_szad, &key);
	if (node == NULL) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
	zeroed = node->zeroed;
	if (zeroed)
		*zero = true;
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated.  Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		node->zeroed = zeroed;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

	if (node != NULL)
		base_node_dealloc(node);
	if (*zero) {
		if (zeroed == false)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
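/*
 * For example, a hypothetical caller that does not require zeroed memory but
 * wants to skip redundant zeroing when it is returned anyway might do:
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc(chunksize, chunksize, false, &zero,
 *	    chunk_dss_prec_get());
 *	if (chunk != NULL && zero == false)
 *		memset(chunk, 0, chunksize);
 */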
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (config_dss && dss_prec == dss_prec_primary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
		    alignment, base, zero)) != NULL)
			goto label_return;
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			goto label_return;
	}
	/* mmap. */
	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
	    alignment, base, zero)) != NULL)
		goto label_return;
	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
		goto label_return;
169 /* "secondary" dss. */
170 if (config_dss && dss_prec == dss_prec_secondary) {
171 if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
172 alignment, base, zero)) != NULL)
174 if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)

	/* All strategies for allocation failed. */
	ret = NULL;
label_return:
	if (ret != NULL) {
		if (config_ivsalloc && base == false) {
			if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
		}
		if (config_stats || config_prof) {
			bool gdump;
			malloc_mutex_lock(&chunks_mtx);
			if (config_stats)
				stats_chunks.nchunks += (size / chunksize);
			stats_chunks.curchunks += (size / chunksize);
			if (stats_chunks.curchunks > stats_chunks.highchunks) {
				stats_chunks.highchunks =
				    stats_chunks.curchunks;
				if (config_prof)
					gdump = true;
			} else if (config_prof)
				gdump = false;
			malloc_mutex_unlock(&chunks_mtx);
			if (config_prof && opt_prof && opt_prof_gdump && gdump)
				prof_gdump();
		}
		if (config_valgrind)
			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	}
	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}
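
/*
 * Record the address range [chunk, chunk+size) as available for future
 * recycling, coalescing it with adjacent ranges whenever possible.
 */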
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
	bool unzeroed;
	extent_node_t *xnode, *node, *prev, *xprev, key;

	unzeroed = pages_purge(chunk, size);
	VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	/*
	 * Allocate a node before acquiring chunks_mtx even though it might not
	 * be needed, because base_node_alloc() may cause a new base chunk to
	 * be allocated, which could cause deadlock if chunks_mtx were already
	 * held.
	 */
	xnode = base_node_alloc();
	/* Use xprev to implement conditional deferred deallocation of prev. */
	xprev = NULL;

	malloc_mutex_lock(&chunks_mtx);
	key.addr = (void *)((uintptr_t)chunk + size);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && node->addr == key.addr) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		node->addr = chunk;
		node->size += size;
		node->zeroed = (node->zeroed && (unzeroed == false));
		extent_tree_szad_insert(chunks_szad, node);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		if (xnode == NULL) {
			/*
			 * base_node_alloc() failed, which is an exceedingly
			 * unlikely failure.  Leak chunk; its pages have
			 * already been purged, so this is only a virtual
			 * memory leak.
			 */
			goto label_return;
		}
		node = xnode;
		xnode = NULL; /* Prevent deallocation below. */
		node->addr = chunk;
		node->size = size;
		node->zeroed = (unzeroed == false);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);

		extent_tree_szad_remove(chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		node->zeroed = (node->zeroed && prev->zeroed);
		extent_tree_szad_insert(chunks_szad, node);

		xprev = prev;
	}

label_return:
	malloc_mutex_unlock(&chunks_mtx);
	/*
	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
	 * avoid potential deadlock.
	 */
	if (xnode != NULL)
		base_node_dealloc(xnode);
	if (xprev != NULL)
		base_node_dealloc(xprev);
}
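
/*
 * Return the range to the recycle trees rather than releasing it: dss memory
 * can never be unmapped, and mmap'ed memory is recycled here only when
 * chunk_dealloc_mmap() declines to unmap it.
 */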
void
chunk_unmap(void *chunk, size_t size)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_dss && chunk_in_dss(chunk))
		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
	else if (chunk_dealloc_mmap(chunk, size))
		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}

void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		assert(stats_chunks.curchunks >= (size / chunksize));
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

	if (unmap)
		chunk_unmap(chunk, size);
}
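
/*
 * One-time bootstrap of the chunk subsystem: derived size constants, the
 * stats mutex, the recycle trees, and (when config_ivsalloc is enabled) the
 * chunk radix tree.
 */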
bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (config_stats || config_prof) {
		if (malloc_mutex_init(&chunks_mtx))
			return (true);
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	}
	if (config_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad_mmap);
	extent_tree_ad_new(&chunks_ad_mmap);
	extent_tree_szad_new(&chunks_szad_dss);
	extent_tree_ad_new(&chunks_ad_dss);
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk, base_alloc, NULL);
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}

void
chunk_prefork(void)
{

	malloc_mutex_lock(&chunks_mtx);
	if (config_ivsalloc)
		rtree_prefork(chunks_rtree);
	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
	if (config_ivsalloc)
		rtree_postfork_parent(chunks_rtree);
	malloc_mutex_postfork_parent(&chunks_mtx);
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
	if (config_ivsalloc)
		rtree_postfork_child(chunks_rtree);
	malloc_mutex_postfork_child(&chunks_mtx);
}