#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;

malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;
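/*
 * Allocate a huge region with the default (chunk) alignment; this is the
 * common entry point and simply defers to huge_palloc().
 */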
void *
huge_malloc(size_t size, bool zero)
{
    return (huge_palloc(size, chunksize, zero));
}
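/*
 * Allocate one or more contiguous chunks with the requested alignment and
 * register the resulting extent in the global tree of huge allocations, so
 * that huge_salloc()/huge_dalloc() can later find it by address.
 */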
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
    void *ret;
    size_t csize;
    extent_node_t *node;
    bool is_zeroed;

    /* Allocate one or more contiguous chunks for this request. */
    csize = CHUNK_CEILING(size);
    if (csize == 0) {
        /* size is large enough to cause size_t wrap-around. */
        return (NULL);
    }

    /* Allocate an extent node with which to track the chunk. */
    node = base_node_alloc();
    if (node == NULL)
        return (NULL);

    /*
     * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
     * it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed = zero;
    ret = chunk_alloc(csize, alignment, false, &is_zeroed,
        chunk_dss_prec_get());
    if (ret == NULL) {
        base_node_dealloc(node);
        return (NULL);
    }

    /* Insert node into huge. */
    node->addr = ret;
    node->size = csize;

    malloc_mutex_lock(&huge_mtx);
    extent_tree_ad_insert(&huge, node);
    if (config_stats) {
        stats_cactive_add(csize);
        huge_nmalloc++;
        huge_allocated += csize;
    }
    malloc_mutex_unlock(&huge_mtx);

    if (config_fill && zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, csize);
        else if (opt_zero && is_zeroed == false)
            memset(ret, 0, csize);
    }

    return (ret);
}
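/*
 * Return false if the existing huge allocation can be left in place, i.e. its
 * chunk-rounded size already lies between CHUNK_CEILING(size) and
 * CHUNK_CEILING(size+extra); return true if satisfying the request would
 * require moving the data.
 */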
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{
    /*
     * Avoid moving the allocation if the size class can be left the same.
     */
    if (oldsize > arena_maxclass
        && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
        && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
        assert(CHUNK_CEILING(oldsize) == oldsize);
        return (false);
    }

    /* Reallocation would require a move. */
    return (true);
}
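/*
 * Reallocate a huge region.  If the allocation cannot be resized in place,
 * allocate a new region (preferring size+extra, falling back to size alone),
 * copy the old contents, and release the old region, using mremap(2) to avoid
 * the copy when the build supports it.
 */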
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc)
{
    void *ret;
    size_t copysize;

    /* Try to avoid moving the allocation. */
    if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
        return (ptr);

    /*
     * size and oldsize are different enough that we need to use a
     * different size class.  In that case, fall back to allocating new
     * space and copying.
     */
    if (alignment > chunksize)
        ret = huge_palloc(size + extra, alignment, zero);
    else
        ret = huge_malloc(size + extra, zero);

    if (ret == NULL) {
        if (extra == 0)
            return (NULL);
        /* Try again, this time without extra. */
        if (alignment > chunksize)
            ret = huge_palloc(size, alignment, zero);
        else
            ret = huge_malloc(size, zero);

        if (ret == NULL)
            return (NULL);
    }

    /*
     * Copy at most size bytes (not size+extra), since the caller has no
     * expectation that the extra bytes will be reliably preserved.
     */
    copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
    /*
     * Use mremap(2) if this is a huge-->huge reallocation, and neither the
     * source nor the destination are in dss.
     */
    if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
        == false && chunk_in_dss(ret) == false))) {
        size_t newsize = huge_salloc(ret);

        /*
         * Remove ptr from the tree of huge allocations before
         * performing the remap operation, in order to avoid the
         * possibility of another thread acquiring that mapping before
         * this one removes it from the tree.
         */
        huge_dalloc(ptr, false);
        if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
            ret) == MAP_FAILED) {
            /*
             * Assuming no chunk management bugs in the allocator,
             * the only documented way an error can occur here is
             * if the application changed the map type for a
             * portion of the old allocation.  This is firmly in
             * undefined behavior territory, so write a diagnostic
             * message, and optionally abort.
             */
            char buf[BUFERROR_BUF];

            buferror(get_errno(), buf, sizeof(buf));
            malloc_printf("<jemalloc>: Error in mremap(): %s\n",
                buf);
            if (opt_abort)
                abort();
            memcpy(ret, ptr, copysize);
            chunk_dealloc_mmap(ptr, oldsize);
        }
    } else
#endif
    {
        memcpy(ret, ptr, copysize);
        iqalloct(ptr, try_tcache_dalloc);
    }
    return (ret);
}
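/*
 * When built with JEMALLOC_JET, huge_dalloc_junk is exposed through a
 * function pointer so that tests can interpose on the junk-filling behavior;
 * otherwise the name resolves directly to the static implementation below.
 */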
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{
    if (config_fill && config_dss && opt_junk) {
        /*
         * Only bother junk filling if the chunk isn't about to be
         * unmapped.
         */
        if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
            memset(ptr, 0x5a, usize);
    }
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
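/*
 * Release a huge allocation: remove its extent node from the tree, update
 * stats, optionally junk-fill, and return the chunk(s) to the chunk layer.
 */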
void
huge_dalloc(void *ptr, bool unmap)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = ptr;
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

    if (config_stats) {
        stats_cactive_sub(node->size);
        huge_ndalloc++;
        huge_allocated -= node->size;
    }

    malloc_mutex_unlock(&huge_mtx);

    if (unmap)
        huge_dalloc_junk(node->addr, node->size);

    chunk_dealloc(node->addr, node->size, unmap);

    base_node_dealloc(node);
}
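/*
 * Look up the chunk-rounded size of the huge allocation that starts at ptr.
 */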
size_t
huge_salloc(const void *ptr)
{
    size_t ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->size;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}
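/*
 * Accessors for the heap-profiling context associated with a huge allocation;
 * the context is stored in the allocation's extent node.
 */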
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->prof_ctx;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    node->prof_ctx = ctx;

    malloc_mutex_unlock(&huge_mtx);
}
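/*
 * Bootstrap the huge allocation subsystem: initialize the mutex and the empty
 * extent tree.  Returns true on error.
 */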
bool
huge_boot(void)
{
    /* Initialize chunks data. */
    if (malloc_mutex_init(&huge_mtx))
        return (true);
    extent_tree_ad_new(&huge);

    return (false);
}
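/*
 * Fork handlers: the huge mutex is acquired before fork(2) and released in
 * both the parent and the child afterward, so the child does not inherit a
 * locked mutex.
 */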
void
huge_prefork(void)
{
    malloc_mutex_prefork(&huge_mtx);
}
void
huge_postfork_parent(void)
{
    malloc_mutex_postfork_parent(&huge_mtx);
}
void
huge_postfork_child(void)
{
    malloc_mutex_postfork_child(&huge_mtx);
}