#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;

malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;
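/*
 * huge_mtx serializes all access to the huge tree and to the huge_nmalloc,
 * huge_ndalloc, and huge_allocated statistics counters; huge allocations are
 * tracked globally rather than per arena, and they bypass tcaches entirely.
 */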
void *
huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
{

        return (huge_palloc(size, chunksize, zero, dss_prec));
}
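/*
 * huge_palloc() rounds the request up to a multiple of chunksize, maps the
 * chunks, and records the resulting extent in the huge tree so that
 * huge_salloc() and huge_dalloc() can later find the allocation by address.
 */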
void *
huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
{
        void *ret;
        size_t csize;
        extent_node_t *node;
        bool is_zeroed;

        /* Allocate one or more contiguous chunks for this request. */
        csize = CHUNK_CEILING(size);
        if (csize == 0) {
                /* size is large enough to cause size_t wrap-around. */
                return (NULL);
        }

        /* Allocate an extent node with which to track the chunk. */
        node = base_node_alloc();
        if (node == NULL)
                return (NULL);

        /*
         * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
         * it is possible to make correct junk/zero fill decisions below.
         */
        is_zeroed = zero;
        ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
        if (ret == NULL) {
                base_node_dealloc(node);
                return (NULL);
        }

        /* Insert node into huge. */
        node->addr = ret;
        node->size = csize;

        malloc_mutex_lock(&huge_mtx);
        extent_tree_ad_insert(&huge, node);
        if (config_stats) {
                stats_cactive_add(csize);
                huge_nmalloc++;
                huge_allocated += csize;
        }
        malloc_mutex_unlock(&huge_mtx);

        if (config_fill && zero == false) {
                if (opt_junk)
                        memset(ret, 0xa5, csize);
                else if (opt_zero && is_zeroed == false)
                        memset(ret, 0, csize);
        }

        return (ret);
}
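/*
 * Return false if ptr can continue to service the request without being
 * moved (i.e. the existing chunk ceiling still fits the new size), or true
 * if reallocation would require a move.
 */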
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

        /*
         * Avoid moving the allocation if the size class can be left the same.
         */
        if (oldsize > arena_maxclass
            && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
            && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
                assert(CHUNK_CEILING(oldsize) == oldsize);
                return (false);
        }

        /* Reallocation would require a move. */
        return (true);
}
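/*
 * Resizing policy: try to leave the allocation in place; failing that,
 * allocate a new region (first with the extra trailing space, then without
 * it), copy, and release the old region, using mremap(2) where available to
 * avoid copying page contents.
 */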
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
{
        void *ret;
        size_t copysize;

        /* Try to avoid moving the allocation. */
        if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
                return (ptr);

        /*
         * size and oldsize are different enough that we need to use a
         * different size class.  In that case, fall back to allocating new
         * space and copying.
         */
        if (alignment > chunksize)
                ret = huge_palloc(size + extra, alignment, zero, dss_prec);
        else
                ret = huge_malloc(size + extra, zero, dss_prec);

        if (ret == NULL) {
                if (extra == 0)
                        return (NULL);
                /* Try again, this time without extra. */
                if (alignment > chunksize)
                        ret = huge_palloc(size, alignment, zero, dss_prec);
                else
                        ret = huge_malloc(size, zero, dss_prec);

                if (ret == NULL)
                        return (NULL);
        }

        /*
         * Copy at most size bytes (not size+extra), since the caller has no
         * expectation that the extra bytes will be reliably preserved.
         */
        copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
        /*
         * Use mremap(2) if this is a huge-->huge reallocation, and neither the
         * source nor the destination are in dss.
         */
        if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
            == false && chunk_in_dss(ret) == false))) {
                size_t newsize = huge_salloc(ret);

                /*
                 * Remove ptr from the tree of huge allocations before
                 * performing the remap operation, in order to avoid the
                 * possibility of another thread acquiring that mapping before
                 * this one removes it from the tree.
                 */
                huge_dalloc(ptr, false);
                if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
                    ret) == MAP_FAILED) {
                        /*
                         * Assuming no chunk management bugs in the allocator,
                         * the only documented way an error can occur here is
                         * if the application changed the map type for a
                         * portion of the old allocation.  This is firmly in
                         * undefined behavior territory, so write a diagnostic
                         * message, and optionally abort.
                         */
                        char buf[BUFERROR_BUF];

                        buferror(get_errno(), buf, sizeof(buf));
                        malloc_printf("<jemalloc>: Error in mremap(): %s\n",
                            buf);
                        if (opt_abort)
                                abort();
                        memcpy(ret, ptr, copysize);
                        chunk_dealloc_mmap(ptr, oldsize);
                } else if (config_fill && zero == false && opt_junk && oldsize
                    < newsize) {
                        /*
                         * mremap(2) clobbers the original mapping, so
                         * junk/zero filling is not preserved.  There is no
                         * need to zero fill here, since any trailing
                         * uninitialized memory is demand-zeroed by the
                         * kernel, but junk filling must be redone.
                         */
                        memset(ret + oldsize, 0xa5, newsize - oldsize);
                }
        } else
#endif
        {
                memcpy(ret, ptr, copysize);
                iqalloct(ptr, try_tcache_dalloc);
        }
        return (ret);
}
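/*
 * When built for testing (JEMALLOC_JET), huge_dalloc_junk is exposed through
 * a mutable function pointer so that tests can interpose on junk filling;
 * otherwise the name binds directly to the static implementation below.
 */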
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

        if (config_fill && config_dss && opt_junk) {
                /*
                 * Only bother junk filling if the chunk isn't about to be
                 * unmapped.
                 */
                if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
                        memset(ptr, 0x5a, usize);
        }
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
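/*
 * Remove the extent node under huge_mtx, then perform junk filling and chunk
 * deallocation outside the critical section; the node itself is returned to
 * the base allocator for reuse.
 */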
void
huge_dalloc(void *ptr, bool unmap)
{
        extent_node_t *node, key;

        malloc_mutex_lock(&huge_mtx);

        /* Extract from tree of huge allocations. */
        key.addr = ptr;
        node = extent_tree_ad_search(&huge, &key);
        assert(node != NULL);
        assert(node->addr == ptr);
        extent_tree_ad_remove(&huge, node);

        if (config_stats) {
                stats_cactive_sub(node->size);
                huge_ndalloc++;
                huge_allocated -= node->size;
        }

        malloc_mutex_unlock(&huge_mtx);

        if (unmap)
                huge_dalloc_junk(node->addr, node->size);

        chunk_dealloc(node->addr, node->size, unmap);

        base_node_dealloc(node);
}
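/* Return the size of the huge allocation that begins at ptr. */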
size_t
huge_salloc(const void *ptr)
{
        size_t ret;
        extent_node_t *node, key;

        malloc_mutex_lock(&huge_mtx);

        /* Look up in tree of huge allocations. */
        key.addr = __DECONST(void *, ptr);
        node = extent_tree_ad_search(&huge, &key);
        assert(node != NULL);

        ret = node->size;

        malloc_mutex_unlock(&huge_mtx);

        return (ret);
}
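/*
 * Huge allocations have no owning arena; the dss precedence that governs
 * chunk allocation is borrowed from the arena that would service the calling
 * thread's requests.
 */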
dss_prec_t
huge_dss_prec_get(arena_t *arena)
{

        return (arena_dss_prec_get(choose_arena(arena)));
}
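/*
 * For huge allocations the profiling context lives directly in the extent
 * node, so both accessors are simple tree lookups under huge_mtx.
 */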
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
        prof_ctx_t *ret;
        extent_node_t *node, key;

        malloc_mutex_lock(&huge_mtx);

        /* Look up in tree of huge allocations. */
        key.addr = __DECONST(void *, ptr);
        node = extent_tree_ad_search(&huge, &key);
        assert(node != NULL);

        ret = node->prof_ctx;

        malloc_mutex_unlock(&huge_mtx);

        return (ret);
}
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
        extent_node_t *node, key;

        malloc_mutex_lock(&huge_mtx);

        /* Look up in tree of huge allocations. */
        key.addr = __DECONST(void *, ptr);
        node = extent_tree_ad_search(&huge, &key);
        assert(node != NULL);

        node->prof_ctx = ctx;

        malloc_mutex_unlock(&huge_mtx);
}
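/* Called once during bootstrapping, before the first huge allocation. */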
bool
huge_boot(void)
{

        /* Initialize chunks data. */
        if (malloc_mutex_init(&huge_mtx))
                return (true);
        extent_tree_ad_new(&huge);

        return (false);
}
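/*
 * Fork protocol: huge_prefork() acquires huge_mtx before fork(2), and the
 * postfork functions release (parent) or reinitialize (child) it so that the
 * child does not inherit a mutex locked by a thread that no longer exists.
 */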
void
huge_prefork(void)
{

        malloc_mutex_prefork(&huge_mtx);
}
void
huge_postfork_parent(void)
{

        malloc_mutex_postfork_parent(&huge_mtx);
}
void
huge_postfork_child(void)
{

        malloc_mutex_postfork_child(&huge_mtx);
}