#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

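/*
 * Each huge allocation is backed by one or more contiguous chunks and is
 * tracked by an extent_node_t that is registered via chunk_register(), so
 * that pointer lookups can recover the owning node and arena.
 */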
static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

static bool
huge_node_set(const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(ptr, node));
}

static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

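/*
 * huge_malloc() is a thin wrapper that forwards to huge_palloc() with the
 * minimum (chunksize) alignment.
 */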
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
    tcache_t *tcache)
{

	assert(usize == s2u(usize));

	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}

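/*
 * Allocate a huge region: reserve chunks from the arena, wrap them in an
 * extent node, register the node for pointer lookups, and link the node into
 * the arena's list of huge allocations.
 */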
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	size_t ausize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	ausize = sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
		return (NULL);
	assert(ausize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    usize, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, usize, is_zeroed, true);

	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, usize);
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, usize);

	arena_decay_tick(tsd, arena);
	return (ret);
}

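/*
 * huge_dalloc_junk() is routed through a function pointer when built for
 * testing (JEMALLOC_JET) so that tests can interpose their own
 * implementation.
 */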
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

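/*
 * Resize in place when the new usable size maps to the same number of chunks
 * as the old size; only junk/zero filling and bookkeeping change.
 */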
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    ptr, CHUNK_CEILING(oldsize), usize, sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
			    oldsize);
		}
	}
}

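/*
 * Shrink in place by splitting off and discarding the excess chunks.  Returns
 * false on success, true if the chunk split fails.
 */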
static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
			    CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);

	return (false);
}

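/*
 * Grow in place by extending the existing chunk range.  Returns false on
 * success, true if the arena cannot expand the mapping.
 */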
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero)
{
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	/*
	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(),
	 * so that it is possible to make correct junk/zero fill decisions
	 * below.
	 */
	is_zeroed_chunk = zero;

	if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
		    oldsize);
	}

	return (false);
}

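/*
 * Try to satisfy a resize request without moving the allocation, preferring
 * in-place expansion, then reuse of the existing chunks, then in-place
 * shrinking.  Returns false on success, true if a move is required.
 */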
bool
huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);
	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
		    zero)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && !huge_ralloc_no_move_expand(ptr,
		    oldsize, usize_min, zero)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
		    zero);
		arena_decay_tick(tsd, huge_aalloc(ptr));
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
		if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
	}
	return (true);
}

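/* Allocate new space for a moving reallocation, honoring alignment. */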
static void *
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment <= chunksize)
		return (huge_malloc(tsd, arena, usize, zero, tcache));
	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}

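/*
 * Reallocate a huge region: first attempt an in-place resize, and fall back
 * to allocate-copy-free if the existing chunks cannot be reused.
 */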
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= HUGE_MAXCLASS);

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
	    tcache);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache);
	return (ret);
}

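/*
 * Deallocate a huge region: deregister and unlink its extent node, junk fill
 * if configured, return the chunks to the arena, and free the node.
 */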
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true, true);

	arena_decay_tick(tsd, arena);
}

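/* Introspection helpers: map a huge pointer to its arena and usable size. */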
arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

size_t
huge_salloc(const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (size);
}

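/*
 * Profiling context accessors; the node's prof_tctx field is protected by the
 * owning arena's huge_mtx.
 */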
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (tctx);
}

void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
	malloc_mutex_unlock(&arena->huge_mtx);
}

void
huge_prof_tctx_reset(const void *ptr)
{

	huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}