#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

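/*
 * Huge allocations are backed by one or more contiguous chunks and tracked by
 * extent_node_t metadata registered in the chunk radix tree.  The helpers
 * below wrap registration, re-registration, and deregistration of that
 * mapping.
 */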
static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

static bool
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(tsdn, ptr, node));
}

static void
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
	bool err;

	err = huge_node_set(tsdn, ptr, node);
	assert(!err);
}

static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

void *
huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
{

	assert(usize == s2u(usize));

	return (huge_palloc(tsdn, arena, usize, chunksize, zero));
}

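/*
 * Allocate a huge region of at least usize bytes with the given alignment.
 * The backing chunks are obtained from the arena, tracked by a freshly
 * allocated extent_node_t, and the node is registered and appended to the
 * arena's huge list.  Returns NULL on size overflow or allocation failure.
 */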
void *
huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero)
{
	void *ret;
	size_t ausize;
	arena_t *iarena;
	extent_node_t *node;
	size_t sn;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
		return (NULL);
	assert(ausize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
	    a0get();
	node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, NULL, true, iarena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	if (likely(!tsdn_null(tsdn)))
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
	    arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
		idalloctm(tsdn, node, NULL, true, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);

	if (huge_node_set(tsdn, ret, node)) {
		arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
		idalloctm(tsdn, node, NULL, true, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, JEMALLOC_ALLOC_JUNK, usize);

	arena_decay_tick(tsdn, arena);
	return (ret);
}

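/*
 * Junk fill a huge region that is about to be deallocated.  Under
 * JEMALLOC_JET the implementation is exposed through a function pointer so
 * that tests can intercept it.
 */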
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, JEMALLOC_FREE_JUNK, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

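/*
 * Resize in place without changing the chunk footprint: only junk/zero
 * filling, optional purging, and metadata (size, zeroed) updates are needed.
 */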
static void
huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize_min, size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize),
			    JEMALLOC_FREE_JUNK, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
			    &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
			    sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
	huge_node_unset(ptr, node);
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	huge_node_reset(tsdn, ptr, node);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize),
			    JEMALLOC_ALLOC_JUNK, usize - oldsize);
		}
	}
}

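/*
 * Shrink in place by splitting off the excess chunks and returning them to
 * the arena.  Returns false on success, true if the split hook fails.
 */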
static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(tsdn, arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
			    &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
			    usize), CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
	huge_node_reset(tsdn, ptr, node);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
	    extent_node_sn_get(node));

	return (false);
}

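/*
 * Grow in place by appending chunks to the existing mapping.  Returns false
 * on success, true if the arena cannot extend the allocation.
 */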
static bool
huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize, bool zero) {
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	/*
	 * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
	 * update extent's zeroed field, and zero as necessary.
	 */
	is_zeroed_chunk = false;
	if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
	extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
	    is_zeroed_chunk);
	huge_node_reset(tsdn, ptr, node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
		    usize - oldsize);
	}

	return (false);
}

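/*
 * Try to satisfy a reallocation request without moving the allocation:
 * expand toward usize_max (falling back to usize_min), resize within the
 * current chunks when they already accommodate the request, or shrink.
 * Returns false if ptr now satisfies the request, true if a move is needed.
 */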
bool
huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);
	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
		    zero)) {
			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
		    ptr, oldsize, usize_min, zero)) {
			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
		    usize_max, zero);
		arena_decay_tick(tsdn, huge_aalloc(ptr));
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
		if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
		    usize_max)) {
			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
	}
	return (true);
}

static void *
huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero)
{

	if (alignment <= chunksize)
		return (huge_malloc(tsdn, arena, usize, zero));
	return (huge_palloc(tsdn, arena, usize, alignment, zero));
}

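/*
 * Reallocate a huge allocation, moving it only if it cannot be resized in
 * place; on a move the contents are copied and the old allocation is freed.
 */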
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= HUGE_MAXCLASS);

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
	    zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
	    zero);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache, true);
	return (ret);
}

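/*
 * Deallocate a huge allocation: deregister and unlink its extent node, junk
 * fill if enabled, return the backing chunks to the arena, and free the node.
 */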
void
huge_dalloc(tsdn_t *tsdn, void *ptr)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node),
	    extent_node_sn_get(node));
	idalloctm(tsdn, node, NULL, true, true);

	arena_decay_tick(tsdn, arena);
}

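/*
 * Metadata accessors.  Size and prof tctx accesses are protected by the
 * owning arena's huge_mtx.
 */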
arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

size_t
huge_salloc(tsdn_t *tsdn, const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	return (size);
}

prof_tctx_t *
huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	return (tctx);
}

void
huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
}

void
huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
{

	huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
}