#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/

void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
	assert(usize == sz_s2u(usize));

	return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}
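
/*
 * Editor's note: large_malloc() is the unaligned fast path; it requires that
 * usize already be a large size class (a fixed point of sz_s2u()).  A caller
 * starting from a raw request size might look like this (illustrative only):
 *
 *	size_t usize = sz_s2u(size);
 *	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 *		return NULL;
 *	}
 *	void *p = large_malloc(tsdn, arena, usize, false);
 */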

void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero) {
	size_t ausize;
	extent_t *extent;
	bool is_zeroed;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sz_sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed ends up true when zero is false.
	 */
	is_zeroed = zero;
	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	}
	if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
	    arena, usize, alignment, &is_zeroed)) == NULL) {
		return NULL;
	}

	/* See comments in arena_bin_slabs_full_insert(). */
	if (!arena_is_auto(arena)) {
		/* Insert extent into large. */
		malloc_mutex_lock(tsdn, &arena->large_mtx);
		extent_list_append(&arena->large, extent);
		malloc_mutex_unlock(tsdn, &arena->large_mtx);
	}
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (zero) {
		assert(is_zeroed);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
		    extent_usize_get(extent));
	}

	arena_decay_tick(tsdn, arena);
	return extent_addr_get(extent);
}
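
/*
 * Editor's note: large_palloc() is the single slow path behind every large
 * allocation: round (usize, alignment) up to a supported extent size, pick an
 * arena, carve an extent, link it into arena->large for manually created
 * arenas (auto arenas skip the list; see arena_bin_slabs_full_insert()), then
 * apply the zero/junk fill policy.  is_zeroed is a copy of zero so that the
 * fill decision can distinguish "caller asked for zeroed memory" from "the
 * extent happened to arrive pre-zeroed from the OS".
 */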

static void
large_dalloc_junk_impl(void *ptr, size_t size) {
	memset(ptr, JEMALLOC_FREE_JUNK, size);
}
large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;
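
/*
 * Editor's note: JET_MUTABLE expands to nothing in JEMALLOC_JET (testing)
 * builds and to const otherwise, so unit tests can interpose on the junk
 * filler.  A hypothetical test hook (names invented for illustration):
 *
 *	static void
 *	counting_junk(void *ptr, size_t size) {
 *		njunked++;
 *		memset(ptr, JEMALLOC_FREE_JUNK, size);
 *	}
 *	...
 *	large_dalloc_junk = counting_junk;
 */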

static void
large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the extent isn't about to be
		 * unmapped.
		 */
		if (opt_retain || (have_dss && extent_in_dss(ptr))) {
			large_dalloc_junk(ptr, size);
		}
	}
}
large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
    large_dalloc_maybe_junk_impl;
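
/*
 * Editor's note: junk-filling freed memory only matters if the pages can be
 * touched again afterward.  With opt_retain the pages are kept and recycled,
 * and dss (sbrk) memory can never be unmapped, so those are the only cases
 * worth filling.  Observe that the outer have_dss test makes the fill a no-op
 * on builds without dss support even when opt_retain is true; this mirrors
 * the historical huge_dalloc_junk() logic.
 */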

static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t diff = extent_size_get(extent) - (usize + sz_large_pad);

	assert(oldusize > usize);

	if (extent_hooks->split == NULL) {
		return true;
	}

	/* Split excess pages. */
	if (diff != 0) {
		extent_t *trail = extent_split_wrapper(tsdn, arena,
		    &extent_hooks, extent, usize + sz_large_pad,
		    sz_size2index(usize), false, diff, NSIZES, false);
		if (trail == NULL) {
			return true;
		}

		if (config_fill && unlikely(opt_junk_free)) {
			large_dalloc_maybe_junk(extent_addr_get(trail),
			    extent_size_get(trail));
		}

		arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
	}

	arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);

	return false;
}
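
/*
 * Editor's note: shrinking in place splits a trail off the extent's tail,
 * junk-fills the trail under opt_junk_free, and hands it to the arena's dirty
 * extents for reuse.  Following jemalloc's convention, the return value is
 * true on failure (e.g. custom extent hooks with no split support) and false
 * on success.
 */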

static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
    bool zero) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t trailsize = usize - oldusize;

	if (extent_hooks->merge == NULL) {
		return true;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed_trail ends up true when zero is
	 * false.
	 */
	bool is_zeroed_trail = zero;
	bool commit = true;
	extent_t *trail;
	bool new_mapping;
	if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL
	    || (trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) {
		if (config_stats) {
			new_mapping = false;
		}
	} else {
		if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
		    extent_past_get(extent), trailsize, 0, CACHELINE, false,
		    NSIZES, &is_zeroed_trail, &commit)) == NULL) {
			return true;
		}
		if (config_stats) {
			new_mapping = true;
		}
	}

	if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
		extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
		return true;
	}
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_addr_get(extent), szind, false);

	if (config_stats && new_mapping) {
		arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
	}

	if (zero) {
		if (config_cache_oblivious) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the extent is a multiple
			 * of CACHELINE in [0 .. PAGE).
			 */
			void *zbase = (void *)
			    ((uintptr_t)extent_addr_get(extent) + oldusize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}
		assert(is_zeroed_trail);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
	}

	arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);

	return false;
}
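
/*
 * Editor's note: in-place expansion looks for trailing space in three places,
 * cheapest first: recycled dirty extents, muzzy extents, and only then a
 * fresh mapping.  After a successful merge the size class changes, so both
 * the extent header and extents_rtree (the radix tree consulted by pointer
 * lookups such as sallocx()) must be updated to the new szind before the
 * memory is handed back.
 */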

bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS);

	if (usize_max > oldusize) {
		/* Attempt to expand the allocation in-place. */
		if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && usize_min > oldusize &&
		    !large_ralloc_no_move_expand(tsdn, extent, usize_min,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}

	/*
	 * Avoid moving the allocation if the existing extent size accommodates
	 * the new size.
	 */
	if (oldusize >= usize_min && oldusize <= usize_max) {
		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	}

	/* Attempt to shrink the allocation in-place. */
	if (oldusize > usize_max) {
		if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}
	return true;
}
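
/*
 * Illustration (hypothetical sizes, assuming 4 KiB pages and the default size
 * classes): for oldusize == 16 KiB, usize_min == 20 KiB, and
 * usize_max == 32 KiB, the code first tries to expand to 32 KiB and, if the
 * neighboring address space is unavailable, retries with 20 KiB before giving
 * up.  A return of false means the extent now satisfies the request in
 * place; true tells the caller to fall back to allocate-copy-free.
 */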

static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero) {
	if (alignment <= CACHELINE) {
		return large_malloc(tsdn, arena, usize, zero);
	}
	return large_palloc(tsdn, arena, usize, alignment, zero);
}
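
/*
 * Editor's note: all large allocations are CACHELINE-aligned by construction,
 * so the helper only takes the alignment-aware path when the caller requests
 * something stricter.
 */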

void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS);

	/* Try to avoid moving the allocation. */
	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
		return extent_addr_get(extent);
	}

	/*
	 * usize and old size are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero);
	if (ret == NULL) {
		return NULL;
	}

	size_t copysize = (usize < oldusize) ? usize : oldusize;
	memcpy(ret, extent_addr_get(extent), copysize);
	isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
	return ret;
}
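
/*
 * Editor's note: once no-move fails, this is the classic allocate-copy-free
 * fallback.  copysize is min(usize, oldusize), so a shrinking realloc copies
 * only the surviving bytes; isdalloct() then releases the old extent through
 * the normal internal deallocation path, via the tcache when one is given.
 */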

/*
 * junked_locked indicates whether the extent's data have been junk-filled, and
 * whether the arena's large_mtx is currently held.
 */
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool junked_locked) {
	if (!junked_locked) {
		/* See comments in arena_bin_slabs_full_insert(). */
		if (!arena_is_auto(arena)) {
			malloc_mutex_lock(tsdn, &arena->large_mtx);
			extent_list_remove(&arena->large, extent);
			malloc_mutex_unlock(tsdn, &arena->large_mtx);
		}
		large_dalloc_maybe_junk(extent_addr_get(extent),
		    extent_usize_get(extent));
	} else {
		malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
		if (!arena_is_auto(arena)) {
			extent_list_remove(&arena->large, extent);
		}
	}
	arena_extent_dalloc_large_prep(tsdn, arena, extent);
}
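
/*
 * Editor's note: deallocation is two-phase.  Prep does the bookkeeping
 * (unlinking from arena->large, junk fill, stats prep) and finish actually
 * returns the extent to the dirty list.  The junked_locked variant exists for
 * callers, such as the tcache flush path, that already hold large_mtx and
 * have junk-filled the data.
 */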

static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
}

void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
}

void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
}

void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
	arena_t *arena = extent_arena_get(extent);
	large_dalloc_prep_impl(tsdn, arena, extent, false);
	large_dalloc_finish_impl(tsdn, arena, extent);
	arena_decay_tick(tsdn, arena);
}
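
/*
 * Editor's note: large_dalloc() is the common full path: prep, finish, and a
 * decay tick so the arena's time-based purging of unused dirty pages can make
 * progress.
 */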

size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
	return extent_usize_get(extent);
}

prof_tctx_t *
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
	return extent_prof_tctx_get(extent);
}

void
large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
	extent_prof_tctx_set(extent, tctx);
}

void
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
	large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}
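
/*
 * Editor's note: (prof_tctx_t *)1U is a sentinel, distinct from any real heap
 * profiling context, marking the allocation as not sampled; the prof code
 * treats only tctx values greater than 1 as live contexts.
 */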