#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"

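/*
 * Profiling helpers.  A prof_tctx_t is only tracked for large allocations;
 * small (slab-backed) allocations share the sentinel value (prof_tctx_t *)1U.
 */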
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	/* Static check. */
	if (alloc_ctx == NULL) {
		const extent_t *extent = iealloc(tsdn, ptr);
		if (unlikely(!extent_slab_get(extent))) {
			return large_prof_tctx_get(tsdn, extent);
		}
	} else {
		if (unlikely(!alloc_ctx->slab)) {
			return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
		}
	}

	return (prof_tctx_t *)(uintptr_t)1U;
}

JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	/* Static check. */
	if (alloc_ctx == NULL) {
		extent_t *extent = iealloc(tsdn, ptr);
		if (unlikely(!extent_slab_get(extent))) {
			large_prof_tctx_set(tsdn, extent, tctx);
		}
	} else {
		if (unlikely(!alloc_ctx->slab)) {
			large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
		}
	}
}

static inline void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_t *extent = iealloc(tsdn, ptr);
	assert(!extent_slab_get(extent));

	large_prof_tctx_reset(tsdn, extent);
}

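/*
 * Decay ticking: each call advances the calling thread's decay ticker for
 * this arena; when the ticker fires, arena_decay() purges unused dirty and
 * muzzy pages.
 */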
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
	tsd_t *tsd;
	ticker_t *decay_ticker;

	if (unlikely(tsdn_null(tsdn))) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
	if (unlikely(decay_ticker == NULL)) {
		return;
	}
	if (unlikely(ticker_ticks(decay_ticker, nticks))) {
		arena_decay(tsdn, arena, false, false);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);

	arena_decay_ticks(tsdn, arena, 1);
}

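/*
 * Allocation fast path: requests that fit the thread cache are served from
 * tcache bins; everything else (or a NULL tcache) falls through to
 * arena_malloc_hard().
 */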
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(size != 0);

	if (likely(tcache != NULL)) {
		if (likely(size <= SMALL_MAXCLASS)) {
			return tcache_alloc_small(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		if (likely(size <= tcache_maxclass)) {
			return tcache_alloc_large(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		/* (size > tcache_maxclass) case falls through. */
		assert(size > tcache_maxclass);
	}

	return arena_malloc_hard(tsdn, arena, size, ind, zero);
}

JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
	return extent_arena_get(iealloc(tsdn, ptr));
}

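/*
 * arena_salloc() assumes ptr is a valid allocation owned by jemalloc;
 * arena_vsalloc() below tolerates arbitrary pointers and returns 0 for
 * addresses jemalloc does not manage.
 */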
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	assert(szind != NSIZES);

	return sz_index2size(szind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
	/*
	 * Return 0 if ptr is not within an extent managed by jemalloc.  This
	 * function has two extra costs relative to isalloc():
	 * - The rtree calls cannot claim to be dependent lookups, which induces
	 *   rtree lookup load dependencies.
	 * - The lookup may fail, so there is an extra branch to check for
	 *   failure.
	 */

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent;
	szind_t szind;
	if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, false, &extent, &szind)) {
		return 0;
	}

	if (extent == NULL) {
		return 0;
	}
	assert(extent_state_get(extent) == extent_state_active);
	/* Only slab members should be looked up via interior pointers. */
	assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));

	assert(szind != NSIZES);

	return sz_index2size(szind);
}

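/*
 * tcache-less deallocation: read the size class and slab bit from the
 * extents rtree, then free into the slab (small) or as a large extent.
 */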
static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
	assert(ptr != NULL);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	szind_t szind;
	bool slab;
	rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    true, &szind, &slab);

	if (config_debug) {
		extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(szind < NSIZES);
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		extent_t *extent = iealloc(tsdn, ptr);
		large_dalloc(tsdn, extent);
	}
}

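/*
 * Deallocation with an optional alloc_ctx hint; when the caller already
 * knows the size class and slab bit, the rtree lookup is skipped.
 */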
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	if (unlikely(tcache == NULL)) {
		arena_dalloc_no_tcache(tsdn, ptr);
		return;
	}

	szind_t szind;
	bool slab;
	rtree_ctx_t *rtree_ctx;
	if (alloc_ctx != NULL) {
		szind = alloc_ctx->szind;
		slab = alloc_ctx->slab;
		assert(szind != NSIZES);
	} else {
		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);
	}

	if (config_debug) {
		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(szind < NSIZES);
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
		    slow_path);
	} else {
		if (szind < nhbins) {
			if (config_prof && unlikely(szind < NBINS)) {
				arena_dalloc_promoted(tsdn, ptr, tcache,
				    slow_path);
			} else {
				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
				    szind, slow_path);
			}
		} else {
			extent_t *extent = iealloc(tsdn, ptr);
			large_dalloc(tsdn, extent);
		}
	}
}

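/*
 * Sized deallocation.  Without profiling, szind/slab can be derived from the
 * caller-supplied size; with profiling, a sampled small object may have been
 * promoted to a larger class, so the rtree metadata is authoritative.
 */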
static inline void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
	assert(ptr != NULL);
	assert(size <= LARGE_MAXCLASS);

	szind_t szind;
	bool slab;
	if (!config_prof || !opt_prof) {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		szind = sz_size2index(size);
		slab = (szind < NBINS);
	}

	if ((config_prof && opt_prof) || config_debug) {
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);

		assert(szind == sz_size2index(size));
		assert((config_prof && opt_prof) || slab == (szind < NBINS));

		if (config_debug) {
			extent_t *extent = rtree_extent_read(tsdn,
			    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
			assert(szind == extent_szind_get(extent));
			assert(slab == extent_slab_get(extent));
		}
	}

	if (likely(slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		extent_t *extent = iealloc(tsdn, ptr);
		large_dalloc(tsdn, extent);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);
	assert(size <= LARGE_MAXCLASS);

	if (unlikely(tcache == NULL)) {
		arena_sdalloc_no_tcache(tsdn, ptr, size);
		return;
	}

	szind_t szind;
	bool slab;
	UNUSED alloc_ctx_t local_ctx;
	if (config_prof && opt_prof) {
		if (alloc_ctx == NULL) {
			/* Uncommon case and should be a static check. */
			rtree_ctx_t rtree_ctx_fallback;
			rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
			    &rtree_ctx_fallback);
			rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)ptr, true, &local_ctx.szind,
			    &local_ctx.slab);
			assert(local_ctx.szind == sz_size2index(size));
			alloc_ctx = &local_ctx;
		}
		slab = alloc_ctx->slab;
		szind = alloc_ctx->szind;
	} else {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		szind = sz_size2index(size);
		slab = (szind < NBINS);
	}

	if (config_debug) {
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);
		extent_t *extent = rtree_extent_read(tsdn,
		    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
		    slow_path);
	} else {
		if (szind < nhbins) {
			if (config_prof && unlikely(szind < NBINS)) {
				arena_dalloc_promoted(tsdn, ptr, tcache,
				    slow_path);
			} else {
				tcache_dalloc_large(tsdn_tsd(tsdn),
				    tcache, ptr, szind, slow_path);
			}
		} else {
			extent_t *extent = iealloc(tsdn, ptr);
			large_dalloc(tsdn, extent);
		}
	}
}

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */