#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H

#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sz.h"

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
	/*
	 * No locking is used when reading prof_gdump_val in the fast path, so
	 * there are no guarantees regarding how long it will take for all
	 * threads to notice state changes.
	 */
	return prof_gdump_val;
}

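/*
 * Return the calling thread's tdata, creating or reinitializing it as needed
 * when create is true; may return NULL (or a PROF_TDATA_STATE_* sentinel) if
 * no usable tdata can be established.
 */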
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = tsd_prof_tdata_get(tsd);
	if (create) {
		if (unlikely(tdata == NULL)) {
			if (tsd_nominal(tsd)) {
				tdata = prof_tdata_init(tsd);
				tsd_prof_tdata_set(tsd, tdata);
			}
		} else if (unlikely(tdata->expired)) {
			tdata = prof_tdata_reinit(tsd, tdata);
			tsd_prof_tdata_set(tsd, tdata);
		}
		assert(tdata == NULL || tdata->attached);
	}

	return tdata;
}

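/*
 * The following accessors are thin wrappers around the corresponding arena_*
 * functions.  A tctx value of (prof_tctx_t *)(uintptr_t)1U marks an
 * allocation that is not sampled.
 */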
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_reset(tsdn, ptr, tctx);
}

JEMALLOC_ALWAYS_INLINE nstime_t
prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
    nstime_t t) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t);
}

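/*
 * Return true if the allocation should not be sampled, i.e. the thread's
 * bytes_until_sample counter has not yet been exhausted.  When update is
 * true, usize is subtracted from the counter (and written back if tsd is
 * nominal) as a side effect.
 */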
JEMALLOC_ALWAYS_INLINE bool
prof_sample_check(tsd_t *tsd, size_t usize, bool update) {
	ssize_t check = update ? 0 : usize;

	int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
	if (update) {
		bytes_until_sample -= usize;
		if (tsd_nominal(tsd)) {
			tsd_bytes_until_sample_set(tsd, bytes_until_sample);
		}
	}
	if (likely(bytes_until_sample >= check)) {
		return true;
	}

	return false;
}

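/*
 * Slow-path companion to prof_sample_check(): return true if the allocation
 * should not be sampled.  On the slow path the thread's tdata is looked up
 * (and stored to *tdata_out if non-NULL), and a new sample threshold is
 * computed when update is true.
 */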
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	/* Fastpath: no need to load tdata */
	if (likely(prof_sample_check(tsd, usize, update))) {
		return true;
	}

	bool booted = tsd_prof_tdata_get(tsd);
	tdata = prof_tdata_get(tsd, true);
	if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
		tdata = NULL;
	}

	if (tdata_out != NULL) {
		*tdata_out = tdata;
	}

	if (unlikely(tdata == NULL)) {
		return true;
	}

	/*
	 * If this was the first creation of tdata, then
	 * prof_tdata_get() reset bytes_until_sample, so decrement and
	 * check it again.
	 */
	if (!booted && prof_sample_check(tsd, usize, update)) {
		return true;
	}

	if (tsd_reentrancy_level_get(tsd) > 0) {
		return true;
	}
	/* Compute new sample threshold. */
	if (update) {
		prof_sample_threshold_update(tdata);
	}
	return !tdata->active;
}

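/*
 * Decide whether an allocation of usize bytes should be sampled.  Returns
 * (prof_tctx_t *)(uintptr_t)1U for an unsampled allocation, or the tctx
 * obtained from prof_lookup() for the captured backtrace otherwise.
 */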
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
	prof_tctx_t *ret;
	prof_tdata_t *tdata;
	prof_bt_t bt;

	assert(usize == sz_s2u(usize));

	if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
	    &tdata))) {
		ret = (prof_tctx_t *)(uintptr_t)1U;
	} else {
		bt_init(&bt, tdata->vec);
		prof_backtrace(&bt);
		ret = prof_lookup(tsd, &bt);
	}

	return ret;
}

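/*
 * Record a newly allocated object.  A sampled allocation (tctx > 1U) is
 * registered via prof_malloc_sample_object(); an unsampled one just has its
 * tctx slot set to the "not sampled" sentinel.
 */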
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
    prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(usize == isalloc(tsdn, ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_malloc_sample_object(tsdn, ptr, usize, tctx);
	} else {
		prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
		    (prof_tctx_t *)(uintptr_t)1U);
	}
}

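/*
 * Update profiling state across a reallocation: roll back an unnecessary
 * sample decision, register the new object if sampled, and release the old
 * sampled object (if any) last so that shared tctx counters stay consistent.
 */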
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx) {
	bool sampled, old_sampled, moved;

	cassert(config_prof);
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (prof_active && !updated && ptr != NULL) {
		assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
			/*
			 * Don't sample.  The usize passed to prof_alloc_prep()
			 * was larger than what actually got allocated, so a
			 * backtrace was captured for this allocation, even
			 * though its actual usize was insufficient to cross the
			 * sample threshold.
			 */
			prof_alloc_rollback(tsd, tctx, true);
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}

	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
	old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
	moved = (ptr != old_ptr);

	if (unlikely(sampled)) {
		prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
	} else if (moved) {
		prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
		    (prof_tctx_t *)(uintptr_t)1U);
	} else if (unlikely(old_sampled)) {
		/*
		 * prof_tctx_set() would work for the !moved case as well, but
		 * prof_tctx_reset() is slightly cheaper, and the proper thing
		 * to do here in the presence of explicit knowledge re: moved
		 * state.
		 */
		prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
	} else {
		assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
		    (uintptr_t)1U);
	}

	/*
	 * The prof_free_sampled_object() call must come after the
	 * prof_malloc_sample_object() call, because tctx and old_tctx may be
	 * the same, in which case reversing the call order could cause the tctx
	 * to be prematurely destroyed as a side effect of momentarily zeroed
	 * counters, if it has been tallied into tctx's counters.
	 */
	if (unlikely(old_sampled)) {
		prof_free_sampled_object(tsd, ptr, old_usize, old_tctx);
	}
}

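/*
 * Record the deallocation of ptr; only sampled objects (tctx > 1U) need to be
 * reported to the profiler.
 */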
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
	prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);

	cassert(config_prof);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_free_sampled_object(tsd, ptr, usize, tctx);
	}
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */