1 #ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
2 #define JEMALLOC_INTERNAL_PROF_STRUCTS_H
4 #include "jemalloc/internal/ckh.h"
5 #include "jemalloc/internal/mutex.h"
6 #include "jemalloc/internal/prng.h"
7 #include "jemalloc/internal/rb.h"
10 /* Backtrace, stored as len program counters. */
15 #ifdef JEMALLOC_PROF_LIBGCC
16 /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
24 #ifndef JEMALLOC_ATOMIC_U64
28 atomic_u64_t accumbytes;
33 /* Profiling counters. */
41 prof_tctx_state_initializing,
42 prof_tctx_state_nominal,
43 prof_tctx_state_dumping,
44 prof_tctx_state_purgatory /* Dumper must finish destroying. */
48 /* Thread data for thread that performed the allocation. */
52 * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
53 * defunct during teardown.
58 /* Profiling counters, protected by tdata->lock. */
61 /* Associated global context. */
65 * UID that distinguishes multiple tctx's created by the same thread,
66 * but coexisting in gctx->tctxs. There are two ways that such
67 * coexistence can occur:
68 * - A dumper thread can cause a tctx to be retained in the purgatory
70 * - Although a single "producer" thread must create all tctx's which
71 * share the same thr_uid, multiple "consumers" can each concurrently
72 * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
73 * gets called once each time cnts.cur{objs,bytes} drop to 0, but this
74 * threshold can be hit again before the first consumer finishes
75 * executing prof_tctx_destroy().
79 /* Linkage into gctx's tctxs. */
80 rb_node(prof_tctx_t) tctx_link;
83 * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
84 * sample vs destroy race.
88 /* Current dump-related state, protected by gctx->lock. */
89 prof_tctx_state_t state;
92 * Copy of cnts snapshotted during early dump phase, protected by
/*
 * Red-black tree of per-thread profile counter objects (prof_tctx_t), built
 * with the rb_tree() macro from rb.h; nodes link through tctx_link.  Used as
 * the type of gctx->tctxs.
 */
97 typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
100 /* Protects nlimbo, cnt_summed, and tctxs. */
101 malloc_mutex_t *lock;
104 * Number of threads that currently cause this gctx to be in a state of
105 * limbo due to one of:
106 * - Initializing this gctx.
107 * - Initializing per thread counters associated with this gctx.
108 * - Preparing to destroy this gctx.
109 * - Dumping a heap profile that includes this gctx.
110 * nlimbo must be 1 (single destroyer) in order to safely destroy the
116 * Tree of profile counters, one for each thread that has allocated in
119 prof_tctx_tree_t tctxs;
121 /* Linkage for tree of contexts to be dumped. */
122 rb_node(prof_gctx_t) dump_link;
124 /* Temporary storage for summation during dump. */
125 prof_cnt_t cnt_summed;
127 /* Associated backtrace. */
130 /* Backtrace vector, variable size, referred to by bt. */
/*
 * Red-black tree of global backtrace contexts (prof_gctx_t), built with the
 * rb_tree() macro from rb.h; nodes link through dump_link, i.e. this is the
 * tree of contexts gathered for a heap profile dump.
 */
133 typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
135 struct prof_tdata_s {
136 malloc_mutex_t *lock;
138 /* Monotonically increasing unique thread identifier. */
142 * Monotonically increasing discriminator among tdata structures
143 * associated with the same thr_uid.
145 uint64_t thr_discrim;
147 /* Included in heap profile dumps if non-NULL. */
153 rb_node(prof_tdata_t) tdata_link;
156 * Counter used to initialize prof_tctx_t's tctx_uid. No locking is
157 * necessary when incrementing this field, because only one thread ever
160 uint64_t tctx_uid_next;
163 * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
164 * backtraces for which it has non-zero allocation/deallocation counters
165 * associated with thread-specific prof_tctx_t objects. Other threads
166 * may write to prof_tctx_t contents when freeing associated objects.
170 /* Sampling state. */
172 uint64_t bytes_until_sample;
174 /* State used to avoid dumping while operating on prof internals. */
180 * Set to true during an early dump phase for tdata's which are
181 * currently being dumped. New threads' tdata's have this initialized
182 * to false so that they aren't accidentally included in later dump
188 * True if profiling is active for this tdata's thread
189 * (thread.prof.active mallctl).
193 /* Temporary storage for summation during dump. */
194 prof_cnt_t cnt_summed;
196 /* Backtrace vector, used for calls to prof_backtrace(). */
197 void *vec[PROF_BT_MAX];
/*
 * Red-black tree of per-thread profiling data structures (prof_tdata_t),
 * built with the rb_tree() macro from rb.h; nodes link through tdata_link.
 */
199 typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
201 #endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */