#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif

/******************************************************************************/
/* Data. */

bool    opt_prof = false;
bool    opt_prof_active = true;
bool    opt_prof_thread_active_init = true;
size_t  opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool    opt_prof_gdump = false;
bool    opt_prof_final = false;
bool    opt_prof_leak = false;
bool    opt_prof_accum = false;
char    opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool                    prof_active;
static malloc_mutex_t   prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool             prof_thread_active_init;
static malloc_mutex_t   prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool                    prof_gdump_val;
static malloc_mutex_t   prof_gdump_mtx;

uint64_t        prof_interval = 0;

size_t          lg_prof_sample;
/*
 * Table of mutexes that are shared among gctx's.  These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing, though, is that gctx's are
 * ephemeral, and destroying mutexes causes complications for systems that
 * allocate when creating/destroying mutexes.
 */
static malloc_mutex_t   *gctx_locks;
static unsigned         cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's.  No operations require
 * holding multiple tdata locks, so there is no problem with using them for
 * more than one tdata at the same time, even though a gctx lock may be
 * acquired while holding a tdata lock.
 */
static malloc_mutex_t   *tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *).  This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t            bt2gctx;
static malloc_mutex_t   bt2gctx_mtx;
/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t        tdatas;
static malloc_mutex_t           tdatas_mtx;

static uint64_t         next_thr_uid;
static malloc_mutex_t   next_thr_uid_mtx;

static malloc_mutex_t   prof_dump_seq_mtx;
static uint64_t         prof_dump_seq;
static uint64_t         prof_dump_iseq;
static uint64_t         prof_dump_mseq;
static uint64_t         prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.
 */
static malloc_mutex_t   prof_dump_mtx;
static char             prof_dump_buf[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PROF_DUMP_BUFSIZE
#else
    1
#endif
];
static size_t           prof_dump_buf_end;
static int              prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool             prof_booted = false;
/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool     prof_tctx_should_destroy(prof_tctx_t *tctx);
static void     prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool     prof_tdata_should_destroy(prof_tdata_t *tdata,
    bool even_if_attached);
static void     prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached);
static char     *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);

/******************************************************************************/
/* Red-black trees. */
JEMALLOC_INLINE_C int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
{
    uint64_t a_thr_uid = a->thr_uid;
    uint64_t b_thr_uid = b->thr_uid;
    int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
    if (ret == 0) {
        uint64_t a_thr_discrim = a->thr_discrim;
        uint64_t b_thr_discrim = b->thr_discrim;

        ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
            b_thr_discrim);
        if (ret == 0) {
            uint64_t a_tctx_uid = a->tctx_uid;
            uint64_t b_tctx_uid = b->tctx_uid;

            ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
                b_tctx_uid);
        }
    }
    return (ret);
}
rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)
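
/*
 * The (a > b) - (a < b) idiom used in the comparators above yields -1, 0, or
 * 1 without risking overflow from subtraction, which is exactly the contract
 * that rb_gen's generated functions (tctx_tree_insert(), tctx_tree_iter(),
 * etc.) expect from a comparator.  A minimal sketch of the idiom in
 * isolation, with assumed values:
 *
 *      uint64_t a = 3, b = 7;
 *      int cmp = (a > b) - (a < b);    // (0) - (1) == -1, so a sorts first
 */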
JEMALLOC_INLINE_C int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
{
    unsigned a_len = a->bt.len;
    unsigned b_len = b->bt.len;
    unsigned comp_len = (a_len < b_len) ? a_len : b_len;
    int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
    if (ret == 0)
        ret = (a_len > b_len) - (a_len < b_len);
    return (ret);
}
rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)
JEMALLOC_INLINE_C int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
{
    int ret;
    uint64_t a_uid = a->thr_uid;
    uint64_t b_uid = b->thr_uid;

    ret = ((a_uid > b_uid) - (a_uid < b_uid));
    if (ret == 0) {
        uint64_t a_discrim = a->thr_discrim;
        uint64_t b_discrim = b->thr_discrim;

        ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
    }
    return (ret);
}
rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)

/******************************************************************************/
void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
{
    prof_tdata_t *tdata;

    cassert(config_prof);

    if (updated) {
        /*
         * Compute a new sample threshold.  This isn't very important in
         * practice, because this function is rarely executed, so the
         * potential for sample bias is minimal except in contrived
         * programs.
         */
        tdata = prof_tdata_get(tsd, true);
        if (tdata != NULL)
            prof_sample_threshold_update(tdata);
    }

    if ((uintptr_t)tctx > (uintptr_t)1U) {
        malloc_mutex_lock(tctx->tdata->lock);
        tctx->prepared = false;
        if (prof_tctx_should_destroy(tctx))
            prof_tctx_destroy(tsd, tctx);
        else
            malloc_mutex_unlock(tctx->tdata->lock);
    }
}
void
prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
{

    prof_tctx_set(ptr, usize, tctx);

    malloc_mutex_lock(tctx->tdata->lock);
    tctx->cnts.curobjs++;
    tctx->cnts.curbytes += usize;
    if (opt_prof_accum) {
        tctx->cnts.accumobjs++;
        tctx->cnts.accumbytes += usize;
    }
    tctx->prepared = false;
    malloc_mutex_unlock(tctx->tdata->lock);
}
void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{

    malloc_mutex_lock(tctx->tdata->lock);
    assert(tctx->cnts.curobjs > 0);
    assert(tctx->cnts.curbytes >= usize);
    tctx->cnts.curobjs--;
    tctx->cnts.curbytes -= usize;

    if (prof_tctx_should_destroy(tctx))
        prof_tctx_destroy(tsd, tctx);
    else
        malloc_mutex_unlock(tctx->tdata->lock);
}
void
bt_init(prof_bt_t *bt, void **vec)
{

    cassert(config_prof);

    bt->vec = vec;
    bt->len = 0;
}
JEMALLOC_INLINE_C void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
{

    cassert(config_prof);
    assert(tdata == prof_tdata_get(tsd, false));

    if (tdata != NULL) {
        assert(!tdata->enq);
        tdata->enq = true;
    }

    malloc_mutex_lock(&bt2gctx_mtx);
}
JEMALLOC_INLINE_C void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
{

    cassert(config_prof);
    assert(tdata == prof_tdata_get(tsd, false));

    malloc_mutex_unlock(&bt2gctx_mtx);

    if (tdata != NULL) {
        bool idump, gdump;

        assert(tdata->enq);
        tdata->enq = false;
        idump = tdata->enq_idump;
        tdata->enq_idump = false;
        gdump = tdata->enq_gdump;
        tdata->enq_gdump = false;

        if (idump)
            prof_idump();
        if (gdump)
            prof_gdump();
    }
}
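
/*
 * prof_enter()/prof_leave() bracket bt2gctx_mtx critical sections.  Because
 * prof_idump() and prof_gdump() would otherwise re-enter that lock, they
 * merely set tdata->enq_{i,g}dump while tdata->enq is true, and prof_leave()
 * replays the deferred request after the lock is dropped.  A sketch of the
 * resulting call pattern (illustrative only):
 *
 *      prof_enter(tsd, tdata);
 *      // ... mutate bt2gctx; a dump trigger here only sets enq_idump ...
 *      prof_leave(tsd, tdata); // performs the deferred prof_idump()
 */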
#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt)
{
    int nframes;

    cassert(config_prof);
    assert(bt->len == 0);
    assert(bt->vec != NULL);

    nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
    if (nframes <= 0)
        return;
    bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{

    cassert(config_prof);

    return (_URC_NO_REASON);
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
    prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
    void *ip;

    cassert(config_prof);

    ip = (void *)_Unwind_GetIP(context);
    if (ip == NULL)
        return (_URC_END_OF_STACK);
    data->bt->vec[data->bt->len] = ip;
    data->bt->len++;
    if (data->bt->len == data->max)
        return (_URC_END_OF_STACK);

    return (_URC_NO_REASON);
}

void
prof_backtrace(prof_bt_t *bt)
{
    prof_unwind_data_t data = {bt, PROF_BT_MAX};

    cassert(config_prof);

    _Unwind_Backtrace(prof_unwind_callback, &data);
}
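
/*
 * _Unwind_Backtrace() invokes prof_unwind_callback() once per frame, from
 * the innermost frame outward, stopping as soon as the callback returns
 * anything other than _URC_NO_REASON.  A hedged sketch of driving the same
 * callback directly (vec/bt are hypothetical locals):
 *
 *      void *vec[PROF_BT_MAX];
 *      prof_bt_t bt;
 *      bt_init(&bt, vec);
 *      prof_unwind_data_t data = {&bt, PROF_BT_MAX};
 *      _Unwind_Backtrace(prof_unwind_callback, &data);
 *      // bt.len now holds the number of captured return addresses.
 */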
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt)
{
#define BT_FRAME(i)                                                     \
    if ((i) < PROF_BT_MAX) {                                            \
        void *p;                                                        \
        if (__builtin_frame_address(i) == 0)                            \
            return;                                                     \
        p = __builtin_return_address(i);                                \
        if (p == NULL)                                                  \
            return;                                                     \
        bt->vec[(i)] = p;                                               \
        bt->len = (i) + 1;                                              \
    } else                                                              \
        return;

    cassert(config_prof);

    BT_FRAME(0)
    BT_FRAME(1)
    BT_FRAME(2)
    /* ... unrolled one BT_FRAME(i) per frame, up to the PROF_BT_MAX limit. */
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt)
{

    cassert(config_prof);
    not_reached();
}
#endif
static malloc_mutex_t *
prof_gctx_mutex_choose(void)
{
    unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);

    return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
}
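
/*
 * gctx's round-robin over a fixed pool of PROF_NCTX_LOCKS mutexes rather
 * than each owning a mutex, so lock lifetime is decoupled from the
 * (ephemeral) gctx lifetime.  For example, assuming PROF_NCTX_LOCKS == 1024,
 * the 1025th gctx created wraps back around to gctx_locks[0]:
 *
 *      (1025 - 1) % 1024 == 0
 */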
static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid)
{

    return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
}

static prof_gctx_t *
prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
{
    /*
     * Create a single allocation that has space for vec of length bt->len.
     */
    size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
    prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, size,
        size2index(size), false, tcache_get(tsd, true), true, NULL, true);
    if (gctx == NULL)
        return (NULL);
    gctx->lock = prof_gctx_mutex_choose();
    /*
     * Set nlimbo to 1, in order to avoid a race condition with
     * prof_tctx_destroy()/prof_gctx_try_destroy().
     */
    gctx->nlimbo = 1;
    tctx_tree_new(&gctx->tctxs);
    /* Duplicate bt. */
    memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
    gctx->bt.vec = gctx->vec;
    gctx->bt.len = bt->len;
    return (gctx);
}
static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
    prof_tdata_t *tdata)
{

    cassert(config_prof);

    /*
     * Check that gctx is still unused by any thread cache before destroying
     * it.  prof_lookup() increments gctx->nlimbo in order to avoid a race
     * condition with this function, as does prof_tctx_destroy() in order to
     * avoid a race between the main body of prof_tctx_destroy() and entry
     * into this function.
     */
    prof_enter(tsd, tdata_self);
    malloc_mutex_lock(gctx->lock);
    assert(gctx->nlimbo != 0);
    if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
        /* Remove gctx from bt2gctx. */
        if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
            not_reached();
        prof_leave(tsd, tdata_self);
        /* Destroy gctx. */
        malloc_mutex_unlock(gctx->lock);
        idalloctm(tsd, gctx, tcache_get(tsd, false), true, true);
    } else {
        /*
         * Compensate for increment in prof_tctx_destroy() or
         * prof_lookup().
         */
        gctx->nlimbo--;
        malloc_mutex_unlock(gctx->lock);
        prof_leave(tsd, tdata_self);
    }
}
/* tctx->tdata->lock must be held. */
static bool
prof_tctx_should_destroy(prof_tctx_t *tctx)
{

    if (opt_prof_accum)
        return (false);
    if (tctx->cnts.curobjs != 0)
        return (false);
    if (tctx->prepared)
        return (false);
    return (true);
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx)
{

    if (opt_prof_accum)
        return (false);
    if (!tctx_tree_empty(&gctx->tctxs))
        return (false);
    if (gctx->nlimbo != 0)
        return (false);
    return (true);
}
/* tctx->tdata->lock is held upon entry, and released before return. */
static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
{
    prof_tdata_t *tdata = tctx->tdata;
    prof_gctx_t *gctx = tctx->gctx;
    bool destroy_tdata, destroy_tctx, destroy_gctx;

    assert(tctx->cnts.curobjs == 0);
    assert(tctx->cnts.curbytes == 0);
    assert(!opt_prof_accum);
    assert(tctx->cnts.accumobjs == 0);
    assert(tctx->cnts.accumbytes == 0);

    ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
    destroy_tdata = prof_tdata_should_destroy(tdata, false);
    malloc_mutex_unlock(tdata->lock);

    malloc_mutex_lock(gctx->lock);
    switch (tctx->state) {
    case prof_tctx_state_nominal:
        tctx_tree_remove(&gctx->tctxs, tctx);
        destroy_tctx = true;
        if (prof_gctx_should_destroy(gctx)) {
            /*
             * Increment gctx->nlimbo in order to keep another
             * thread from winning the race to destroy gctx while
             * this one has gctx->lock dropped.  Without this, it
             * would be possible for another thread to:
             *
             * 1) Sample an allocation associated with gctx.
             * 2) Deallocate the sampled object.
             * 3) Successfully prof_gctx_try_destroy(gctx).
             *
             * The result would be that gctx no longer exists by the
             * time this thread accesses it in
             * prof_gctx_try_destroy().
             */
            gctx->nlimbo++;
            destroy_gctx = true;
        } else
            destroy_gctx = false;
        break;
    case prof_tctx_state_dumping:
        /*
         * A dumping thread needs tctx to remain valid until dumping
         * has finished.  Change state such that the dumping thread will
         * complete destruction during a late dump iteration phase.
         */
        tctx->state = prof_tctx_state_purgatory;
        destroy_tctx = false;
        destroy_gctx = false;
        break;
    default:
        not_reached();
        destroy_tctx = false;
        destroy_gctx = false;
    }
    malloc_mutex_unlock(gctx->lock);
    if (destroy_gctx) {
        prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
            tdata);
    }

    if (destroy_tdata)
        prof_tdata_destroy(tsd, tdata, false);

    if (destroy_tctx)
        idalloctm(tsd, tctx, tcache_get(tsd, false), true, true);
}
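
/*
 * nlimbo acts as a reference count on gctx for threads operating on it
 * outside the tctx tree: prof_gctx_create() starts it at 1, and
 * prof_lookup_global(), prof_tctx_destroy(), and prof_dump_gctx_prep() each
 * increment it before dropping gctx->lock.  Every increment is paired with a
 * release, and prof_gctx_try_destroy() frees the gctx only when it holds the
 * last reference (nlimbo == 1) and no tctx's remain; otherwise it just
 * decrements.  The canonical pairing looks like:
 *
 *      gctx->nlimbo++;                 // pin gctx across the unlock
 *      malloc_mutex_unlock(gctx->lock);
 *      prof_gctx_try_destroy(tsd, tdata_self, gctx, tdata);  // unpin/free
 */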
static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
{
    union {
        prof_gctx_t *p;
        void        *v;
    } gctx;
    union {
        prof_bt_t   *p;
        void        *v;
    } btkey;
    bool new_gctx;

    prof_enter(tsd, tdata);
    if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
        /* bt has never been seen before.  Insert it. */
        gctx.p = prof_gctx_create(tsd, bt);
        if (gctx.v == NULL) {
            prof_leave(tsd, tdata);
            return (true);
        }
        btkey.p = &gctx.p->bt;
        if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
            /* OOM. */
            prof_leave(tsd, tdata);
            idalloctm(tsd, gctx.v, tcache_get(tsd, false), true,
                true);
            return (true);
        }
        new_gctx = true;
    } else {
        /*
         * Increment nlimbo, in order to avoid a race condition with
         * prof_tctx_destroy()/prof_gctx_try_destroy().
         */
        malloc_mutex_lock(gctx.p->lock);
        gctx.p->nlimbo++;
        malloc_mutex_unlock(gctx.p->lock);
        new_gctx = false;
    }
    prof_leave(tsd, tdata);

    *p_btkey = btkey.v;
    *p_gctx = gctx.p;
    *p_new_gctx = new_gctx;
    return (false);
}
prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt)
{
    union {
        prof_tctx_t *p;
        void        *v;
    } ret;
    prof_tdata_t *tdata;
    bool not_found;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, false);
    if (tdata == NULL)
        return (NULL);

    malloc_mutex_lock(tdata->lock);
    not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
    if (!not_found) /* Note double negative! */
        ret.p->prepared = true;
    malloc_mutex_unlock(tdata->lock);
    if (not_found) {
        tcache_t *tcache;
        void *btkey;
        prof_gctx_t *gctx;
        bool new_gctx, error;

        /*
         * This thread's cache lacks bt.  Look for it in the global
         * cache.
         */
        if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
            &new_gctx))
            return (NULL);

        /* Link a prof_tctx_t into gctx for this thread. */
        tcache = tcache_get(tsd, true);
        ret.v = iallocztm(tsd, sizeof(prof_tctx_t),
            size2index(sizeof(prof_tctx_t)), false, tcache, true, NULL,
            true);
        if (ret.p == NULL) {
            if (new_gctx)
                prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
            return (NULL);
        }
        ret.p->tdata = tdata;
        ret.p->thr_uid = tdata->thr_uid;
        ret.p->thr_discrim = tdata->thr_discrim;
        memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
        ret.p->gctx = gctx;
        ret.p->tctx_uid = tdata->tctx_uid_next++;
        ret.p->prepared = true;
        ret.p->state = prof_tctx_state_initializing;
        malloc_mutex_lock(tdata->lock);
        error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
        malloc_mutex_unlock(tdata->lock);
        if (error) {
            if (new_gctx)
                prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
            idalloctm(tsd, ret.v, tcache, true, true);
            return (NULL);
        }
        malloc_mutex_lock(gctx->lock);
        ret.p->state = prof_tctx_state_nominal;
        tctx_tree_insert(&gctx->tctxs, ret.p);
        gctx->nlimbo--;
        malloc_mutex_unlock(gctx->lock);
    }

    return (ret.p);
}
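
/*
 * prof_lookup() is the hot path taken for every sampled allocation: it first
 * consults the per-thread bt2tctx cache under tdata->lock, and only on a
 * miss falls back to the global bt2gctx table under bt2gctx_mtx.  A hedged
 * usage sketch (vec/bt are hypothetical locals):
 *
 *      void *vec[PROF_BT_MAX];
 *      prof_bt_t bt;
 *      bt_init(&bt, vec);
 *      prof_backtrace(&bt);
 *      prof_tctx_t *tctx = prof_lookup(tsd, &bt);
 *      // tctx is NULL on OOM; otherwise its counters are ready for
 *      // prof_malloc_sample_object().
 */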
void
prof_sample_threshold_update(prof_tdata_t *tdata)
{
    /*
     * The body of this function is compiled out unless heap profiling is
     * enabled, so that it is possible to compile jemalloc with floating
     * point support completely disabled.  Avoiding floating point code is
     * important on memory-constrained systems, but it also enables a
     * workaround for versions of glibc that don't properly save/restore
     * floating point registers during dynamic lazy symbol loading (which
     * internally calls into whatever malloc implementation happens to be
     * integrated into the application).  Note that some compilers (e.g.
     * gcc 4.8) may use floating point registers for fast memory moves, so
     * jemalloc must be compiled with such optimizations disabled (e.g.
     * -mno-sse) in order for the workaround to be complete.
     */
#ifdef JEMALLOC_PROF
    uint64_t r;
    double u;

    if (lg_prof_sample == 0) {
        tdata->bytes_until_sample = 0;
        return;
    }

    /*
     * Compute sample interval as a geometrically distributed random
     * variable with mean (2^lg_prof_sample).
     *
     *                             __         __
     *                             |   log(u)  |                    1
     * tdata->bytes_until_sample = | --------- |, where p = ----------------
     *                             | log(1-p)  |             2^lg_prof_sample
     *
     * For more information on the math, see:
     *
     *   Non-Uniform Random Variate Generation
     *   Luc Devroye
     *   Springer-Verlag, New York, 1986
     *   (http://luc.devroye.org/rnbookindex.html)
     */
    r = prng_lg_range(&tdata->prng_state, 53);
    u = (double)r * (1.0/9007199254740992.0L);
    tdata->bytes_until_sample = (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
        + (uint64_t)1U;
#endif
}
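
/*
 * Worked example (values assumed, not normative): with lg_prof_sample == 19
 * the mean sample interval is 2^19 = 524288 bytes, so p = 1/524288.  For a
 * uniform draw u = 0.5, the expression above evaluates to approximately
 *
 *      log(0.5) / log(1 - 1/524288) ~= 363409 bytes
 *
 * i.e. roughly 0.69 * 2^19, as expected for the median of a geometric
 * distribution with that mean.
 */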
#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
    size_t *tdata_count = (size_t *)arg;

    (*tdata_count)++;

    return (NULL);
}

size_t
prof_tdata_count(void)
{
    size_t tdata_count = 0;

    malloc_mutex_lock(&tdatas_mtx);
    tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
        (void *)&tdata_count);
    malloc_mutex_unlock(&tdatas_mtx);

    return (tdata_count);
}
#endif
#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
    size_t bt_count;
    tsd_t *tsd;
    prof_tdata_t *tdata;

    tsd = tsd_fetch();
    tdata = prof_tdata_get(tsd, false);
    if (tdata == NULL)
        return (0);

    malloc_mutex_lock(&bt2gctx_mtx);
    bt_count = ckh_count(&bt2gctx);
    malloc_mutex_unlock(&bt2gctx_mtx);

    return (bt_count);
}
#endif
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
prof_dump_open(bool propagate_err, const char *filename)
{
    int fd;

    fd = creat(filename, 0644);
    if (fd == -1 && !propagate_err) {
        malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
            filename);
        if (opt_abort)
            abort();
    }

    return (fd);
}
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open)
prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif
static bool
prof_dump_flush(bool propagate_err)
{
    bool ret = false;
    ssize_t err;

    cassert(config_prof);

    err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
    if (err == -1) {
        if (!propagate_err) {
            malloc_write("<jemalloc>: write() failed during heap "
                "profile flush\n");
            if (opt_abort)
                abort();
        }
        ret = true;
    }
    prof_dump_buf_end = 0;

    return (ret);
}
static bool
prof_dump_close(bool propagate_err)
{
    bool ret;

    assert(prof_dump_fd != -1);
    ret = prof_dump_flush(propagate_err);
    close(prof_dump_fd);
    prof_dump_fd = -1;

    return (ret);
}
static bool
prof_dump_write(bool propagate_err, const char *s)
{
    size_t i, slen, n;

    cassert(config_prof);

    i = 0;
    slen = strlen(s);
    while (i < slen) {
        /* Flush the buffer if it is full. */
        if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
            if (prof_dump_flush(propagate_err) && propagate_err)
                return (true);

        if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
            /* Finish writing. */
            n = slen - i;
        } else {
            /* Write as much of s as will fit. */
            n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
        }
        memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
        prof_dump_buf_end += n;
        i += n;
    }

    return (false);
}
JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...)
{
    bool ret;
    va_list ap;
    char buf[PROF_PRINTF_BUFSIZE];

    va_start(ap, format);
    malloc_vsnprintf(buf, sizeof(buf), format, ap);
    va_end(ap);
    ret = prof_dump_write(propagate_err, buf);

    return (ret);
}
/* tctx->tdata->lock is held. */
static void
prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
{

    malloc_mutex_lock(tctx->gctx->lock);

    switch (tctx->state) {
    case prof_tctx_state_initializing:
        malloc_mutex_unlock(tctx->gctx->lock);
        return;
    case prof_tctx_state_nominal:
        tctx->state = prof_tctx_state_dumping;
        malloc_mutex_unlock(tctx->gctx->lock);

        memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));

        tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
        tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
        if (opt_prof_accum) {
            tdata->cnt_summed.accumobjs +=
                tctx->dump_cnts.accumobjs;
            tdata->cnt_summed.accumbytes +=
                tctx->dump_cnts.accumbytes;
        }
        break;
    case prof_tctx_state_dumping:
    case prof_tctx_state_purgatory:
        not_reached();
    }
}
/* gctx->lock is held. */
static void
prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
{

    gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
    gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
    if (opt_prof_accum) {
        gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
        gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
    }
}
/* tctx->gctx->lock is held. */
static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{

    switch (tctx->state) {
    case prof_tctx_state_nominal:
        /* New since dumping started; ignore. */
        break;
    case prof_tctx_state_dumping:
    case prof_tctx_state_purgatory:
        prof_tctx_merge_gctx(tctx, tctx->gctx);
        break;
    default:
        not_reached();
    }

    return (NULL);
}
/* gctx->lock is held. */
static prof_tctx_t *
prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
    bool propagate_err = *(bool *)arg;

    switch (tctx->state) {
    case prof_tctx_state_initializing:
    case prof_tctx_state_nominal:
        /* Not captured by this dump. */
        break;
    case prof_tctx_state_dumping:
    case prof_tctx_state_purgatory:
        if (prof_dump_printf(propagate_err,
            "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
            "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
            tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
            tctx->dump_cnts.accumbytes))
            return (tctx);
        break;
    default:
        not_reached();
    }
    return (NULL);
}
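
/*
 * Each per-thread line in the dump takes the form
 *
 *        t<thr_uid>: <curobjs>: <curbytes> [<accumobjs>: <accumbytes>]
 *
 * so a thread with uid 42 holding 3 live sampled objects totaling 12288
 * bytes (with accumulation disabled) would emit, for example:
 *
 *        t42: 3: 12288 [0: 0]
 */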
/* tctx->gctx->lock is held. */
static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
    prof_tctx_t *ret;

    switch (tctx->state) {
    case prof_tctx_state_nominal:
        /* New since dumping started; ignore. */
        break;
    case prof_tctx_state_dumping:
        tctx->state = prof_tctx_state_nominal;
        break;
    case prof_tctx_state_purgatory:
        ret = tctx;
        goto label_return;
    default:
        not_reached();
    }

    ret = NULL;
label_return:
    return (ret);
}
static void
prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
{

    cassert(config_prof);

    malloc_mutex_lock(gctx->lock);

    /*
     * Increment nlimbo so that gctx won't go away before dump.
     * Additionally, link gctx into the dump list so that it is included in
     * prof_dump()'s second pass.
     */
    gctx->nlimbo++;
    gctx_tree_insert(gctxs, gctx);

    memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));

    malloc_mutex_unlock(gctx->lock);
}
static prof_gctx_t *
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
    size_t *leak_ngctx = (size_t *)arg;

    malloc_mutex_lock(gctx->lock);
    tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
    if (gctx->cnt_summed.curobjs != 0)
        (*leak_ngctx)++;
    malloc_mutex_unlock(gctx->lock);

    return (NULL);
}
static void
prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
{
    prof_tdata_t *tdata = prof_tdata_get(tsd, false);
    prof_gctx_t *gctx;

    /*
     * Standard tree iteration won't work here, because as soon as we
     * decrement gctx->nlimbo and unlock gctx, another thread can
     * concurrently destroy it, which will corrupt the tree.  Therefore,
     * tear down the tree one node at a time during iteration.
     */
    while ((gctx = gctx_tree_first(gctxs)) != NULL) {
        gctx_tree_remove(gctxs, gctx);
        malloc_mutex_lock(gctx->lock);
        {
            prof_tctx_t *next;

            next = NULL;
            do {
                prof_tctx_t *to_destroy =
                    tctx_tree_iter(&gctx->tctxs, next,
                    prof_tctx_finish_iter, NULL);
                if (to_destroy != NULL) {
                    next = tctx_tree_next(&gctx->tctxs,
                        to_destroy);
                    tctx_tree_remove(&gctx->tctxs,
                        to_destroy);
                    idalloctm(tsd, to_destroy,
                        tcache_get(tsd, false), true, true);
                } else
                    next = NULL;
            } while (next != NULL);
        }
        gctx->nlimbo--;
        if (prof_gctx_should_destroy(gctx)) {
            gctx->nlimbo++;
            malloc_mutex_unlock(gctx->lock);
            prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
        } else
            malloc_mutex_unlock(gctx->lock);
    }
}
static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
    prof_cnt_t *cnt_all = (prof_cnt_t *)arg;

    malloc_mutex_lock(tdata->lock);
    if (!tdata->expired) {
        size_t tabind;
        union {
            prof_tctx_t *p;
            void        *v;
        } tctx;

        tdata->dumping = true;
        memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
        for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
            &tctx.v);)
            prof_tctx_merge_tdata(tctx.p, tdata);

        cnt_all->curobjs += tdata->cnt_summed.curobjs;
        cnt_all->curbytes += tdata->cnt_summed.curbytes;
        if (opt_prof_accum) {
            cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
            cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
        }
    } else
        tdata->dumping = false;
    malloc_mutex_unlock(tdata->lock);

    return (NULL);
}
static prof_tdata_t *
prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
    bool propagate_err = *(bool *)arg;

    if (!tdata->dumping)
        return (NULL);

    if (prof_dump_printf(propagate_err,
        "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
        tdata->thr_uid, tdata->cnt_summed.curobjs,
        tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
        tdata->cnt_summed.accumbytes,
        (tdata->thread_name != NULL) ? " " : "",
        (tdata->thread_name != NULL) ? tdata->thread_name : ""))
        return (tdata);
    return (NULL);
}
#ifdef JEMALLOC_JET
#undef prof_dump_header
#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
#endif
static bool
prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
    bool ret;

    if (prof_dump_printf(propagate_err,
        "heap_v2/%"FMTu64"\n"
        "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
        ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
        cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
        return (true);

    malloc_mutex_lock(&tdatas_mtx);
    ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
        (void *)&propagate_err) != NULL);
    malloc_mutex_unlock(&tdatas_mtx);
    return (ret);
}
#ifdef JEMALLOC_JET
#undef prof_dump_header
#define prof_dump_header JEMALLOC_N(prof_dump_header)
prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
#endif
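
/*
 * The header ties the whole dump to the sample interval in force.  With
 * lg_prof_sample == 19 and illustrative (assumed) totals, the output begins:
 *
 *      heap_v2/524288
 *        t*: 1574: 644485568 [0: 0]
 *
 * jeprof uses the heap_v2/<interval> tag to scale sampled counts back up to
 * estimated true totals.
 */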
/* gctx->lock is held. */
static bool
prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
    prof_gctx_tree_t *gctxs)
{
    bool ret;
    unsigned i;

    cassert(config_prof);

    /* Avoid dumping gctx's that have no useful data. */
    if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
        (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
        assert(gctx->cnt_summed.curobjs == 0);
        assert(gctx->cnt_summed.curbytes == 0);
        assert(gctx->cnt_summed.accumobjs == 0);
        assert(gctx->cnt_summed.accumbytes == 0);
        ret = false;
        goto label_return;
    }

    if (prof_dump_printf(propagate_err, "@")) {
        ret = true;
        goto label_return;
    }
    for (i = 0; i < bt->len; i++) {
        if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
            (uintptr_t)bt->vec[i])) {
            ret = true;
            goto label_return;
        }
    }

    if (prof_dump_printf(propagate_err,
        "\n"
        "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
        gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
        gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
        ret = true;
        goto label_return;
    }

    if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
        (void *)&propagate_err) != NULL) {
        ret = true;
        goto label_return;
    }

    ret = false;
label_return:
    return (ret);
}
#ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
prof_open_maps(const char *format, ...)
{
    int mfd;
    va_list ap;
    char filename[PATH_MAX + 1];

    va_start(ap, format);
    malloc_vsnprintf(filename, sizeof(filename), format, ap);
    va_end(ap);
    mfd = open(filename, O_RDONLY);

    return (mfd);
}
#endif

static int
prof_getpid(void)
{

#ifdef _WIN32
    return (GetCurrentProcessId());
#else
    return (getpid());
#endif
}
static bool
prof_dump_maps(bool propagate_err)
{
    bool ret;
    int mfd;

    cassert(config_prof);
#ifdef __FreeBSD__
    mfd = prof_open_maps("/proc/curproc/map");
#elif defined(_WIN32)
    mfd = -1; /* Not implemented. */
#else
    {
        int pid = prof_getpid();

        mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
        if (mfd == -1)
            mfd = prof_open_maps("/proc/%d/maps", pid);
    }
#endif
    if (mfd != -1) {
        ssize_t nread;

        if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
            propagate_err) {
            ret = true;
            goto label_return;
        }
        nread = 0;
        do {
            prof_dump_buf_end += nread;
            if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
                /* Make space in prof_dump_buf before read(). */
                if (prof_dump_flush(propagate_err) &&
                    propagate_err) {
                    ret = true;
                    goto label_return;
                }
            }
            nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
                PROF_DUMP_BUFSIZE - prof_dump_buf_end);
        } while (nread > 0);
    } else {
        ret = true;
        goto label_return;
    }

    ret = false;
label_return:
    if (mfd != -1)
        close(mfd);
    return (ret);
}
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
    const char *filename)
{

    if (cnt_all->curbytes != 0) {
        malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
            FMTu64" object%s, %zu context%s\n",
            cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
            cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
            leak_ngctx, (leak_ngctx != 1) ? "s" : "");
        malloc_printf(
            "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
            filename);
    }
}
static prof_gctx_t *
prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
    prof_gctx_t *ret;
    bool propagate_err = *(bool *)arg;

    malloc_mutex_lock(gctx->lock);

    if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
        ret = gctx;
        goto label_return;
    }

    ret = NULL;
label_return:
    malloc_mutex_unlock(gctx->lock);
    return (ret);
}
static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
{
    prof_tdata_t *tdata;
    prof_cnt_t cnt_all;
    size_t tabind;
    union {
        prof_gctx_t *p;
        void        *v;
    } gctx;
    size_t leak_ngctx;
    prof_gctx_tree_t gctxs;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, true);
    if (tdata == NULL)
        return (true);

    malloc_mutex_lock(&prof_dump_mtx);
    prof_enter(tsd, tdata);

    /*
     * Put gctx's in limbo and clear their counters in preparation for
     * summing.
     */
    gctx_tree_new(&gctxs);
    for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
        prof_dump_gctx_prep(gctx.p, &gctxs);

    /*
     * Iterate over tdatas, and for the non-expired ones snapshot their tctx
     * stats and merge them into the associated gctx's.
     */
    memset(&cnt_all, 0, sizeof(prof_cnt_t));
    malloc_mutex_lock(&tdatas_mtx);
    tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
    malloc_mutex_unlock(&tdatas_mtx);

    /* Merge tctx stats into gctx's. */
    leak_ngctx = 0;
    gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);

    prof_leave(tsd, tdata);

    /* Create dump file. */
    if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
        goto label_open_close_error;

    /* Dump profile header. */
    if (prof_dump_header(propagate_err, &cnt_all))
        goto label_write_error;

    /* Dump per gctx profile stats. */
    if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
        (void *)&propagate_err) != NULL)
        goto label_write_error;

    /* Dump /proc/<pid>/maps if possible. */
    if (prof_dump_maps(propagate_err))
        goto label_write_error;

    if (prof_dump_close(propagate_err))
        goto label_open_close_error;

    prof_gctx_finish(tsd, &gctxs);
    malloc_mutex_unlock(&prof_dump_mtx);

    if (leakcheck)
        prof_leakcheck(&cnt_all, leak_ngctx, filename);

    return (false);
label_write_error:
    prof_dump_close(propagate_err);
label_open_close_error:
    prof_gctx_finish(tsd, &gctxs);
    malloc_mutex_unlock(&prof_dump_mtx);
    return (true);
}
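
/*
 * prof_dump() thus proceeds in three phases: (1) under bt2gctx_mtx, pin every
 * gctx and merge tctx counts; (2) with only prof_dump_mtx held, write the
 * header, per-gctx records, and the process map; (3) prof_gctx_finish() drops
 * the pins and destroys gctx's that emptied mid-dump.  A hedged sketch of
 * triggering a dump from application code via mallctl ("prof.dump" is part of
 * jemalloc's documented mallctl namespace; the filename is illustrative):
 *
 *      const char *fname = "/tmp/jeprof.out";
 *      mallctl("prof.dump", NULL, NULL, &fname, sizeof(const char *));
 */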
#define DUMP_FILENAME_BUFSIZE   (PATH_MAX + 1)
#define VSEQ_INVALID            UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(char *filename, char v, uint64_t vseq)
{

    cassert(config_prof);

    if (vseq != VSEQ_INVALID) {
        /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
        malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
            "%s.%d.%"FMTu64".%c%"FMTu64".heap",
            opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
    } else {
        /* "<prefix>.<pid>.<seq>.<v>.heap" */
        malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
            "%s.%d.%"FMTu64".%c.heap",
            opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
    }
    prof_dump_seq++;
}
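
/*
 * Example filenames (assuming opt_prof_prefix "jeprof" and pid 12345):
 * prof_fdump() passes VSEQ_INVALID and gets "jeprof.12345.0.f.heap" if no
 * earlier dump occurred, while the third interval-triggered dump would be
 * "jeprof.12345.<seq>.i2.heap", where <seq> is the global prof_dump_seq at
 * the time of the dump and the v<vseq> suffix counts dumps of that type.
 */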
static void
prof_fdump(void)
{
    tsd_t *tsd;
    char filename[DUMP_FILENAME_BUFSIZE];

    cassert(config_prof);
    assert(opt_prof_final);
    assert(opt_prof_prefix[0] != '\0');

    if (!prof_booted)
        return;
    tsd = tsd_fetch();

    malloc_mutex_lock(&prof_dump_seq_mtx);
    prof_dump_filename(filename, 'f', VSEQ_INVALID);
    malloc_mutex_unlock(&prof_dump_seq_mtx);
    prof_dump(tsd, false, filename, opt_prof_leak);
}
void
prof_idump(void)
{
    tsd_t *tsd;
    prof_tdata_t *tdata;

    cassert(config_prof);

    if (!prof_booted)
        return;
    tsd = tsd_fetch();
    tdata = prof_tdata_get(tsd, false);
    if (tdata == NULL)
        return;
    if (tdata->enq) {
        tdata->enq_idump = true;
        return;
    }

    if (opt_prof_prefix[0] != '\0') {
        char filename[PATH_MAX + 1];
        malloc_mutex_lock(&prof_dump_seq_mtx);
        prof_dump_filename(filename, 'i', prof_dump_iseq);
        prof_dump_iseq++;
        malloc_mutex_unlock(&prof_dump_seq_mtx);
        prof_dump(tsd, false, filename, false);
    }
}
bool
prof_mdump(const char *filename)
{
    tsd_t *tsd;
    char filename_buf[DUMP_FILENAME_BUFSIZE];

    cassert(config_prof);

    if (!opt_prof || !prof_booted)
        return (true);
    tsd = tsd_fetch();

    if (filename == NULL) {
        /* No filename specified, so automatically generate one. */
        if (opt_prof_prefix[0] == '\0')
            return (true);
        malloc_mutex_lock(&prof_dump_seq_mtx);
        prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
        prof_dump_mseq++;
        malloc_mutex_unlock(&prof_dump_seq_mtx);
        filename = filename_buf;
    }
    return (prof_dump(tsd, true, filename, false));
}
void
prof_gdump(void)
{
    tsd_t *tsd;
    prof_tdata_t *tdata;

    cassert(config_prof);

    if (!prof_booted)
        return;
    tsd = tsd_fetch();
    tdata = prof_tdata_get(tsd, false);
    if (tdata == NULL)
        return;
    if (tdata->enq) {
        tdata->enq_gdump = true;
        return;
    }

    if (opt_prof_prefix[0] != '\0') {
        char filename[DUMP_FILENAME_BUFSIZE];
        malloc_mutex_lock(&prof_dump_seq_mtx);
        prof_dump_filename(filename, 'u', prof_dump_useq);
        prof_dump_useq++;
        malloc_mutex_unlock(&prof_dump_seq_mtx);
        prof_dump(tsd, false, filename, false);
    }
}
static void
prof_bt_hash(const void *key, size_t r_hash[2])
{
    prof_bt_t *bt = (prof_bt_t *)key;

    cassert(config_prof);

    hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}

static bool
prof_bt_keycomp(const void *k1, const void *k2)
{
    const prof_bt_t *bt1 = (prof_bt_t *)k1;
    const prof_bt_t *bt2 = (prof_bt_t *)k2;

    cassert(config_prof);

    if (bt1->len != bt2->len)
        return (false);
    return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
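
/*
 * prof_bt_hash() and prof_bt_keycomp() travel as a pair: ckh_new() stores
 * both, hashing keys to buckets and falling back to keycomp on collision.
 * This is how bt2gctx (and each tdata->bt2tctx) is created, e.g.:
 *
 *      ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
 *          prof_bt_keycomp);
 */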
JEMALLOC_INLINE_C uint64_t
prof_thr_uid_alloc(void)
{
    uint64_t thr_uid;

    malloc_mutex_lock(&next_thr_uid_mtx);
    thr_uid = next_thr_uid;
    next_thr_uid++;
    malloc_mutex_unlock(&next_thr_uid_mtx);

    return (thr_uid);
}
static prof_tdata_t *
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
    char *thread_name, bool active)
{
    prof_tdata_t *tdata;
    tcache_t *tcache;

    cassert(config_prof);

    /* Initialize an empty cache for this thread. */
    tcache = tcache_get(tsd, true);
    tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t),
        size2index(sizeof(prof_tdata_t)), false, tcache, true, NULL, true);
    if (tdata == NULL)
        return (NULL);

    tdata->lock = prof_tdata_mutex_choose(thr_uid);
    tdata->thr_uid = thr_uid;
    tdata->thr_discrim = thr_discrim;
    tdata->thread_name = thread_name;
    tdata->attached = true;
    tdata->expired = false;
    tdata->tctx_uid_next = 0;

    if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
        prof_bt_hash, prof_bt_keycomp)) {
        idalloctm(tsd, tdata, tcache, true, true);
        return (NULL);
    }

    tdata->prng_state = (uint64_t)(uintptr_t)tdata;
    prof_sample_threshold_update(tdata);

    tdata->enq = false;
    tdata->enq_idump = false;
    tdata->enq_gdump = false;

    tdata->dumping = false;
    tdata->active = active;

    malloc_mutex_lock(&tdatas_mtx);
    tdata_tree_insert(&tdatas, tdata);
    malloc_mutex_unlock(&tdatas_mtx);

    return (tdata);
}

prof_tdata_t *
prof_tdata_init(tsd_t *tsd)
{

    return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
        prof_thread_active_init_get()));
}
/* tdata->lock must be held. */
static bool
prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
{

    if (tdata->attached && !even_if_attached)
        return (false);
    if (ckh_count(&tdata->bt2tctx) != 0)
        return (false);
    return (true);
}

/* tdatas_mtx must be held. */
static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached)
{
    tcache_t *tcache;

    assert(prof_tdata_should_destroy(tdata, even_if_attached));
    assert(tsd_prof_tdata_get(tsd) != tdata);

    tdata_tree_remove(&tdatas, tdata);

    tcache = tcache_get(tsd, false);
    if (tdata->thread_name != NULL)
        idalloctm(tsd, tdata->thread_name, tcache, true, true);
    ckh_delete(tsd, &tdata->bt2tctx);
    idalloctm(tsd, tdata, tcache, true, true);
}

static void
prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
{

    malloc_mutex_lock(&tdatas_mtx);
    prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
    malloc_mutex_unlock(&tdatas_mtx);
}
static void
prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
{
    bool destroy_tdata;

    malloc_mutex_lock(tdata->lock);
    if (tdata->attached) {
        destroy_tdata = prof_tdata_should_destroy(tdata, true);
        /*
         * Only detach if !destroy_tdata, because detaching would allow
         * another thread to win the race to destroy tdata.
         */
        if (!destroy_tdata)
            tdata->attached = false;
        tsd_prof_tdata_set(tsd, NULL);
    } else
        destroy_tdata = false;
    malloc_mutex_unlock(tdata->lock);
    if (destroy_tdata)
        prof_tdata_destroy(tsd, tdata, true);
}
prof_tdata_t *
prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
{
    uint64_t thr_uid = tdata->thr_uid;
    uint64_t thr_discrim = tdata->thr_discrim + 1;
    char *thread_name = (tdata->thread_name != NULL) ?
        prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
    bool active = tdata->active;

    prof_tdata_detach(tsd, tdata);
    return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
        active));
}
static bool
prof_tdata_expire(prof_tdata_t *tdata)
{
    bool destroy_tdata;

    malloc_mutex_lock(tdata->lock);
    if (!tdata->expired) {
        tdata->expired = true;
        destroy_tdata = tdata->attached ? false :
            prof_tdata_should_destroy(tdata, false);
    } else
        destroy_tdata = false;
    malloc_mutex_unlock(tdata->lock);

    return (destroy_tdata);
}

static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{

    return (prof_tdata_expire(tdata) ? tdata : NULL);
}
void
prof_reset(tsd_t *tsd, size_t lg_sample)
{
    prof_tdata_t *next;

    assert(lg_sample < (sizeof(uint64_t) << 3));

    malloc_mutex_lock(&prof_dump_mtx);
    malloc_mutex_lock(&tdatas_mtx);

    lg_prof_sample = lg_sample;

    next = NULL;
    do {
        prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
            prof_tdata_reset_iter, NULL);
        if (to_destroy != NULL) {
            next = tdata_tree_next(&tdatas, to_destroy);
            prof_tdata_destroy_locked(tsd, to_destroy, false);
        } else
            next = NULL;
    } while (next != NULL);

    malloc_mutex_unlock(&tdatas_mtx);
    malloc_mutex_unlock(&prof_dump_mtx);
}
void
prof_tdata_cleanup(tsd_t *tsd)
{
    prof_tdata_t *tdata;

    if (!config_prof)
        return;

    tdata = tsd_prof_tdata_get(tsd);
    if (tdata != NULL)
        prof_tdata_detach(tsd, tdata);
}
bool
prof_active_get(void)
{
    bool prof_active_current;

    malloc_mutex_lock(&prof_active_mtx);
    prof_active_current = prof_active;
    malloc_mutex_unlock(&prof_active_mtx);
    return (prof_active_current);
}

bool
prof_active_set(bool active)
{
    bool prof_active_old;

    malloc_mutex_lock(&prof_active_mtx);
    prof_active_old = prof_active;
    prof_active = active;
    malloc_mutex_unlock(&prof_active_mtx);
    return (prof_active_old);
}
const char *
prof_thread_name_get(void)
{
    tsd_t *tsd;
    prof_tdata_t *tdata;

    tsd = tsd_fetch();
    tdata = prof_tdata_get(tsd, true);
    if (tdata == NULL)
        return ("");
    return (tdata->thread_name != NULL ? tdata->thread_name : "");
}
static char *
prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
{
    char *ret;
    size_t size;

    if (thread_name == NULL)
        return (NULL);

    size = strlen(thread_name) + 1;
    if (size == 1)
        return ("");

    ret = iallocztm(tsd, size, size2index(size), false, tcache_get(tsd,
        true), true, NULL, true);
    if (ret == NULL)
        return (NULL);
    memcpy(ret, thread_name, size);
    return (ret);
}
int
prof_thread_name_set(tsd_t *tsd, const char *thread_name)
{
    prof_tdata_t *tdata;
    unsigned i;
    char *s;

    tdata = prof_tdata_get(tsd, true);
    if (tdata == NULL)
        return (EAGAIN);

    /* Validate input. */
    if (thread_name == NULL)
        return (EFAULT);
    for (i = 0; thread_name[i] != '\0'; i++) {
        char c = thread_name[i];
        if (!isgraph(c) && !isblank(c))
            return (EFAULT);
    }

    s = prof_thread_name_alloc(tsd, thread_name);
    if (s == NULL)
        return (EAGAIN);

    if (tdata->thread_name != NULL) {
        idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
            true, true);
        tdata->thread_name = NULL;
    }
    if (strlen(s) > 0)
        tdata->thread_name = s;
    return (0);
}
bool
prof_thread_active_get(void)
{
    tsd_t *tsd;
    prof_tdata_t *tdata;

    tsd = tsd_fetch();
    tdata = prof_tdata_get(tsd, true);
    if (tdata == NULL)
        return (false);
    return (tdata->active);
}

bool
prof_thread_active_set(bool active)
{
    tsd_t *tsd;
    prof_tdata_t *tdata;

    tsd = tsd_fetch();
    tdata = prof_tdata_get(tsd, true);
    if (tdata == NULL)
        return (true);
    tdata->active = active;
    return (false);
}
bool
prof_thread_active_init_get(void)
{
    bool active_init;

    malloc_mutex_lock(&prof_thread_active_init_mtx);
    active_init = prof_thread_active_init;
    malloc_mutex_unlock(&prof_thread_active_init_mtx);
    return (active_init);
}

bool
prof_thread_active_init_set(bool active_init)
{
    bool active_init_old;

    malloc_mutex_lock(&prof_thread_active_init_mtx);
    active_init_old = prof_thread_active_init;
    prof_thread_active_init = active_init;
    malloc_mutex_unlock(&prof_thread_active_init_mtx);
    return (active_init_old);
}
bool
prof_gdump_get(void)
{
    bool prof_gdump_current;

    malloc_mutex_lock(&prof_gdump_mtx);
    prof_gdump_current = prof_gdump_val;
    malloc_mutex_unlock(&prof_gdump_mtx);
    return (prof_gdump_current);
}

bool
prof_gdump_set(bool gdump)
{
    bool prof_gdump_old;

    malloc_mutex_lock(&prof_gdump_mtx);
    prof_gdump_old = prof_gdump_val;
    prof_gdump_val = gdump;
    malloc_mutex_unlock(&prof_gdump_mtx);
    return (prof_gdump_old);
}
void
prof_boot0(void)
{

    cassert(config_prof);

    memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
        sizeof(PROF_PREFIX_DEFAULT));
}

void
prof_boot1(void)
{

    cassert(config_prof);

    /*
     * opt_prof must be in its final state before any arenas are
     * initialized, so this function must be executed early.
     */

    if (opt_prof_leak && !opt_prof) {
        /*
         * Enable opt_prof, but in such a way that profiles are never
         * automatically dumped.
         */
        opt_prof = true;
        opt_prof_gdump = false;
    } else if (opt_prof) {
        if (opt_lg_prof_interval >= 0) {
            prof_interval = (((uint64_t)1U) <<
                opt_lg_prof_interval);
        }
    }
}
bool
prof_boot2(void)
{

    cassert(config_prof);

    if (opt_prof) {
        tsd_t *tsd;
        unsigned i;

        lg_prof_sample = opt_lg_prof_sample;

        prof_active = opt_prof_active;
        if (malloc_mutex_init(&prof_active_mtx))
            return (true);

        prof_gdump_val = opt_prof_gdump;
        if (malloc_mutex_init(&prof_gdump_mtx))
            return (true);

        prof_thread_active_init = opt_prof_thread_active_init;
        if (malloc_mutex_init(&prof_thread_active_init_mtx))
            return (true);

        tsd = tsd_fetch();
        if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
            prof_bt_keycomp))
            return (true);
        if (malloc_mutex_init(&bt2gctx_mtx))
            return (true);

        tdata_tree_new(&tdatas);
        if (malloc_mutex_init(&tdatas_mtx))
            return (true);

        next_thr_uid = 0;
        if (malloc_mutex_init(&next_thr_uid_mtx))
            return (true);

        if (malloc_mutex_init(&prof_dump_seq_mtx))
            return (true);
        if (malloc_mutex_init(&prof_dump_mtx))
            return (true);

        if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
            atexit(prof_fdump) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }

        gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
            sizeof(malloc_mutex_t));
        if (gctx_locks == NULL)
            return (true);
        for (i = 0; i < PROF_NCTX_LOCKS; i++) {
            if (malloc_mutex_init(&gctx_locks[i]))
                return (true);
        }

        tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
            sizeof(malloc_mutex_t));
        if (tdata_locks == NULL)
            return (true);
        for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
            if (malloc_mutex_init(&tdata_locks[i]))
                return (true);
        }
    }

#ifdef JEMALLOC_PROF_LIBGCC
    /*
     * Cause the backtracing machinery to allocate its internal state
     * before enabling profiling.
     */
    _Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

    prof_booted = true;

    return (false);
}
void
prof_prefork(void)
{

    if (opt_prof) {
        unsigned i;

        malloc_mutex_prefork(&tdatas_mtx);
        malloc_mutex_prefork(&bt2gctx_mtx);
        malloc_mutex_prefork(&next_thr_uid_mtx);
        malloc_mutex_prefork(&prof_dump_seq_mtx);
        for (i = 0; i < PROF_NCTX_LOCKS; i++)
            malloc_mutex_prefork(&gctx_locks[i]);
        for (i = 0; i < PROF_NTDATA_LOCKS; i++)
            malloc_mutex_prefork(&tdata_locks[i]);
    }
}
void
prof_postfork_parent(void)
{

    if (opt_prof) {
        unsigned i;

        for (i = 0; i < PROF_NTDATA_LOCKS; i++)
            malloc_mutex_postfork_parent(&tdata_locks[i]);
        for (i = 0; i < PROF_NCTX_LOCKS; i++)
            malloc_mutex_postfork_parent(&gctx_locks[i]);
        malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
        malloc_mutex_postfork_parent(&next_thr_uid_mtx);
        malloc_mutex_postfork_parent(&bt2gctx_mtx);
        malloc_mutex_postfork_parent(&tdatas_mtx);
    }
}
void
prof_postfork_child(void)
{

    if (opt_prof) {
        unsigned i;

        for (i = 0; i < PROF_NTDATA_LOCKS; i++)
            malloc_mutex_postfork_child(&tdata_locks[i]);
        for (i = 0; i < PROF_NCTX_LOCKS; i++)
            malloc_mutex_postfork_child(&gctx_locks[i]);
        malloc_mutex_postfork_child(&prof_dump_seq_mtx);
        malloc_mutex_postfork_child(&next_thr_uid_mtx);
        malloc_mutex_postfork_child(&bt2gctx_mtx);
        malloc_mutex_postfork_child(&tdatas_mtx);
    }
}

/******************************************************************************/