1 #define JEMALLOC_PROF_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3 /******************************************************************************/
5 #ifdef JEMALLOC_PROF_LIBUNWIND
10 #ifdef JEMALLOC_PROF_LIBGCC
14 /******************************************************************************/
17 bool opt_prof = false;
18 bool opt_prof_active = true;
19 bool opt_prof_thread_active_init = true;
20 size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
21 ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
22 bool opt_prof_gdump = false;
23 bool opt_prof_final = false;
24 bool opt_prof_leak = false;
25 bool opt_prof_accum = false;
27 /* Minimize memory bloat for non-prof builds. */
34 * Initialized as opt_prof_active, and accessed via
35 * prof_active_[gs]et{_unlocked,}().
38 static malloc_mutex_t prof_active_mtx;
41 * Initialized as opt_prof_thread_active_init, and accessed via
42 * prof_thread_active_init_[gs]et().
44 static bool prof_thread_active_init;
45 static malloc_mutex_t prof_thread_active_init_mtx;
48 * Initialized as opt_prof_gdump, and accessed via
49 * prof_gdump_[gs]et{_unlocked,}().
52 static malloc_mutex_t prof_gdump_mtx;
54 uint64_t prof_interval = 0;
56 size_t lg_prof_sample;
59 * Table of mutexes that are shared among gctx's. These are leaf locks, so
60 * there is no problem with using them for more than one gctx at the same time.
61 * The primary motivation for this sharing, though, is that gctx's are ephemeral,
62 * and destroying mutexes causes complications for systems that allocate when
63 * creating/destroying mutexes.
65 static malloc_mutex_t *gctx_locks;
66 static unsigned cum_gctxs; /* Atomic counter. */
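/*
 * Illustrative note: a gctx's lock is drawn from this shared table rather than
 * embedded in the gctx itself; the Nth gctx ever created is assigned
 * gctx_locks[(N - 1) % PROF_NCTX_LOCKS] (see prof_gctx_mutex_choose() below),
 * so mutex lifetime is decoupled from the short-lived gctx objects.
 */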
69 * Table of mutexes that are shared among tdata's. No operations require
70 * holding multiple tdata locks, so there is no problem with using them for more
71 * than one tdata at the same time, even though a gctx lock may be acquired
72 * while holding a tdata lock.
74 static malloc_mutex_t *tdata_locks;
77 * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
78 * structure that knows about all backtraces currently captured.
81 static malloc_mutex_t bt2gctx_mtx;
84 * Tree of all extant prof_tdata_t structures, regardless of state,
85 * {attached,detached,expired}.
87 static prof_tdata_tree_t tdatas;
88 static malloc_mutex_t tdatas_mtx;
90 static uint64_t next_thr_uid;
91 static malloc_mutex_t next_thr_uid_mtx;
93 static malloc_mutex_t prof_dump_seq_mtx;
94 static uint64_t prof_dump_seq;
95 static uint64_t prof_dump_iseq;
96 static uint64_t prof_dump_mseq;
97 static uint64_t prof_dump_useq;
100 * This buffer is rather large for stack allocation, so use a single buffer for all profile dumps.
103 static malloc_mutex_t prof_dump_mtx;
104 static char prof_dump_buf[
105 /* Minimize memory bloat for non-prof builds. */
112 static size_t prof_dump_buf_end;
113 static int prof_dump_fd;
115 /* Do not dump any profiles until bootstrapping is complete. */
116 static bool prof_booted = false;
118 /******************************************************************************/
120 * Function prototypes for static functions that are referenced prior to definition.
124 static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
125 static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
126 static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
127 bool even_if_attached);
128 static void prof_tdata_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
129 bool even_if_attached);
130 static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
132 /******************************************************************************/
133 /* Red-black trees. */
135 JEMALLOC_INLINE_C int
136 prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
138 uint64_t a_thr_uid = a->thr_uid;
139 uint64_t b_thr_uid = b->thr_uid;
140 int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
142 uint64_t a_thr_discrim = a->thr_discrim;
143 uint64_t b_thr_discrim = b->thr_discrim;
144 ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
147 uint64_t a_tctx_uid = a->tctx_uid;
148 uint64_t b_tctx_uid = b->tctx_uid;
149 ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
156 rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
157 tctx_link, prof_tctx_comp)
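/*
 * Note: rb_gen() expands to the static tctx_tree_{new,insert,remove,first,next,
 * iter,...} functions used by the dump and teardown code below; the rb_gen()
 * invocations further down generate the analogous gctx_tree_*() and
 * tdata_tree_*() APIs.
 */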
159 JEMALLOC_INLINE_C int
160 prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
162 unsigned a_len = a->bt.len;
163 unsigned b_len = b->bt.len;
164 unsigned comp_len = (a_len < b_len) ? a_len : b_len;
165 int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
167 ret = (a_len > b_len) - (a_len < b_len);
171 rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
174 JEMALLOC_INLINE_C int
175 prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
178 uint64_t a_uid = a->thr_uid;
179 uint64_t b_uid = b->thr_uid;
181 ret = ((a_uid > b_uid) - (a_uid < b_uid));
183 uint64_t a_discrim = a->thr_discrim;
184 uint64_t b_discrim = b->thr_discrim;
186 ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
191 rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
194 /******************************************************************************/
197 prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
201 cassert(config_prof);
205 * Compute a new sample threshold. This isn't very important in
206 * practice, because this function is rarely executed, so the
207 * potential for sample bias is minimal except in contrived programs.
210 tdata = prof_tdata_get(tsd, true);
212 prof_sample_threshold_update(tdata);
215 if ((uintptr_t)tctx > (uintptr_t)1U) {
216 malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
217 tctx->prepared = false;
218 if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
219 prof_tctx_destroy(tsd, tctx);
221 malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
226 prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
230 prof_tctx_set(tsdn, ptr, usize, tctx);
232 malloc_mutex_lock(tsdn, tctx->tdata->lock);
233 tctx->cnts.curobjs++;
234 tctx->cnts.curbytes += usize;
235 if (opt_prof_accum) {
236 tctx->cnts.accumobjs++;
237 tctx->cnts.accumbytes += usize;
239 tctx->prepared = false;
240 malloc_mutex_unlock(tsdn, tctx->tdata->lock);
244 prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
247 malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
248 assert(tctx->cnts.curobjs > 0);
249 assert(tctx->cnts.curbytes >= usize);
250 tctx->cnts.curobjs--;
251 tctx->cnts.curbytes -= usize;
253 if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
254 prof_tctx_destroy(tsd, tctx);
256 malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
260 bt_init(prof_bt_t *bt, void **vec)
263 cassert(config_prof);
269 JEMALLOC_INLINE_C void
270 prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
273 cassert(config_prof);
274 assert(tdata == prof_tdata_get(tsd, false));
281 malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
284 JEMALLOC_INLINE_C void
285 prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
288 cassert(config_prof);
289 assert(tdata == prof_tdata_get(tsd, false));
291 malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
298 idump = tdata->enq_idump;
299 tdata->enq_idump = false;
300 gdump = tdata->enq_gdump;
301 tdata->enq_gdump = false;
304 prof_idump(tsd_tsdn(tsd));
306 prof_gdump(tsd_tsdn(tsd));
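/*
 * Note: prof_idump()/prof_gdump() merely set tdata->enq_{i,g}dump when called
 * while this thread holds bt2gctx_mtx (i.e. between prof_enter() and
 * prof_leave()); the deferred dumps are carried out here, after the mutex has
 * been dropped.
 */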
310 #ifdef JEMALLOC_PROF_LIBUNWIND
312 prof_backtrace(prof_bt_t *bt)
316 cassert(config_prof);
317 assert(bt->len == 0);
318 assert(bt->vec != NULL);
320 nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
325 #elif (defined(JEMALLOC_PROF_LIBGCC))
326 static _Unwind_Reason_Code
327 prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
330 cassert(config_prof);
332 return (_URC_NO_REASON);
335 static _Unwind_Reason_Code
336 prof_unwind_callback(struct _Unwind_Context *context, void *arg)
338 prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
341 cassert(config_prof);
343 ip = (void *)_Unwind_GetIP(context);
345 return (_URC_END_OF_STACK);
346 data->bt->vec[data->bt->len] = ip;
348 if (data->bt->len == data->max)
349 return (_URC_END_OF_STACK);
351 return (_URC_NO_REASON);
355 prof_backtrace(prof_bt_t *bt)
357 prof_unwind_data_t data = {bt, PROF_BT_MAX};
359 cassert(config_prof);
361 _Unwind_Backtrace(prof_unwind_callback, &data);
363 #elif (defined(JEMALLOC_PROF_GCC))
365 prof_backtrace(prof_bt_t *bt)
367 #define BT_FRAME(i) \
368 if ((i) < PROF_BT_MAX) { \
370 if (__builtin_frame_address(i) == 0) \
372 p = __builtin_return_address(i); \
380 cassert(config_prof);
526 prof_backtrace(prof_bt_t *bt)
529 cassert(config_prof);
534 static malloc_mutex_t *
535 prof_gctx_mutex_choose(void)
537 unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);
539 return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
542 static malloc_mutex_t *
543 prof_tdata_mutex_choose(uint64_t thr_uid)
546 return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
550 prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
553 * Create a single allocation that has space for vec of length bt->len.
555 size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
556 prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
557 size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
561 gctx->lock = prof_gctx_mutex_choose();
563 * Set nlimbo to 1, in order to avoid a race condition with
564 * prof_tctx_destroy()/prof_gctx_try_destroy().
567 tctx_tree_new(&gctx->tctxs);
569 memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
570 gctx->bt.vec = gctx->vec;
571 gctx->bt.len = bt->len;
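/*
 * Illustrative note: the backtrace vector lives in the same allocation as the
 * gctx header, so a 4-frame backtrace costs a single allocation of
 * offsetof(prof_gctx_t, vec) + 4 * sizeof(void *) bytes, with gctx->bt.vec
 * simply aliasing the trailing gctx->vec storage.
 */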
576 prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
580 cassert(config_prof);
583 * Check that gctx is still unused by any thread cache before destroying
584 * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
585 * condition with this function, as does prof_tctx_destroy() in order to
586 * avoid a race between the main body of prof_tctx_destroy() and entry
587 * into this function.
589 prof_enter(tsd, tdata_self);
590 malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
591 assert(gctx->nlimbo != 0);
592 if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
593 /* Remove gctx from bt2gctx. */
594 if (ckh_remove(tsd_tsdn(tsd), &bt2gctx, &gctx->bt, NULL, NULL))
596 prof_leave(tsd, tdata_self);
598 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
599 idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
602 * Compensate for increment in prof_tctx_destroy() or prof_lookup().
606 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
607 prof_leave(tsd, tdata_self);
612 prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
615 malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
619 if (tctx->cnts.curobjs != 0)
627 prof_gctx_should_destroy(prof_gctx_t *gctx)
632 if (!tctx_tree_empty(&gctx->tctxs))
634 if (gctx->nlimbo != 0)
640 prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
642 prof_tdata_t *tdata = tctx->tdata;
643 prof_gctx_t *gctx = tctx->gctx;
644 bool destroy_tdata, destroy_tctx, destroy_gctx;
646 malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
648 assert(tctx->cnts.curobjs == 0);
649 assert(tctx->cnts.curbytes == 0);
650 assert(!opt_prof_accum);
651 assert(tctx->cnts.accumobjs == 0);
652 assert(tctx->cnts.accumbytes == 0);
654 ckh_remove(tsd_tsdn(tsd), &tdata->bt2tctx, &gctx->bt, NULL, NULL);
655 destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
656 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
658 malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
659 switch (tctx->state) {
660 case prof_tctx_state_nominal:
661 tctx_tree_remove(&gctx->tctxs, tctx);
663 if (prof_gctx_should_destroy(gctx)) {
665 * Increment gctx->nlimbo in order to keep another
666 * thread from winning the race to destroy gctx while
667 * this one has gctx->lock dropped. Without this, it
668 * would be possible for another thread to:
670 * 1) Sample an allocation associated with gctx.
671 * 2) Deallocate the sampled object.
672 * 3) Successfully prof_gctx_try_destroy(gctx).
674 * The result would be that gctx no longer exists by the
675 * time this thread accesses it in
676 * prof_gctx_try_destroy().
681 destroy_gctx = false;
683 case prof_tctx_state_dumping:
685 * A dumping thread needs tctx to remain valid until dumping
686 * has finished. Change state such that the dumping thread will
687 * complete destruction during a late dump iteration phase.
689 tctx->state = prof_tctx_state_purgatory;
690 destroy_tctx = false;
691 destroy_gctx = false;
695 destroy_tctx = false;
696 destroy_gctx = false;
698 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
700 prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
704 malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
707 prof_tdata_destroy(tsd_tsdn(tsd), tdata, false);
710 idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
714 prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
715 void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
727 prof_enter(tsd, tdata);
728 if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
729 /* bt has never been seen before. Insert it. */
730 gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
731 if (gctx.v == NULL) {
732 prof_leave(tsd, tdata);
735 btkey.p = &gctx.p->bt;
736 if (ckh_insert(tsd_tsdn(tsd), &bt2gctx, btkey.v, gctx.v)) {
738 prof_leave(tsd, tdata);
739 idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true);
745 * Increment nlimbo, in order to avoid a race condition with
746 * prof_tctx_destroy()/prof_gctx_try_destroy().
748 malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
750 malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
753 prof_leave(tsd, tdata);
757 *p_new_gctx = new_gctx;
762 prof_lookup(tsd_t *tsd, prof_bt_t *bt)
771 cassert(config_prof);
773 tdata = prof_tdata_get(tsd, false);
777 malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
778 not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
779 if (!not_found) /* Note double negative! */
780 ret.p->prepared = true;
781 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
785 bool new_gctx, error;
788 * This thread's cache lacks bt. Look for it in the global cache.
791 if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
795 /* Link a prof_tctx_t into gctx for this thread. */
796 ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
797 size2index(sizeof(prof_tctx_t)), false, NULL, true,
798 arena_ichoose(tsd_tsdn(tsd), NULL), true);
801 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
804 ret.p->tdata = tdata;
805 ret.p->thr_uid = tdata->thr_uid;
806 ret.p->thr_discrim = tdata->thr_discrim;
807 memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
809 ret.p->tctx_uid = tdata->tctx_uid_next++;
810 ret.p->prepared = true;
811 ret.p->state = prof_tctx_state_initializing;
812 malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
813 error = ckh_insert(tsd_tsdn(tsd), &tdata->bt2tctx, btkey,
815 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
818 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
819 idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
822 malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
823 ret.p->state = prof_tctx_state_nominal;
824 tctx_tree_insert(&gctx->tctxs, ret.p);
826 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
833 * The bodies of this function and prof_leakcheck() are compiled out unless heap
834 * profiling is enabled, so that it is possible to compile jemalloc with
835 * floating point support completely disabled. Avoiding floating point code is
836 * important on memory-constrained systems, but it also enables a workaround for
837 * versions of glibc that don't properly save/restore floating point registers
838 * during dynamic lazy symbol loading (which internally calls into whatever
839 * malloc implementation happens to be integrated into the application). Note
840 * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
841 * memory moves, so jemalloc must be compiled with such optimizations disabled
843 * (e.g. -mno-sse) in order for the workaround to be complete.
846 prof_sample_threshold_update(prof_tdata_t *tdata)
855 if (lg_prof_sample == 0) {
856 tdata->bytes_until_sample = 0;
861 * Compute sample interval as a geometrically distributed random
862 * variable with mean (2^lg_prof_sample).
866 *   tdata->bytes_until_sample = ceil(log(u) / log(1 - p)),
867 *   where p = 1 / 2^lg_prof_sample and u is a uniform random deviate in (0, 1).
870 * For more information on the math, see:
872 * Non-Uniform Random Variate Generation, Luc Devroye,
874 * Springer-Verlag, New York, 1986
876 * (http://luc.devroye.org/rnbookindex.html)
878 r = prng_lg_range(&tdata->prng_state, 53);
879 u = (double)r * (1.0/9007199254740992.0L);
880 tdata->bytes_until_sample = (uint64_t)(log(u) /
881 log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
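/*
 * Worked example (illustrative, assuming the default lg_prof_sample of 19):
 * p = 2^-19, so the mean sample interval is 524288 bytes.  A uniform draw of
 * u = 0.5 yields bytes_until_sample = log(0.5) / log(1 - 2^-19), roughly
 * 363 thousand bytes; draws of u closer to 0 yield proportionally longer
 * intervals, producing the geometric distribution described above.
 */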
887 static prof_tdata_t *
888 prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
890 size_t *tdata_count = (size_t *)arg;
898 prof_tdata_count(void)
900 size_t tdata_count = 0;
904 malloc_mutex_lock(tsdn, &tdatas_mtx);
905 tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
906 (void *)&tdata_count);
907 malloc_mutex_unlock(tsdn, &tdatas_mtx);
909 return (tdata_count);
922 tdata = prof_tdata_get(tsd, false);
926 malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
927 bt_count = ckh_count(&bt2gctx);
928 malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
935 #undef prof_dump_open
936 #define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
939 prof_dump_open(bool propagate_err, const char *filename)
943 fd = creat(filename, 0644);
944 if (fd == -1 && !propagate_err) {
945 malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
954 #undef prof_dump_open
955 #define prof_dump_open JEMALLOC_N(prof_dump_open)
956 prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
960 prof_dump_flush(bool propagate_err)
965 cassert(config_prof);
967 err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
969 if (!propagate_err) {
970 malloc_write("<jemalloc>: write() failed during heap "
977 prof_dump_buf_end = 0;
983 prof_dump_close(bool propagate_err)
987 assert(prof_dump_fd != -1);
988 ret = prof_dump_flush(propagate_err);
996 prof_dump_write(bool propagate_err, const char *s)
1000 cassert(config_prof);
1005 /* Flush the buffer if it is full. */
1006 if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
1007 if (prof_dump_flush(propagate_err) && propagate_err)
1010 if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
1011 /* Finish writing. */
1014 /* Write as much of s as will fit. */
1015 n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
1017 memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
1018 prof_dump_buf_end += n;
1025 JEMALLOC_FORMAT_PRINTF(2, 3)
1027 prof_dump_printf(bool propagate_err, const char *format, ...)
1031 char buf[PROF_PRINTF_BUFSIZE];
1033 va_start(ap, format);
1034 malloc_vsnprintf(buf, sizeof(buf), format, ap);
1036 ret = prof_dump_write(propagate_err, buf);
1042 prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
1045 malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
1047 malloc_mutex_lock(tsdn, tctx->gctx->lock);
1049 switch (tctx->state) {
1050 case prof_tctx_state_initializing:
1051 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
1053 case prof_tctx_state_nominal:
1054 tctx->state = prof_tctx_state_dumping;
1055 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
1057 memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
1059 tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1060 tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1061 if (opt_prof_accum) {
1062 tdata->cnt_summed.accumobjs +=
1063 tctx->dump_cnts.accumobjs;
1064 tdata->cnt_summed.accumbytes +=
1065 tctx->dump_cnts.accumbytes;
1068 case prof_tctx_state_dumping:
1069 case prof_tctx_state_purgatory:
1075 prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
1078 malloc_mutex_assert_owner(tsdn, gctx->lock);
1080 gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1081 gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1082 if (opt_prof_accum) {
1083 gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
1084 gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
1088 static prof_tctx_t *
1089 prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1091 tsdn_t *tsdn = (tsdn_t *)arg;
1093 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
1095 switch (tctx->state) {
1096 case prof_tctx_state_nominal:
1097 /* New since dumping started; ignore. */
1099 case prof_tctx_state_dumping:
1100 case prof_tctx_state_purgatory:
1101 prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
1110 struct prof_tctx_dump_iter_arg_s {
1115 static prof_tctx_t *
1116 prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
1118 struct prof_tctx_dump_iter_arg_s *arg =
1119 (struct prof_tctx_dump_iter_arg_s *)opaque;
1121 malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
1123 switch (tctx->state) {
1124 case prof_tctx_state_initializing:
1125 case prof_tctx_state_nominal:
1126 /* Not captured by this dump. */
1128 case prof_tctx_state_dumping:
1129 case prof_tctx_state_purgatory:
1130 if (prof_dump_printf(arg->propagate_err,
1131 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
1132 "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
1133 tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
1134 tctx->dump_cnts.accumbytes))
1143 static prof_tctx_t *
1144 prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1146 tsdn_t *tsdn = (tsdn_t *)arg;
1149 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
1151 switch (tctx->state) {
1152 case prof_tctx_state_nominal:
1153 /* New since dumping started; ignore. */
1155 case prof_tctx_state_dumping:
1156 tctx->state = prof_tctx_state_nominal;
1158 case prof_tctx_state_purgatory:
1171 prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
1174 cassert(config_prof);
1176 malloc_mutex_lock(tsdn, gctx->lock);
1179 * Increment nlimbo so that gctx won't go away before dump.
1180 * Additionally, link gctx into the dump list so that it is included in
1181 * prof_dump()'s second pass.
1184 gctx_tree_insert(gctxs, gctx);
1186 memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
1188 malloc_mutex_unlock(tsdn, gctx->lock);
1191 struct prof_gctx_merge_iter_arg_s {
1196 static prof_gctx_t *
1197 prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
1199 struct prof_gctx_merge_iter_arg_s *arg =
1200 (struct prof_gctx_merge_iter_arg_s *)opaque;
1202 malloc_mutex_lock(arg->tsdn, gctx->lock);
1203 tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
1205 if (gctx->cnt_summed.curobjs != 0)
1207 malloc_mutex_unlock(arg->tsdn, gctx->lock);
1213 prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
1215 prof_tdata_t *tdata = prof_tdata_get(tsd, false);
1219 * Standard tree iteration won't work here, because as soon as we
1220 * decrement gctx->nlimbo and unlock gctx, another thread can
1221 * concurrently destroy it, which will corrupt the tree. Therefore,
1222 * tear down the tree one node at a time during iteration.
1224 while ((gctx = gctx_tree_first(gctxs)) != NULL) {
1225 gctx_tree_remove(gctxs, gctx);
1226 malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
1232 prof_tctx_t *to_destroy =
1233 tctx_tree_iter(&gctx->tctxs, next,
1234 prof_tctx_finish_iter,
1235 (void *)tsd_tsdn(tsd));
1236 if (to_destroy != NULL) {
1237 next = tctx_tree_next(&gctx->tctxs,
1239 tctx_tree_remove(&gctx->tctxs,
1241 idalloctm(tsd_tsdn(tsd), to_destroy,
1245 } while (next != NULL);
1248 if (prof_gctx_should_destroy(gctx)) {
1250 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
1251 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
1253 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
1257 struct prof_tdata_merge_iter_arg_s {
1262 static prof_tdata_t *
1263 prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
1266 struct prof_tdata_merge_iter_arg_s *arg =
1267 (struct prof_tdata_merge_iter_arg_s *)opaque;
1269 malloc_mutex_lock(arg->tsdn, tdata->lock);
1270 if (!tdata->expired) {
1277 tdata->dumping = true;
1278 memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
1279 for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
1281 prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
1283 arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
1284 arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
1285 if (opt_prof_accum) {
1286 arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
1287 arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
1290 tdata->dumping = false;
1291 malloc_mutex_unlock(arg->tsdn, tdata->lock);
1296 static prof_tdata_t *
1297 prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1299 bool propagate_err = *(bool *)arg;
1301 if (!tdata->dumping)
1304 if (prof_dump_printf(propagate_err,
1305 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
1306 tdata->thr_uid, tdata->cnt_summed.curobjs,
1307 tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
1308 tdata->cnt_summed.accumbytes,
1309 (tdata->thread_name != NULL) ? " " : "",
1310 (tdata->thread_name != NULL) ? tdata->thread_name : ""))
1316 #undef prof_dump_header
1317 #define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
1320 prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
1324 if (prof_dump_printf(propagate_err,
1325 "heap_v2/%"FMTu64"\n"
1326 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
1327 ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
1328 cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
1331 malloc_mutex_lock(tsdn, &tdatas_mtx);
1332 ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
1333 (void *)&propagate_err) != NULL);
1334 malloc_mutex_unlock(tsdn, &tdatas_mtx);
1338 #undef prof_dump_header
1339 #define prof_dump_header JEMALLOC_N(prof_dump_header)
1340 prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
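/*
 * Example output (illustrative): with lg_prof_sample == 19 and two live
 * sampled allocations totalling 1234567 bytes, the header would read
 *
 *   heap_v2/524288
 *     t*: 2: 1234567 [0: 0]
 *
 * followed by one "t<thr_uid>: ..." line per dumping thread, emitted by
 * prof_tdata_dump_iter().
 */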
1344 prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
1345 const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
1349 struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
1351 cassert(config_prof);
1352 malloc_mutex_assert_owner(tsdn, gctx->lock);
1354 /* Avoid dumping gctx's that have no useful data. */
1355 if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
1356 (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
1357 assert(gctx->cnt_summed.curobjs == 0);
1358 assert(gctx->cnt_summed.curbytes == 0);
1359 assert(gctx->cnt_summed.accumobjs == 0);
1360 assert(gctx->cnt_summed.accumbytes == 0);
1365 if (prof_dump_printf(propagate_err, "@")) {
1369 for (i = 0; i < bt->len; i++) {
1370 if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
1371 (uintptr_t)bt->vec[i])) {
1377 if (prof_dump_printf(propagate_err,
1379 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
1380 gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
1381 gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
1386 prof_tctx_dump_iter_arg.tsdn = tsdn;
1387 prof_tctx_dump_iter_arg.propagate_err = propagate_err;
1388 if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
1389 (void *)&prof_tctx_dump_iter_arg) != NULL) {
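/*
 * Example output (illustrative): a gctx for a two-frame backtrace with one
 * live 4096-byte sampled object would be emitted roughly as
 *
 *   @ 0x7f3a12345678 0x7f3a12345abc
 *     t*: 1: 4096 [0: 0]
 *     t42: 1: 4096 [0: 0]
 *
 * i.e. the summed totals followed by one line per contributing thread.
 */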
1400 JEMALLOC_FORMAT_PRINTF(1, 2)
1402 prof_open_maps(const char *format, ...)
1406 char filename[PATH_MAX + 1];
1408 va_start(ap, format);
1409 malloc_vsnprintf(filename, sizeof(filename), format, ap);
1411 mfd = open(filename, O_RDONLY);
1422 return (GetCurrentProcessId());
1429 prof_dump_maps(bool propagate_err)
1434 cassert(config_prof);
1436 mfd = prof_open_maps("/proc/curproc/map");
1437 #elif defined(_WIN32)
1438 mfd = -1; /* Not implemented. */
1441 int pid = prof_getpid();
1443 mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
1445 mfd = prof_open_maps("/proc/%d/maps", pid);
1451 if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
1458 prof_dump_buf_end += nread;
1459 if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
1460 /* Make space in prof_dump_buf before read(). */
1461 if (prof_dump_flush(propagate_err) &&
1467 nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
1468 PROF_DUMP_BUFSIZE - prof_dump_buf_end);
1469 } while (nread > 0);
1483 * See prof_sample_threshold_update() comment for why the body of this function
1484 * is conditionally compiled.
1487 prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
1488 const char *filename)
1491 #ifdef JEMALLOC_PROF
1493 * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
1494 * differ slightly from what jeprof reports, because here we scale the
1495 * summary values, whereas jeprof scales each context individually and
1496 * reports the sums of the scaled values.
1498 if (cnt_all->curbytes != 0) {
1499 double sample_period = (double)((uint64_t)1 << lg_prof_sample);
1500 double ratio = (((double)cnt_all->curbytes) /
1501 (double)cnt_all->curobjs) / sample_period;
1502 double scale_factor = 1.0 / (1.0 - exp(-ratio));
1503 uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
1505 uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
1508 malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
1509 " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
1510 curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
1511 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
1513 "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
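/*
 * Worked example (illustrative): with lg_prof_sample == 19 the sample period
 * is 524288 bytes.  If the summed counters were curobjs == 2 and
 * curbytes == 1048576, then ratio == (1048576 / 2) / 524288 == 1.0,
 * scale_factor == 1 / (1 - exp(-1)) ~= 1.58, and the summary would report
 * approximately 3 objects and 1.66e6 bytes rather than the raw counts.
 */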
1519 struct prof_gctx_dump_iter_arg_s {
1524 static prof_gctx_t *
1525 prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
1528 struct prof_gctx_dump_iter_arg_s *arg =
1529 (struct prof_gctx_dump_iter_arg_s *)opaque;
1531 malloc_mutex_lock(arg->tsdn, gctx->lock);
1533 if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
1541 malloc_mutex_unlock(arg->tsdn, gctx->lock);
1546 prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
1548 prof_tdata_t *tdata;
1549 struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
1555 struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
1556 struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
1557 prof_gctx_tree_t gctxs;
1559 cassert(config_prof);
1561 tdata = prof_tdata_get(tsd, true);
1565 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
1566 prof_enter(tsd, tdata);
1569 * Put gctx's in limbo and clear their counters in preparation for summing.
1572 gctx_tree_new(&gctxs);
1573 for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
1574 prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
1577 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
1578 * stats and merge them into the associated gctx's.
1580 prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
1581 memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
1582 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
1583 tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
1584 (void *)&prof_tdata_merge_iter_arg);
1585 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
1587 /* Merge tctx stats into gctx's. */
1588 prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
1589 prof_gctx_merge_iter_arg.leak_ngctx = 0;
1590 gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
1591 (void *)&prof_gctx_merge_iter_arg);
1593 prof_leave(tsd, tdata);
1595 /* Create dump file. */
1596 if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
1597 goto label_open_close_error;
1599 /* Dump profile header. */
1600 if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
1601 &prof_tdata_merge_iter_arg.cnt_all))
1602 goto label_write_error;
1604 /* Dump per gctx profile stats. */
1605 prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
1606 prof_gctx_dump_iter_arg.propagate_err = propagate_err;
1607 if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
1608 (void *)&prof_gctx_dump_iter_arg) != NULL)
1609 goto label_write_error;
1611 /* Dump /proc/<pid>/maps if possible. */
1612 if (prof_dump_maps(propagate_err))
1613 goto label_write_error;
1615 if (prof_dump_close(propagate_err))
1616 goto label_open_close_error;
1618 prof_gctx_finish(tsd, &gctxs);
1619 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
1622 prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
1623 prof_gctx_merge_iter_arg.leak_ngctx, filename);
1627 prof_dump_close(propagate_err);
1628 label_open_close_error:
1629 prof_gctx_finish(tsd, &gctxs);
1630 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
1634 #define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
1635 #define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
1637 prof_dump_filename(char *filename, char v, uint64_t vseq)
1640 cassert(config_prof);
1642 if (vseq != VSEQ_INVALID) {
1643 /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
1644 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
1645 "%s.%d.%"FMTu64".%c%"FMTu64".heap",
1646 opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
1648 /* "<prefix>.<pid>.<seq>.<v>.heap" */
1649 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
1650 "%s.%d.%"FMTu64".%c.heap",
1651 opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
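/*
 * Example filenames (illustrative, assuming the default "jeprof" prefix and
 * pid 1234): an interval dump might be written as "jeprof.1234.0.i0.heap", a
 * manual mallctl-triggered dump as "jeprof.1234.1.m0.heap", and the final dump
 * (vseq == VSEQ_INVALID) as "jeprof.1234.2.f.heap".
 */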
1660 char filename[DUMP_FILENAME_BUFSIZE];
1662 cassert(config_prof);
1663 assert(opt_prof_final);
1664 assert(opt_prof_prefix[0] != '\0');
1670 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1671 prof_dump_filename(filename, 'f', VSEQ_INVALID);
1672 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1673 prof_dump(tsd, false, filename, opt_prof_leak);
1677 prof_idump(tsdn_t *tsdn)
1680 prof_tdata_t *tdata;
1682 cassert(config_prof);
1684 if (!prof_booted || tsdn_null(tsdn))
1686 tsd = tsdn_tsd(tsdn);
1687 tdata = prof_tdata_get(tsd, false);
1691 tdata->enq_idump = true;
1695 if (opt_prof_prefix[0] != '\0') {
1696 char filename[PATH_MAX + 1];
1697 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1698 prof_dump_filename(filename, 'i', prof_dump_iseq);
1700 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1701 prof_dump(tsd, false, filename, false);
1706 prof_mdump(tsd_t *tsd, const char *filename)
1708 char filename_buf[DUMP_FILENAME_BUFSIZE];
1710 cassert(config_prof);
1712 if (!opt_prof || !prof_booted)
1715 if (filename == NULL) {
1716 /* No filename specified, so automatically generate one. */
1717 if (opt_prof_prefix[0] == '\0')
1719 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1720 prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
1722 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1723 filename = filename_buf;
1725 return (prof_dump(tsd, true, filename, false));
1729 prof_gdump(tsdn_t *tsdn)
1732 prof_tdata_t *tdata;
1734 cassert(config_prof);
1736 if (!prof_booted || tsdn_null(tsdn))
1738 tsd = tsdn_tsd(tsdn);
1739 tdata = prof_tdata_get(tsd, false);
1743 tdata->enq_gdump = true;
1747 if (opt_prof_prefix[0] != '\0') {
1748 char filename[DUMP_FILENAME_BUFSIZE];
1749 malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
1750 prof_dump_filename(filename, 'u', prof_dump_useq);
1752 malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
1753 prof_dump(tsd, false, filename, false);
1758 prof_bt_hash(const void *key, size_t r_hash[2])
1760 prof_bt_t *bt = (prof_bt_t *)key;
1762 cassert(config_prof);
1764 hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
1768 prof_bt_keycomp(const void *k1, const void *k2)
1770 const prof_bt_t *bt1 = (prof_bt_t *)k1;
1771 const prof_bt_t *bt2 = (prof_bt_t *)k2;
1773 cassert(config_prof);
1775 if (bt1->len != bt2->len)
1777 return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
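/*
 * Illustrative usage: these two functions form the hash/equality pair for the
 * cuckoo hash tables keyed by backtrace, e.g.
 *
 *   ckh_new(tsdn, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
 *       prof_bt_keycomp);
 *
 * as done in prof_boot2() and, per thread, in prof_tdata_init_impl().
 */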
1780 JEMALLOC_INLINE_C uint64_t
1781 prof_thr_uid_alloc(tsdn_t *tsdn)
1785 malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
1786 thr_uid = next_thr_uid;
1788 malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
1793 static prof_tdata_t *
1794 prof_tdata_init_impl(tsdn_t *tsdn, uint64_t thr_uid, uint64_t thr_discrim,
1795 char *thread_name, bool active)
1797 prof_tdata_t *tdata;
1799 cassert(config_prof);
1801 /* Initialize an empty cache for this thread. */
1802 tdata = (prof_tdata_t *)iallocztm(tsdn, sizeof(prof_tdata_t),
1803 size2index(sizeof(prof_tdata_t)), false, NULL, true,
1804 arena_get(TSDN_NULL, 0, true), true);
1808 tdata->lock = prof_tdata_mutex_choose(thr_uid);
1809 tdata->thr_uid = thr_uid;
1810 tdata->thr_discrim = thr_discrim;
1811 tdata->thread_name = thread_name;
1812 tdata->attached = true;
1813 tdata->expired = false;
1814 tdata->tctx_uid_next = 0;
1816 if (ckh_new(tsdn, &tdata->bt2tctx, PROF_CKH_MINITEMS,
1817 prof_bt_hash, prof_bt_keycomp)) {
1818 idalloctm(tsdn, tdata, NULL, true, true);
1822 tdata->prng_state = (uint64_t)(uintptr_t)tdata;
1823 prof_sample_threshold_update(tdata);
1826 tdata->enq_idump = false;
1827 tdata->enq_gdump = false;
1829 tdata->dumping = false;
1830 tdata->active = active;
1832 malloc_mutex_lock(tsdn, &tdatas_mtx);
1833 tdata_tree_insert(&tdatas, tdata);
1834 malloc_mutex_unlock(tsdn, &tdatas_mtx);
1840 prof_tdata_init(tsdn_t *tsdn)
1843 return (prof_tdata_init_impl(tsdn, prof_thr_uid_alloc(tsdn), 0, NULL,
1844 prof_thread_active_init_get(tsdn)));
1848 prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
1851 if (tdata->attached && !even_if_attached)
1853 if (ckh_count(&tdata->bt2tctx) != 0)
1859 prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
1860 bool even_if_attached)
1863 malloc_mutex_assert_owner(tsdn, tdata->lock);
1865 return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
1869 prof_tdata_destroy_locked(tsdn_t *tsdn, prof_tdata_t *tdata,
1870 bool even_if_attached)
1873 malloc_mutex_assert_owner(tsdn, &tdatas_mtx);
1875 assert(tsdn_null(tsdn) || tsd_prof_tdata_get(tsdn_tsd(tsdn)) != tdata);
1877 tdata_tree_remove(&tdatas, tdata);
1879 assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
1881 if (tdata->thread_name != NULL)
1882 idalloctm(tsdn, tdata->thread_name, NULL, true, true);
1883 ckh_delete(tsdn, &tdata->bt2tctx);
1884 idalloctm(tsdn, tdata, NULL, true, true);
1888 prof_tdata_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached)
1891 malloc_mutex_lock(tsdn, &tdatas_mtx);
1892 prof_tdata_destroy_locked(tsdn, tdata, even_if_attached);
1893 malloc_mutex_unlock(tsdn, &tdatas_mtx);
1897 prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
1901 malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
1902 if (tdata->attached) {
1903 destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
1906 * Only detach if !destroy_tdata, because detaching would allow
1907 * another thread to win the race to destroy tdata.
1910 tdata->attached = false;
1911 tsd_prof_tdata_set(tsd, NULL);
1913 destroy_tdata = false;
1914 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
1916 prof_tdata_destroy(tsd_tsdn(tsd), tdata, true);
1920 prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
1922 uint64_t thr_uid = tdata->thr_uid;
1923 uint64_t thr_discrim = tdata->thr_discrim + 1;
1924 char *thread_name = (tdata->thread_name != NULL) ?
1925 prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
1926 bool active = tdata->active;
1928 prof_tdata_detach(tsd, tdata);
1929 return (prof_tdata_init_impl(tsd_tsdn(tsd), thr_uid, thr_discrim,
1930 thread_name, active));
1934 prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
1938 malloc_mutex_lock(tsdn, tdata->lock);
1939 if (!tdata->expired) {
1940 tdata->expired = true;
1941 destroy_tdata = tdata->attached ? false :
1942 prof_tdata_should_destroy(tsdn, tdata, false);
1944 destroy_tdata = false;
1945 malloc_mutex_unlock(tsdn, tdata->lock);
1947 return (destroy_tdata);
1950 static prof_tdata_t *
1951 prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1953 tsdn_t *tsdn = (tsdn_t *)arg;
1955 return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
1959 prof_reset(tsdn_t *tsdn, size_t lg_sample)
1963 assert(lg_sample < (sizeof(uint64_t) << 3));
1965 malloc_mutex_lock(tsdn, &prof_dump_mtx);
1966 malloc_mutex_lock(tsdn, &tdatas_mtx);
1968 lg_prof_sample = lg_sample;
1972 prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
1973 prof_tdata_reset_iter, (void *)tsdn);
1974 if (to_destroy != NULL) {
1975 next = tdata_tree_next(&tdatas, to_destroy);
1976 prof_tdata_destroy_locked(tsdn, to_destroy, false);
1979 } while (next != NULL);
1981 malloc_mutex_unlock(tsdn, &tdatas_mtx);
1982 malloc_mutex_unlock(tsdn, &prof_dump_mtx);
1986 prof_tdata_cleanup(tsd_t *tsd)
1988 prof_tdata_t *tdata;
1993 tdata = tsd_prof_tdata_get(tsd);
1995 prof_tdata_detach(tsd, tdata);
1999 prof_active_get(tsdn_t *tsdn)
2001 bool prof_active_current;
2003 malloc_mutex_lock(tsdn, &prof_active_mtx);
2004 prof_active_current = prof_active;
2005 malloc_mutex_unlock(tsdn, &prof_active_mtx);
2006 return (prof_active_current);
2010 prof_active_set(tsdn_t *tsdn, bool active)
2012 bool prof_active_old;
2014 malloc_mutex_lock(tsdn, &prof_active_mtx);
2015 prof_active_old = prof_active;
2016 prof_active = active;
2017 malloc_mutex_unlock(tsdn, &prof_active_mtx);
2018 return (prof_active_old);
2022 prof_thread_name_get(tsd_t *tsd)
2024 prof_tdata_t *tdata;
2026 tdata = prof_tdata_get(tsd, true);
2029 return (tdata->thread_name != NULL ? tdata->thread_name : "");
2033 prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
2038 if (thread_name == NULL)
2041 size = strlen(thread_name) + 1;
2045 ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
2046 arena_get(TSDN_NULL, 0, true), true);
2049 memcpy(ret, thread_name, size);
2054 prof_thread_name_set(tsd_t *tsd, const char *thread_name)
2056 prof_tdata_t *tdata;
2060 tdata = prof_tdata_get(tsd, true);
2064 /* Validate input. */
2065 if (thread_name == NULL)
2067 for (i = 0; thread_name[i] != '\0'; i++) {
2068 char c = thread_name[i];
2069 if (!isgraph(c) && !isblank(c))
2073 s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
2077 if (tdata->thread_name != NULL) {
2078 idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
2079 tdata->thread_name = NULL;
2082 tdata->thread_name = s;
2087 prof_thread_active_get(tsd_t *tsd)
2089 prof_tdata_t *tdata;
2091 tdata = prof_tdata_get(tsd, true);
2094 return (tdata->active);
2098 prof_thread_active_set(tsd_t *tsd, bool active)
2100 prof_tdata_t *tdata;
2102 tdata = prof_tdata_get(tsd, true);
2105 tdata->active = active;
2110 prof_thread_active_init_get(tsdn_t *tsdn)
2114 malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
2115 active_init = prof_thread_active_init;
2116 malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
2117 return (active_init);
2121 prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
2123 bool active_init_old;
2125 malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
2126 active_init_old = prof_thread_active_init;
2127 prof_thread_active_init = active_init;
2128 malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
2129 return (active_init_old);
2133 prof_gdump_get(tsdn_t *tsdn)
2135 bool prof_gdump_current;
2137 malloc_mutex_lock(tsdn, &prof_gdump_mtx);
2138 prof_gdump_current = prof_gdump_val;
2139 malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
2140 return (prof_gdump_current);
2144 prof_gdump_set(tsdn_t *tsdn, bool gdump)
2146 bool prof_gdump_old;
2148 malloc_mutex_lock(tsdn, &prof_gdump_mtx);
2149 prof_gdump_old = prof_gdump_val;
2150 prof_gdump_val = gdump;
2151 malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
2152 return (prof_gdump_old);
2159 cassert(config_prof);
2161 memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
2162 sizeof(PROF_PREFIX_DEFAULT));
2169 cassert(config_prof);
2172 * opt_prof must be in its final state before any arenas are
2173 * initialized, so this function must be executed early.
2176 if (opt_prof_leak && !opt_prof) {
2178 * Enable opt_prof, but in such a way that profiles are never
2179 * automatically dumped.
2182 opt_prof_gdump = false;
2183 } else if (opt_prof) {
2184 if (opt_lg_prof_interval >= 0) {
2185 prof_interval = (((uint64_t)1U) <<
2186 opt_lg_prof_interval);
2192 prof_boot2(tsdn_t *tsdn)
2195 cassert(config_prof);
2200 lg_prof_sample = opt_lg_prof_sample;
2202 prof_active = opt_prof_active;
2203 if (malloc_mutex_init(&prof_active_mtx, "prof_active",
2204 WITNESS_RANK_PROF_ACTIVE))
2207 prof_gdump_val = opt_prof_gdump;
2208 if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
2209 WITNESS_RANK_PROF_GDUMP))
2212 prof_thread_active_init = opt_prof_thread_active_init;
2213 if (malloc_mutex_init(&prof_thread_active_init_mtx,
2214 "prof_thread_active_init",
2215 WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
2218 if (ckh_new(tsdn, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
2221 if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
2222 WITNESS_RANK_PROF_BT2GCTX))
2225 tdata_tree_new(&tdatas);
2226 if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
2227 WITNESS_RANK_PROF_TDATAS))
2231 if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
2232 WITNESS_RANK_PROF_NEXT_THR_UID))
2235 if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
2236 WITNESS_RANK_PROF_DUMP_SEQ))
2238 if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
2239 WITNESS_RANK_PROF_DUMP))
2242 if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
2243 atexit(prof_fdump) != 0) {
2244 malloc_write("<jemalloc>: Error in atexit()\n");
2249 gctx_locks = (malloc_mutex_t *)base_alloc(tsdn, PROF_NCTX_LOCKS
2250 * sizeof(malloc_mutex_t));
2251 if (gctx_locks == NULL)
2253 for (i = 0; i < PROF_NCTX_LOCKS; i++) {
2254 if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
2255 WITNESS_RANK_PROF_GCTX))
2259 tdata_locks = (malloc_mutex_t *)base_alloc(tsdn,
2260 PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
2261 if (tdata_locks == NULL)
2263 for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
2264 if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
2265 WITNESS_RANK_PROF_TDATA))
2270 #ifdef JEMALLOC_PROF_LIBGCC
2272 * Cause the backtracing machinery to allocate its internal state
2273 * before enabling profiling.
2275 _Unwind_Backtrace(prof_unwind_init_callback, NULL);
2284 prof_prefork0(tsdn_t *tsdn)
2290 malloc_mutex_prefork(tsdn, &prof_dump_mtx);
2291 malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
2292 malloc_mutex_prefork(tsdn, &tdatas_mtx);
2293 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2294 malloc_mutex_prefork(tsdn, &tdata_locks[i]);
2295 for (i = 0; i < PROF_NCTX_LOCKS; i++)
2296 malloc_mutex_prefork(tsdn, &gctx_locks[i]);
2301 prof_prefork1(tsdn_t *tsdn)
2305 malloc_mutex_prefork(tsdn, &prof_active_mtx);
2306 malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
2307 malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
2308 malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
2309 malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
2314 prof_postfork_parent(tsdn_t *tsdn)
2320 malloc_mutex_postfork_parent(tsdn,
2321 &prof_thread_active_init_mtx);
2322 malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
2323 malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
2324 malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
2325 malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
2326 for (i = 0; i < PROF_NCTX_LOCKS; i++)
2327 malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
2328 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2329 malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
2330 malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
2331 malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
2332 malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
2337 prof_postfork_child(tsdn_t *tsdn)
2343 malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
2344 malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
2345 malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
2346 malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
2347 malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
2348 for (i = 0; i < PROF_NCTX_LOCKS; i++)
2349 malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
2350 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2351 malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
2352 malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
2353 malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
2354 malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
2358 /******************************************************************************/