#define	JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define	UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif

/******************************************************************************/
/* Data. */
bool		opt_prof = false;
bool		opt_prof_active = true;
bool		opt_prof_thread_active_init = true;
size_t		opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t		opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool		opt_prof_gdump = false;
bool		opt_prof_final = false;
bool		opt_prof_leak = false;
bool		opt_prof_accum = false;

char		opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];
/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool			prof_active;
static malloc_mutex_t	prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool		prof_thread_active_init;
static malloc_mutex_t	prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool			prof_gdump_val;
static malloc_mutex_t	prof_gdump_mtx;
uint64_t	prof_interval = 0;
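/*
 * Initialized as opt_lg_prof_sample in prof_boot2(), and possibly modified
 * later by prof_reset().
 */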
size_t		lg_prof_sample;
/*
 * Table of mutexes that are shared among gctx's.  These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing, though, is that gctx's are
 * ephemeral, and destroying mutexes causes complications for systems that
 * allocate when creating/destroying mutexes.
 */
static malloc_mutex_t	*gctx_locks;
static unsigned		cum_gctxs; /* Atomic counter. */
/*
 * Table of mutexes that are shared among tdata's.  No operations require
 * holding multiple tdata locks, so there is no problem with using them for
 * more than one tdata at the same time, even though a gctx lock may be
 * acquired while holding a tdata lock.
 */
static malloc_mutex_t	*tdata_locks;
/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *).  This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t		bt2gctx;
static malloc_mutex_t	bt2gctx_mtx;
/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t	tdatas;
static malloc_mutex_t	tdatas_mtx;
static uint64_t		next_thr_uid;
static malloc_mutex_t	next_thr_uid_mtx;

static malloc_mutex_t	prof_dump_seq_mtx;
static uint64_t		prof_dump_seq;
static uint64_t		prof_dump_iseq;
static uint64_t		prof_dump_mseq;
static uint64_t		prof_dump_useq;
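/*
 * The prof_dump_{i,m,u}seq counters provide the per-trigger sequence numbers
 * that prof_dump_filename() embeds in 'i' (interval), 'm' (mallctl), and 'u'
 * (gdump) dump filenames; prof_dump_seq globally orders all dumps.
 */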
/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.
 */
static malloc_mutex_t	prof_dump_mtx;
static char		prof_dump_buf[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PROF_DUMP_BUFSIZE
#else
    1
#endif
];
static unsigned		prof_dump_buf_end;
static int		prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */
static bool		prof_booted = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	prof_tctx_should_destroy(prof_tctx_t *tctx);
static void	prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool	prof_tdata_should_destroy(prof_tdata_t *tdata,
    bool even_if_attached);
static void	prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached);
static char	*prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
/******************************************************************************/
/* Red-black trees. */
JEMALLOC_INLINE_C int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
{
	uint64_t a_thr_uid = a->thr_uid;
	uint64_t b_thr_uid = b->thr_uid;
	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
	if (ret == 0) {
		uint64_t a_tctx_uid = a->tctx_uid;
		uint64_t b_tctx_uid = b->tctx_uid;
		ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < b_tctx_uid);
	}
	return (ret);
}

rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)
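/*
 * rb_gen() expands to the red-black tree functions used throughout this file
 * (tctx_tree_new(), tctx_tree_insert(), tctx_tree_remove(), tctx_tree_iter(),
 * etc.), with prof_tctx_comp() supplying the (thr_uid, tctx_uid) ordering.
 */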
JEMALLOC_INLINE_C int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
{
	unsigned a_len = a->bt.len;
	unsigned b_len = b->bt.len;
	unsigned comp_len = (a_len < b_len) ? a_len : b_len;
	int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
	if (ret == 0)
		ret = (a_len > b_len) - (a_len < b_len);
	return (ret);
}

rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)
JEMALLOC_INLINE_C int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
{
	int ret;
	uint64_t a_uid = a->thr_uid;
	uint64_t b_uid = b->thr_uid;

	ret = ((a_uid > b_uid) - (a_uid < b_uid));
	if (ret == 0) {
		uint64_t a_discrim = a->thr_discrim;
		uint64_t b_discrim = b->thr_discrim;

		ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
	}
	return (ret);
}

rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)
/******************************************************************************/

void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
{
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (updated) {
		/*
		 * Compute a new sample threshold.  This isn't very important
		 * in practice, because this function is rarely executed, so
		 * the potential for sample bias is minimal except in contrived
		 * programs.
		 */
		tdata = prof_tdata_get(tsd, true);
		if (tdata != NULL)
			prof_sample_threshold_update(tctx->tdata);
	}

	if ((uintptr_t)tctx > (uintptr_t)1U) {
		malloc_mutex_lock(tctx->tdata->lock);
		tctx->prepared = false;
		if (prof_tctx_should_destroy(tctx))
			prof_tctx_destroy(tsd, tctx);
		else
			malloc_mutex_unlock(tctx->tdata->lock);
	}
}
void
prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
{

	prof_tctx_set(ptr, tctx);

	malloc_mutex_lock(tctx->tdata->lock);
	tctx->cnts.curobjs++;
	tctx->cnts.curbytes += usize;
	if (opt_prof_accum) {
		tctx->cnts.accumobjs++;
		tctx->cnts.accumbytes += usize;
	}
	tctx->prepared = false;
	malloc_mutex_unlock(tctx->tdata->lock);
}
void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{

	malloc_mutex_lock(tctx->tdata->lock);
	assert(tctx->cnts.curobjs > 0);
	assert(tctx->cnts.curbytes >= usize);
	tctx->cnts.curobjs--;
	tctx->cnts.curbytes -= usize;

	if (prof_tctx_should_destroy(tctx))
		prof_tctx_destroy(tsd, tctx);
	else
		malloc_mutex_unlock(tctx->tdata->lock);
}
void
bt_init(prof_bt_t *bt, void **vec)
{

	cassert(config_prof);

	bt->vec = vec;
	bt->len = 0;
}
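/*
 * Typical usage (sketch; the caller owns the vector storage):
 *
 *	void *vec[PROF_BT_MAX];
 *	prof_bt_t bt;
 *
 *	bt_init(&bt, vec);
 *	prof_backtrace(&bt);
 */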
JEMALLOC_INLINE_C void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
{

	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	if (tdata != NULL) {
		assert(!tdata->enq);
		tdata->enq = true;
	}

	malloc_mutex_lock(&bt2gctx_mtx);
}
JEMALLOC_INLINE_C void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
{

	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	malloc_mutex_unlock(&bt2gctx_mtx);

	if (tdata != NULL) {
		bool idump, gdump;

		assert(tdata->enq);
		tdata->enq = false;
		idump = tdata->enq_idump;
		tdata->enq_idump = false;
		gdump = tdata->enq_gdump;
		tdata->enq_gdump = false;

		if (idump)
			prof_idump();
		if (gdump)
			prof_gdump();
	}
}
#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt)
{
	int nframes;

	cassert(config_prof);
	assert(bt->len == 0);
	assert(bt->vec != NULL);

	nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
	if (nframes <= 0)
		return;
	bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{

	cassert(config_prof);

	return (_URC_NO_REASON);
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
	void *ip;

	cassert(config_prof);

	ip = (void *)_Unwind_GetIP(context);
	if (ip == NULL)
		return (_URC_END_OF_STACK);
	data->bt->vec[data->bt->len] = ip;
	data->bt->len++;
	if (data->bt->len == data->max)
		return (_URC_END_OF_STACK);

	return (_URC_NO_REASON);
}
void
prof_backtrace(prof_bt_t *bt)
{
	prof_unwind_data_t data = {bt, PROF_BT_MAX};

	cassert(config_prof);

	_Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt)
{
#define	BT_FRAME(i)						\
	if ((i) < PROF_BT_MAX) {				\
		void *p;					\
		if (__builtin_frame_address(i) == 0)		\
			return;					\
		p = __builtin_return_address(i);		\
		if (p == NULL)					\
			return;					\
		bt->vec[(i)] = p;				\
		bt->len = (i) + 1;				\
	} else							\
		return;

	cassert(config_prof);

	BT_FRAME(0)
	BT_FRAME(1)
	/* ... BT_FRAME(i) repeated, one invocation per supported frame ... */
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt)
{

	cassert(config_prof);
	not_reached();
}
#endif
static malloc_mutex_t *
prof_gctx_mutex_choose(void)
{
	unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);

	return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
}

static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid)
{

	return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
}
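/*
 * Note the two striping policies: gctx locks are handed out round-robin as
 * gctx's are created, whereas tdata locks are keyed on thr_uid, so a given
 * thread maps to the same tdata lock for its entire lifetime.
 */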
static prof_gctx_t *
prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
{
	/*
	 * Create a single allocation that has space for vec of length bt->len.
	 */
	prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t,
	    vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true),
	    true, NULL);
	if (gctx == NULL)
		return (NULL);
	gctx->lock = prof_gctx_mutex_choose();
	/*
	 * Set nlimbo to 1, in order to avoid a race condition with
	 * prof_tctx_destroy()/prof_gctx_try_destroy().
	 */
	gctx->nlimbo = 1;
	tctx_tree_new(&gctx->tctxs);
	/* Duplicate bt. */
	memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
	gctx->bt.vec = gctx->vec;
	gctx->bt.len = bt->len;
	return (gctx);
}
static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
    prof_tdata_t *tdata)
{

	cassert(config_prof);

	/*
	 * Check that gctx is still unused by any thread cache before destroying
	 * it.  prof_lookup() increments gctx->nlimbo in order to avoid a race
	 * condition with this function, as does prof_tctx_destroy() in order to
	 * avoid a race between the main body of prof_tctx_destroy() and entry
	 * into this function.
	 */
	prof_enter(tsd, tdata_self);
	malloc_mutex_lock(gctx->lock);
	assert(gctx->nlimbo != 0);
	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
		/* Remove gctx from bt2gctx. */
		if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
			not_reached();
		prof_leave(tsd, tdata_self);
		/* Destroy gctx. */
		malloc_mutex_unlock(gctx->lock);
		idalloctm(tsd, gctx, tcache_get(tsd, false), true);
	} else {
		/*
		 * Compensate for increment in prof_tctx_destroy() or
		 * prof_lookup().
		 */
		gctx->nlimbo--;
		malloc_mutex_unlock(gctx->lock);
		prof_leave(tsd, tdata_self);
	}
}
/* tctx->tdata->lock must be held. */
static bool
prof_tctx_should_destroy(prof_tctx_t *tctx)
{

	if (opt_prof_accum)
		return (false);
	if (tctx->cnts.curobjs != 0)
		return (false);
	if (tctx->prepared)
		return (false);
	return (true);
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx)
{

	if (opt_prof_accum)
		return (false);
	if (!tctx_tree_empty(&gctx->tctxs))
		return (false);
	if (gctx->nlimbo != 0)
		return (false);
	return (true);
}
/* tctx->tdata->lock is held upon entry, and released before return. */
static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
{
	prof_tdata_t *tdata = tctx->tdata;
	prof_gctx_t *gctx = tctx->gctx;
	bool destroy_tdata, destroy_tctx, destroy_gctx;

	assert(tctx->cnts.curobjs == 0);
	assert(tctx->cnts.curbytes == 0);
	assert(!opt_prof_accum);
	assert(tctx->cnts.accumobjs == 0);
	assert(tctx->cnts.accumbytes == 0);

	ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
	destroy_tdata = prof_tdata_should_destroy(tdata, false);
	malloc_mutex_unlock(tdata->lock);

	malloc_mutex_lock(gctx->lock);
	switch (tctx->state) {
	case prof_tctx_state_nominal:
		tctx_tree_remove(&gctx->tctxs, tctx);
		destroy_tctx = true;
		if (prof_gctx_should_destroy(gctx)) {
			/*
			 * Increment gctx->nlimbo in order to keep another
			 * thread from winning the race to destroy gctx while
			 * this one has gctx->lock dropped.  Without this, it
			 * would be possible for another thread to:
			 *
			 * 1) Sample an allocation associated with gctx.
			 * 2) Deallocate the sampled object.
			 * 3) Successfully prof_gctx_try_destroy(gctx).
			 *
			 * The result would be that gctx no longer exists by the
			 * time this thread accesses it in
			 * prof_gctx_try_destroy().
			 */
			gctx->nlimbo++;
			destroy_gctx = true;
		} else
			destroy_gctx = false;
		break;
	case prof_tctx_state_dumping:
		/*
		 * A dumping thread needs tctx to remain valid until dumping
		 * has finished.  Change state such that the dumping thread will
		 * complete destruction during a late dump iteration phase.
		 */
		tctx->state = prof_tctx_state_purgatory;
		destroy_tctx = false;
		destroy_gctx = false;
		break;
	default:
		not_reached();
		destroy_tctx = false;
		destroy_gctx = false;
	}
	malloc_mutex_unlock(gctx->lock);
	if (destroy_gctx) {
		prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
		    tdata);
	}

	if (destroy_tdata)
		prof_tdata_destroy(tsd, tdata, false);

	if (destroy_tctx)
		idalloctm(tsd, tctx, tcache_get(tsd, false), true);
}
static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
{
	union {
		prof_gctx_t	*p;
		void		*v;
	} gctx;
	union {
		prof_bt_t	*p;
		void		*v;
	} btkey;
	bool new_gctx;

	prof_enter(tsd, tdata);
	if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
		/* bt has never been seen before.  Insert it. */
		gctx.p = prof_gctx_create(tsd, bt);
		if (gctx.v == NULL) {
			prof_leave(tsd, tdata);
			return (true);
		}
		btkey.p = &gctx.p->bt;
		if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
			/* OOM. */
			prof_leave(tsd, tdata);
			idalloctm(tsd, gctx.v, tcache_get(tsd, false), true);
			return (true);
		}
		new_gctx = true;
	} else {
		/*
		 * Increment nlimbo, in order to avoid a race condition with
		 * prof_tctx_destroy()/prof_gctx_try_destroy().
		 */
		malloc_mutex_lock(gctx.p->lock);
		gctx.p->nlimbo++;
		malloc_mutex_unlock(gctx.p->lock);
		new_gctx = false;
	}
	prof_leave(tsd, tdata);

	*p_btkey = btkey.v;
	*p_gctx = gctx.p;
	*p_new_gctx = new_gctx;
	return (false);
}
prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt)
{
	union {
		prof_tctx_t	*p;
		void		*v;
	} ret;
	prof_tdata_t *tdata;
	bool not_found;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return (NULL);

	malloc_mutex_lock(tdata->lock);
	not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
	if (!not_found) /* Note double negative! */
		ret.p->prepared = true;
	malloc_mutex_unlock(tdata->lock);
	if (not_found) {
		tcache_t *tcache;
		void *btkey;
		prof_gctx_t *gctx;
		bool new_gctx, error;

		/*
		 * This thread's cache lacks bt.  Look for it in the global
		 * cache.
		 */
		if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
		    &new_gctx))
			return (NULL);

		/* Link a prof_tctx_t into gctx for this thread. */
		tcache = tcache_get(tsd, true);
		ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true,
		    NULL);
		if (ret.p == NULL) {
			if (new_gctx)
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			return (NULL);
		}
		ret.p->tdata = tdata;
		ret.p->thr_uid = tdata->thr_uid;
		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
		ret.p->gctx = gctx;
		ret.p->tctx_uid = tdata->tctx_uid_next++;
		ret.p->prepared = true;
		ret.p->state = prof_tctx_state_initializing;
		malloc_mutex_lock(tdata->lock);
		error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
		malloc_mutex_unlock(tdata->lock);
		if (error) {
			if (new_gctx)
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			idalloctm(tsd, ret.v, tcache, true);
			return (NULL);
		}
		malloc_mutex_lock(gctx->lock);
		ret.p->state = prof_tctx_state_nominal;
		tctx_tree_insert(&gctx->tctxs, ret.p);
		gctx->nlimbo--;
		malloc_mutex_unlock(gctx->lock);
	}

	return (ret.p);
}
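/*
 * Design note: prof_lookup() consults the per-thread bt2tctx cache first and
 * falls back to the global bt2gctx table (serialized via prof_enter()) only
 * on a miss, so the common sampling path takes just this thread's tdata lock.
 */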
void
prof_sample_threshold_update(prof_tdata_t *tdata)
{
	/*
	 * The body of this function is compiled out unless heap profiling is
	 * enabled, so that it is possible to compile jemalloc with floating
	 * point support completely disabled.  Avoiding floating point code is
	 * important on memory-constrained systems, but it also enables a
	 * workaround for versions of glibc that don't properly save/restore
	 * floating point registers during dynamic lazy symbol loading (which
	 * internally calls into whatever malloc implementation happens to be
	 * integrated into the application).  Note that some compilers (e.g.
	 * gcc 4.8) may use floating point registers for fast memory moves, so
	 * jemalloc must be compiled with such optimizations disabled (e.g.
	 * -mno-sse) in order for the workaround to be complete.
	 */
#ifdef JEMALLOC_PROF
	uint64_t r;
	double u;

	if (!config_prof)
		return;

	if (lg_prof_sample == 0) {
		tdata->bytes_until_sample = 0;
		return;
	}

	/*
	 * Compute sample interval as a geometrically distributed random
	 * variable with mean (2^lg_prof_sample).
	 *
	 *                              __        __
	 *                              |  log(u)  |                     1
	 * tdata->bytes_until_sample = | -------- |, where p = ----------------
	 *                              | log(1-p) |             lg_prof_sample
	 *                                                     2
	 *
	 * For more information on the math, see:
	 *
	 *   Non-Uniform Random Variate Generation
	 *   Luc Devroye
	 *   Springer-Verlag, New York, 1986
	 *   pp 500
	 *   (http://luc.devroye.org/rnbookindex.html)
	 */
	prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005),
	    UINT64_C(1442695040888963407));
	u = (double)r * (1.0/9007199254740992.0L);
	tdata->bytes_until_sample = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
	    + (uint64_t)1U;
#endif
}
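/*
 * Worked example (illustrative numbers): with lg_prof_sample == 19
 * (p == 2^-19, i.e. a mean sampling interval of 512 KiB), a draw of u == 0.5
 * gives log(0.5)/log(1 - 2^-19) ~= 0.693 * 2^19 ~= 363409 bytes until the
 * next sample.
 */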
#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
	size_t *tdata_count = (size_t *)arg;

	(*tdata_count)++;

	return (NULL);
}

size_t
prof_tdata_count(void)
{
	size_t tdata_count = 0;

	malloc_mutex_lock(&tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
	    (void *)&tdata_count);
	malloc_mutex_unlock(&tdatas_mtx);

	return (tdata_count);
}
#endif
#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
	size_t bt_count;
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return (0);

	malloc_mutex_lock(&bt2gctx_mtx);
	bt_count = ckh_count(&bt2gctx);
	malloc_mutex_unlock(&bt2gctx_mtx);

	return (bt_count);
}
#endif
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define	prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
prof_dump_open(bool propagate_err, const char *filename)
{
	int fd;

	fd = creat(filename, 0644);
	if (fd == -1 && !propagate_err) {
		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
		    filename);
		if (opt_abort)
			abort();
	}

	return (fd);
}
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define	prof_dump_open JEMALLOC_N(prof_dump_open)
prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif
static bool
prof_dump_flush(bool propagate_err)
{
	bool ret = false;
	ssize_t err;

	cassert(config_prof);

	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
	if (err == -1) {
		if (!propagate_err) {
			malloc_write("<jemalloc>: write() failed during heap "
			    "profile flush\n");
			if (opt_abort)
				abort();
		}
		ret = true;
	}
	prof_dump_buf_end = 0;

	return (ret);
}

static bool
prof_dump_close(bool propagate_err)
{
	bool ret;

	assert(prof_dump_fd != -1);
	ret = prof_dump_flush(propagate_err);
	close(prof_dump_fd);
	prof_dump_fd = -1;

	return (ret);
}
static bool
prof_dump_write(bool propagate_err, const char *s)
{
	size_t i, slen, n;

	cassert(config_prof);

	i = 0;
	slen = strlen(s);
	while (i < slen) {
		/* Flush the buffer if it is full. */
		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
			if (prof_dump_flush(propagate_err) && propagate_err)
				return (true);

		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
			/* Finish writing. */
			n = slen - i;
		} else {
			/* Write as much of s as will fit. */
			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
		}
		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
		prof_dump_buf_end += n;
		i += n;
	}

	return (false);
}
JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...)
{
	bool ret;
	va_list ap;
	char buf[PROF_PRINTF_BUFSIZE];

	va_start(ap, format);
	malloc_vsnprintf(buf, sizeof(buf), format, ap);
	va_end(ap);
	ret = prof_dump_write(propagate_err, buf);

	return (ret);
}
/* tctx->tdata->lock is held. */
static void
prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
{

	malloc_mutex_lock(tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_initializing:
		malloc_mutex_unlock(tctx->gctx->lock);
		return;
	case prof_tctx_state_nominal:
		tctx->state = prof_tctx_state_dumping;
		malloc_mutex_unlock(tctx->gctx->lock);

		memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));

		tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
		tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
		if (opt_prof_accum) {
			tdata->cnt_summed.accumobjs +=
			    tctx->dump_cnts.accumobjs;
			tdata->cnt_summed.accumbytes +=
			    tctx->dump_cnts.accumbytes;
		}
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		not_reached();
	}
}
/* gctx->lock is held. */
static void
prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
{

	gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
	gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
	if (opt_prof_accum) {
		gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
		gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
	}
}
/* tctx->gctx->lock is held. */
static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		prof_tctx_merge_gctx(tctx, tctx->gctx);
		break;
	default:
		not_reached();
	}

	return (NULL);
}
/* gctx->lock is held. */
static prof_tctx_t *
prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
	bool propagate_err = *(bool *)arg;

	if (prof_dump_printf(propagate_err,
	    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
	    tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes,
	    tctx->dump_cnts.accumobjs, tctx->dump_cnts.accumbytes))
		return (tctx);

	return (NULL);
}
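/*
 * Each line emitted above has the form
 *
 *   t<thr_uid>: <curobjs>: <curbytes> [<accumobjs>: <accumbytes>]
 *
 * mirroring the aggregate "t*:" line written by prof_dump_header().
 */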
/* tctx->gctx->lock is held. */
static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
	prof_tctx_t *ret;

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
		tctx->state = prof_tctx_state_nominal;
		break;
	case prof_tctx_state_purgatory:
		ret = tctx;
		goto label_return;
	default:
		not_reached();
	}

	ret = NULL;
label_return:
	return (ret);
}
static void
prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
{

	cassert(config_prof);

	malloc_mutex_lock(gctx->lock);

	/*
	 * Increment nlimbo so that gctx won't go away before dump.
	 * Additionally, link gctx into the dump list so that it is included in
	 * prof_dump()'s second pass.
	 */
	gctx->nlimbo++;
	gctx_tree_insert(gctxs, gctx);

	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));

	malloc_mutex_unlock(gctx->lock);
}
static prof_gctx_t *
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
	size_t *leak_ngctx = (size_t *)arg;

	malloc_mutex_lock(gctx->lock);
	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
	if (gctx->cnt_summed.curobjs != 0)
		(*leak_ngctx)++;
	malloc_mutex_unlock(gctx->lock);

	return (NULL);
}
static void
prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
{
	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
	prof_gctx_t *gctx;

	/*
	 * Standard tree iteration won't work here, because as soon as we
	 * decrement gctx->nlimbo and unlock gctx, another thread can
	 * concurrently destroy it, which will corrupt the tree.  Therefore,
	 * tear down the tree one node at a time during iteration.
	 */
	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
		gctx_tree_remove(gctxs, gctx);
		malloc_mutex_lock(gctx->lock);
		{
			prof_tctx_t *next;

			next = NULL;
			do {
				prof_tctx_t *to_destroy =
				    tctx_tree_iter(&gctx->tctxs, next,
				    prof_tctx_finish_iter, NULL);
				if (to_destroy != NULL) {
					next = tctx_tree_next(&gctx->tctxs,
					    to_destroy);
					tctx_tree_remove(&gctx->tctxs,
					    to_destroy);
					idalloctm(tsd, to_destroy,
					    tcache_get(tsd, false), true);
				} else
					next = NULL;
			} while (next != NULL);
		}
		gctx->nlimbo--;
		if (prof_gctx_should_destroy(gctx)) {
			gctx->nlimbo++;
			malloc_mutex_unlock(gctx->lock);
			prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
		} else
			malloc_mutex_unlock(gctx->lock);
	}
}
static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
	prof_cnt_t *cnt_all = (prof_cnt_t *)arg;

	malloc_mutex_lock(tdata->lock);
	if (!tdata->expired) {
		size_t tabind;
		union {
			prof_tctx_t	*p;
			void		*v;
		} tctx;

		tdata->dumping = true;
		memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
		for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
		    &tctx.v);)
			prof_tctx_merge_tdata(tctx.p, tdata);

		cnt_all->curobjs += tdata->cnt_summed.curobjs;
		cnt_all->curbytes += tdata->cnt_summed.curbytes;
		if (opt_prof_accum) {
			cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
			cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
		}
	} else
		tdata->dumping = false;
	malloc_mutex_unlock(tdata->lock);

	return (NULL);
}
static prof_tdata_t *
prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
	bool propagate_err = *(bool *)arg;

	if (!tdata->dumping)
		return (NULL);

	if (prof_dump_printf(propagate_err,
	    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
	    tdata->thr_uid, tdata->cnt_summed.curobjs,
	    tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
	    tdata->cnt_summed.accumbytes,
	    (tdata->thread_name != NULL) ? " " : "",
	    (tdata->thread_name != NULL) ? tdata->thread_name : ""))
		return (tdata);
	return (NULL);
}
#ifdef JEMALLOC_JET
#undef prof_dump_header
#define	prof_dump_header JEMALLOC_N(prof_dump_header_impl)
#endif
static bool
prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
	bool ret;

	if (prof_dump_printf(propagate_err,
	    "heap_v2/%"FMTu64"\n"
	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
	    ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
	    cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
		return (true);

	malloc_mutex_lock(&tdatas_mtx);
	ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
	    (void *)&propagate_err) != NULL);
	malloc_mutex_unlock(&tdatas_mtx);
	return (ret);
}
#ifdef JEMALLOC_JET
#undef prof_dump_header
#define	prof_dump_header JEMALLOC_N(prof_dump_header)
prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
#endif
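/*
 * The "heap_v2/<2^lg_prof_sample>" header records the average byte interval
 * at which allocations were sampled; jeprof presumably uses it to scale the
 * sampled counts back up to estimated totals.
 */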
/* gctx->lock is held. */
static bool
prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
    prof_gctx_tree_t *gctxs)
{
	bool ret;
	unsigned i;

	cassert(config_prof);

	/* Avoid dumping gctx's that have no useful data. */
	if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
	    (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
		assert(gctx->cnt_summed.curobjs == 0);
		assert(gctx->cnt_summed.curbytes == 0);
		assert(gctx->cnt_summed.accumobjs == 0);
		assert(gctx->cnt_summed.accumbytes == 0);
		ret = false;
		goto label_return;
	}

	if (prof_dump_printf(propagate_err, "@")) {
		ret = true;
		goto label_return;
	}
	for (i = 0; i < bt->len; i++) {
		if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
		    (uintptr_t)bt->vec[i])) {
			ret = true;
			goto label_return;
		}
	}

	if (prof_dump_printf(propagate_err,
	    "\n"
	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
	    gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
	    gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
		ret = true;
		goto label_return;
	}

	if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
	    (void *)&propagate_err) != NULL) {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	return (ret);
}
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
prof_open_maps(const char *format, ...)
{
	int mfd;
	va_list ap;
	char filename[PATH_MAX + 1];

	va_start(ap, format);
	malloc_vsnprintf(filename, sizeof(filename), format, ap);
	va_end(ap);
	mfd = open(filename, O_RDONLY);

	return (mfd);
}
static bool
prof_dump_maps(bool propagate_err)
{
	bool ret;
	int mfd;

	cassert(config_prof);
#ifdef __FreeBSD__
	mfd = prof_open_maps("/proc/curproc/map");
#else
	{
		int pid = getpid();

		mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
		if (mfd == -1)
			mfd = prof_open_maps("/proc/%d/maps", pid);
	}
#endif
	if (mfd != -1) {
		ssize_t nread;

		if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
		    propagate_err) {
			ret = true;
			goto label_return;
		}
		nread = 0;
		do {
			prof_dump_buf_end += nread;
			if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
				/* Make space in prof_dump_buf before read(). */
				if (prof_dump_flush(propagate_err) &&
				    propagate_err) {
					ret = true;
					goto label_return;
				}
			}
			nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
			    PROF_DUMP_BUFSIZE - prof_dump_buf_end);
		} while (nread > 0);
	} else {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	if (mfd != -1)
		close(mfd);
	return (ret);
}
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
    const char *filename)
{

	if (cnt_all->curbytes != 0) {
		malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
		    FMTu64" object%s, %zu context%s\n",
		    cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
		    cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
		    leak_ngctx, (leak_ngctx != 1) ? "s" : "");
		malloc_printf(
		    "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
		    filename);
	}
}
static prof_gctx_t *
prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
	prof_gctx_t *ret;
	bool propagate_err = *(bool *)arg;

	malloc_mutex_lock(gctx->lock);

	if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
		ret = gctx;
		goto label_return;
	}

	ret = NULL;
label_return:
	malloc_mutex_unlock(gctx->lock);
	return (ret);
}
static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
{
	prof_tdata_t *tdata;
	prof_cnt_t cnt_all;
	size_t tabind;
	union {
		prof_gctx_t	*p;
		void		*v;
	} gctx;
	size_t leak_ngctx;
	prof_gctx_tree_t gctxs;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return (true);

	malloc_mutex_lock(&prof_dump_mtx);
	prof_enter(tsd, tdata);

	/*
	 * Put gctx's in limbo and clear their counters in preparation for
	 * summing.
	 */
	gctx_tree_new(&gctxs);
	for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
		prof_dump_gctx_prep(gctx.p, &gctxs);

	/*
	 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
	 * stats and merge them into the associated gctx's.
	 */
	memset(&cnt_all, 0, sizeof(prof_cnt_t));
	malloc_mutex_lock(&tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
	malloc_mutex_unlock(&tdatas_mtx);

	/* Merge tctx stats into gctx's. */
	leak_ngctx = 0;
	gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);

	prof_leave(tsd, tdata);

	/* Create dump file. */
	if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
		goto label_open_close_error;

	/* Dump profile header. */
	if (prof_dump_header(propagate_err, &cnt_all))
		goto label_write_error;

	/* Dump per gctx profile stats. */
	if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
	    (void *)&propagate_err) != NULL)
		goto label_write_error;

	/* Dump /proc/<pid>/maps if possible. */
	if (prof_dump_maps(propagate_err))
		goto label_write_error;

	if (prof_dump_close(propagate_err))
		goto label_open_close_error;

	prof_gctx_finish(tsd, &gctxs);
	malloc_mutex_unlock(&prof_dump_mtx);

	if (leakcheck)
		prof_leakcheck(&cnt_all, leak_ngctx, filename);

	return (false);
label_write_error:
	prof_dump_close(propagate_err);
label_open_close_error:
	prof_gctx_finish(tsd, &gctxs);
	malloc_mutex_unlock(&prof_dump_mtx);
	return (true);
}
#define	DUMP_FILENAME_BUFSIZE	(PATH_MAX + 1)
#define	VSEQ_INVALID		UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(char *filename, char v, uint64_t vseq)
{

	cassert(config_prof);

	if (vseq != VSEQ_INVALID) {
		/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c%"FMTu64".heap",
		    opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
	} else {
		/* "<prefix>.<pid>.<seq>.<v>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c.heap",
		    opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
	}
	prof_dump_seq++;
}
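/*
 * For example (hypothetical pid/sequence values), prefix "jeprof" produces
 * "jeprof.1234.5.i7.heap" for an interval-triggered dump and
 * "jeprof.1234.8.f.heap" for the final dump, which passes VSEQ_INVALID.
 */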
static void
prof_fdump(void)
{
	tsd_t *tsd;
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);
	assert(opt_prof_final);
	assert(opt_prof_prefix[0] != '\0');

	if (!prof_booted)
		return;
	tsd = tsd_fetch();

	malloc_mutex_lock(&prof_dump_seq_mtx);
	prof_dump_filename(filename, 'f', VSEQ_INVALID);
	malloc_mutex_unlock(&prof_dump_seq_mtx);
	prof_dump(tsd, false, filename, opt_prof_leak);
}
void
prof_idump(void)
{
	tsd_t *tsd;
	prof_tdata_t *tdata;
	char filename[PATH_MAX + 1];

	cassert(config_prof);

	if (!prof_booted)
		return;
	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return;
	if (tdata->enq) {
		tdata->enq_idump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename, 'i', prof_dump_iseq);
		prof_dump_iseq++;
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		prof_dump(tsd, false, filename, false);
	}
}
bool
prof_mdump(const char *filename)
{
	tsd_t *tsd;
	char filename_buf[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);

	if (!opt_prof || !prof_booted)
		return (true);
	tsd = tsd_fetch();

	if (filename == NULL) {
		/* No filename specified, so automatically generate one. */
		if (opt_prof_prefix[0] == '\0')
			return (true);
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
		prof_dump_mseq++;
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		filename = filename_buf;
	}
	return (prof_dump(tsd, true, filename, false));
}
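/*
 * prof_mdump() backs the "prof.dump" mallctl, so a dump can be forced with,
 * e.g.:
 *
 *	const char *filename = "/tmp/app.heap";
 *	mallctl("prof.dump", NULL, NULL, &filename, sizeof(const char *));
 */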
void
prof_gdump(void)
{
	tsd_t *tsd;
	prof_tdata_t *tdata;
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);

	if (!prof_booted)
		return;
	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return;
	if (tdata->enq) {
		tdata->enq_gdump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename, 'u', prof_dump_useq);
		prof_dump_useq++;
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		prof_dump(tsd, false, filename, false);
	}
}
static void
prof_bt_hash(const void *key, size_t r_hash[2])
{
	prof_bt_t *bt = (prof_bt_t *)key;

	cassert(config_prof);

	hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}

static bool
prof_bt_keycomp(const void *k1, const void *k2)
{
	const prof_bt_t *bt1 = (prof_bt_t *)k1;
	const prof_bt_t *bt2 = (prof_bt_t *)k2;

	cassert(config_prof);

	if (bt1->len != bt2->len)
		return (false);
	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
JEMALLOC_INLINE_C uint64_t
prof_thr_uid_alloc(void)
{
	uint64_t thr_uid;

	malloc_mutex_lock(&next_thr_uid_mtx);
	thr_uid = next_thr_uid;
	next_thr_uid++;
	malloc_mutex_unlock(&next_thr_uid_mtx);

	return (thr_uid);
}
static prof_tdata_t *
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
    char *thread_name, bool active)
{
	prof_tdata_t *tdata;
	tcache_t *tcache;

	cassert(config_prof);

	/* Initialize an empty cache for this thread. */
	tcache = tcache_get(tsd, true);
	tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false,
	    tcache, true, NULL);
	if (tdata == NULL)
		return (NULL);

	tdata->lock = prof_tdata_mutex_choose(thr_uid);
	tdata->thr_uid = thr_uid;
	tdata->thr_discrim = thr_discrim;
	tdata->thread_name = thread_name;
	tdata->attached = true;
	tdata->expired = false;
	tdata->tctx_uid_next = 0;

	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
	    prof_bt_hash, prof_bt_keycomp)) {
		idalloctm(tsd, tdata, tcache, true);
		return (NULL);
	}

	tdata->prng_state = (uint64_t)(uintptr_t)tdata;
	prof_sample_threshold_update(tdata);

	tdata->enq = false;
	tdata->enq_idump = false;
	tdata->enq_gdump = false;

	tdata->dumping = false;
	tdata->active = active;

	malloc_mutex_lock(&tdatas_mtx);
	tdata_tree_insert(&tdatas, tdata);
	malloc_mutex_unlock(&tdatas_mtx);

	return (tdata);
}
prof_tdata_t *
prof_tdata_init(tsd_t *tsd)
{

	return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
	    prof_thread_active_init_get()));
}
/* tdata->lock must be held. */
static bool
prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
{

	if (tdata->attached && !even_if_attached)
		return (false);
	if (ckh_count(&tdata->bt2tctx) != 0)
		return (false);
	return (true);
}
/* tdatas_mtx must be held. */
static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached)
{
	tcache_t *tcache;

	assert(prof_tdata_should_destroy(tdata, even_if_attached));
	assert(tsd_prof_tdata_get(tsd) != tdata);

	tdata_tree_remove(&tdatas, tdata);

	tcache = tcache_get(tsd, false);
	if (tdata->thread_name != NULL)
		idalloctm(tsd, tdata->thread_name, tcache, true);
	ckh_delete(tsd, &tdata->bt2tctx);
	idalloctm(tsd, tdata, tcache, true);
}
static void
prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
{

	malloc_mutex_lock(&tdatas_mtx);
	prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
	malloc_mutex_unlock(&tdatas_mtx);
}
static void
prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
{
	bool destroy_tdata;

	malloc_mutex_lock(tdata->lock);
	if (tdata->attached) {
		destroy_tdata = prof_tdata_should_destroy(tdata, true);
		/*
		 * Only detach if !destroy_tdata, because detaching would allow
		 * another thread to win the race to destroy tdata.
		 */
		if (!destroy_tdata)
			tdata->attached = false;
		tsd_prof_tdata_set(tsd, NULL);
	} else
		destroy_tdata = false;
	malloc_mutex_unlock(tdata->lock);
	if (destroy_tdata)
		prof_tdata_destroy(tsd, tdata, true);
}
prof_tdata_t *
prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
{
	uint64_t thr_uid = tdata->thr_uid;
	uint64_t thr_discrim = tdata->thr_discrim + 1;
	char *thread_name = (tdata->thread_name != NULL) ?
	    prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
	bool active = tdata->active;

	prof_tdata_detach(tsd, tdata);
	return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
	    active));
}
static bool
prof_tdata_expire(prof_tdata_t *tdata)
{
	bool destroy_tdata;

	malloc_mutex_lock(tdata->lock);
	if (!tdata->expired) {
		tdata->expired = true;
		destroy_tdata = tdata->attached ? false :
		    prof_tdata_should_destroy(tdata, false);
	} else
		destroy_tdata = false;
	malloc_mutex_unlock(tdata->lock);

	return (destroy_tdata);
}
static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{

	return (prof_tdata_expire(tdata) ? tdata : NULL);
}
void
prof_reset(tsd_t *tsd, size_t lg_sample)
{
	prof_tdata_t *next;

	assert(lg_sample < (sizeof(uint64_t) << 3));

	malloc_mutex_lock(&prof_dump_mtx);
	malloc_mutex_lock(&tdatas_mtx);

	lg_prof_sample = lg_sample;

	next = NULL;
	do {
		prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
		    prof_tdata_reset_iter, NULL);
		if (to_destroy != NULL) {
			next = tdata_tree_next(&tdatas, to_destroy);
			prof_tdata_destroy_locked(tsd, to_destroy, false);
		} else
			next = NULL;
	} while (next != NULL);

	malloc_mutex_unlock(&tdatas_mtx);
	malloc_mutex_unlock(&prof_dump_mtx);
}
void
prof_tdata_cleanup(tsd_t *tsd)
{
	prof_tdata_t *tdata;

	if (!config_prof)
		return;

	tdata = tsd_prof_tdata_get(tsd);
	if (tdata != NULL)
		prof_tdata_detach(tsd, tdata);
}
bool
prof_active_get(void)
{
	bool prof_active_current;

	malloc_mutex_lock(&prof_active_mtx);
	prof_active_current = prof_active;
	malloc_mutex_unlock(&prof_active_mtx);
	return (prof_active_current);
}

bool
prof_active_set(bool active)
{
	bool prof_active_old;

	malloc_mutex_lock(&prof_active_mtx);
	prof_active_old = prof_active;
	prof_active = active;
	malloc_mutex_unlock(&prof_active_mtx);
	return (prof_active_old);
}
const char *
prof_thread_name_get(void)
{
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return ("");
	return (tdata->thread_name != NULL ? tdata->thread_name : "");
}
static char *
prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
{
	char *ret;
	size_t size;

	if (thread_name == NULL)
		return (NULL);

	size = strlen(thread_name) + 1;
	if (size == 1)
		return ("");

	ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL);
	if (ret == NULL)
		return (NULL);
	memcpy(ret, thread_name, size);
	return (ret);
}
int
prof_thread_name_set(tsd_t *tsd, const char *thread_name)
{
	prof_tdata_t *tdata;
	unsigned i;
	char *s;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return (EAGAIN);

	/* Validate input. */
	if (thread_name == NULL)
		return (EFAULT);
	for (i = 0; thread_name[i] != '\0'; i++) {
		char c = thread_name[i];
		if (!isgraph(c) && !isblank(c))
			return (EFAULT);
	}

	s = prof_thread_name_alloc(tsd, thread_name);
	if (s == NULL)
		return (EAGAIN);

	if (tdata->thread_name != NULL) {
		idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
		    true);
		tdata->thread_name = NULL;
	}
	if (strlen(s) > 0)
		tdata->thread_name = s;
	return (0);
}
bool
prof_thread_active_get(void)
{
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return (false);
	return (tdata->active);
}

bool
prof_thread_active_set(bool active)
{
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return (true);
	tdata->active = active;
	return (false);
}
bool
prof_thread_active_init_get(void)
{
	bool active_init;

	malloc_mutex_lock(&prof_thread_active_init_mtx);
	active_init = prof_thread_active_init;
	malloc_mutex_unlock(&prof_thread_active_init_mtx);
	return (active_init);
}

bool
prof_thread_active_init_set(bool active_init)
{
	bool active_init_old;

	malloc_mutex_lock(&prof_thread_active_init_mtx);
	active_init_old = prof_thread_active_init;
	prof_thread_active_init = active_init;
	malloc_mutex_unlock(&prof_thread_active_init_mtx);
	return (active_init_old);
}
bool
prof_gdump_get(void)
{
	bool prof_gdump_current;

	malloc_mutex_lock(&prof_gdump_mtx);
	prof_gdump_current = prof_gdump_val;
	malloc_mutex_unlock(&prof_gdump_mtx);
	return (prof_gdump_current);
}

bool
prof_gdump_set(bool gdump)
{
	bool prof_gdump_old;

	malloc_mutex_lock(&prof_gdump_mtx);
	prof_gdump_old = prof_gdump_val;
	prof_gdump_val = gdump;
	malloc_mutex_unlock(&prof_gdump_mtx);
	return (prof_gdump_old);
}
void
prof_boot0(void)
{

	cassert(config_prof);

	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
	    sizeof(PROF_PREFIX_DEFAULT));
}
void
prof_boot1(void)
{

	cassert(config_prof);

	/*
	 * opt_prof must be in its final state before any arenas are
	 * initialized, so this function must be executed early.
	 */

	if (opt_prof_leak && !opt_prof) {
		/*
		 * Enable opt_prof, but in such a way that profiles are never
		 * automatically dumped.
		 */
		opt_prof = true;
		opt_prof_gdump = false;
	} else if (opt_prof) {
		if (opt_lg_prof_interval >= 0) {
			prof_interval = (((uint64_t)1U) <<
			    opt_lg_prof_interval);
		}
	}
}
bool
prof_boot2(void)
{

	cassert(config_prof);

	if (opt_prof) {
		tsd_t *tsd;
		unsigned i;

		lg_prof_sample = opt_lg_prof_sample;

		prof_active = opt_prof_active;
		if (malloc_mutex_init(&prof_active_mtx))
			return (true);

		prof_gdump_val = opt_prof_gdump;
		if (malloc_mutex_init(&prof_gdump_mtx))
			return (true);

		prof_thread_active_init = opt_prof_thread_active_init;
		if (malloc_mutex_init(&prof_thread_active_init_mtx))
			return (true);

		tsd = tsd_fetch();
		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
		    prof_bt_keycomp))
			return (true);
		if (malloc_mutex_init(&bt2gctx_mtx))
			return (true);

		tdata_tree_new(&tdatas);
		if (malloc_mutex_init(&tdatas_mtx))
			return (true);

		next_thr_uid = 0;
		if (malloc_mutex_init(&next_thr_uid_mtx))
			return (true);

		if (malloc_mutex_init(&prof_dump_seq_mtx))
			return (true);
		if (malloc_mutex_init(&prof_dump_mtx))
			return (true);

		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
		    atexit(prof_fdump) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}

		gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
		    sizeof(malloc_mutex_t));
		if (gctx_locks == NULL)
			return (true);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			if (malloc_mutex_init(&gctx_locks[i]))
				return (true);
		}

		tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
		    sizeof(malloc_mutex_t));
		if (tdata_locks == NULL)
			return (true);
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			if (malloc_mutex_init(&tdata_locks[i]))
				return (true);
		}
	}

#ifdef JEMALLOC_PROF_LIBGCC
	/*
	 * Cause the backtracing machinery to allocate its internal state
	 * before enabling profiling.
	 */
	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

	prof_booted = true;

	return (false);
}
void
prof_prefork(void)
{

	if (opt_prof) {
		unsigned i;

		malloc_mutex_prefork(&tdatas_mtx);
		malloc_mutex_prefork(&bt2gctx_mtx);
		malloc_mutex_prefork(&next_thr_uid_mtx);
		malloc_mutex_prefork(&prof_dump_seq_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_prefork(&gctx_locks[i]);
		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
			malloc_mutex_prefork(&tdata_locks[i]);
	}
}
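/*
 * The postfork functions below reinitialize/release these same mutexes in the
 * reverse of the acquisition order above, per the usual fork-safety protocol.
 */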
void
prof_postfork_parent(void)
{

	if (opt_prof) {
		unsigned i;

		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
			malloc_mutex_postfork_parent(&tdata_locks[i]);
		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_postfork_parent(&gctx_locks[i]);
		malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
		malloc_mutex_postfork_parent(&next_thr_uid_mtx);
		malloc_mutex_postfork_parent(&bt2gctx_mtx);
		malloc_mutex_postfork_parent(&tdatas_mtx);
	}
}
void
prof_postfork_child(void)
{

	if (opt_prof) {
		unsigned i;

		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
			malloc_mutex_postfork_child(&tdata_locks[i]);
		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_postfork_child(&gctx_locks[i]);
		malloc_mutex_postfork_child(&prof_dump_seq_mtx);
		malloc_mutex_postfork_child(&next_thr_uid_mtx);
		malloc_mutex_postfork_child(&bt2gctx_mtx);
		malloc_mutex_postfork_child(&tdatas_mtx);
	}
}

/******************************************************************************/