#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif
/******************************************************************************/
/* Data. */

malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL)

bool opt_prof = false;
bool opt_prof_active = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = true;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[PATH_MAX + 1];

uint64_t prof_interval = 0;
bool prof_promote;

/*
 * Table of mutexes that are shared among ctx's. These are leaf locks, so
 * there is no problem with using them for more than one ctx at the same time.
 * The primary motivation for this sharing, though, is that ctx's are
 * ephemeral, and destroying mutexes causes complications for systems that
 * allocate when creating/destroying mutexes.
 */
static malloc_mutex_t *ctx_locks;
static unsigned cum_ctxs; /* Atomic counter. */

/*
 * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t bt2ctx;
static malloc_mutex_t bt2ctx_mtx;

static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since
 * that mutex must be held anyway during dumping.
 */
static char prof_dump_buf[PROF_DUMP_BUFSIZE];
static unsigned prof_dump_buf_end;
static int prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static prof_bt_t *bt_dup(prof_bt_t *bt);
static void bt_destroy(prof_bt_t *bt);
#ifdef JEMALLOC_PROF_LIBGCC
static _Unwind_Reason_Code prof_unwind_init_callback(
    struct _Unwind_Context *context, void *arg);
static _Unwind_Reason_Code prof_unwind_callback(
    struct _Unwind_Context *context, void *arg);
#endif
static bool prof_flush(bool propagate_err);
static bool prof_write(bool propagate_err, const char *s);
static bool prof_printf(bool propagate_err, const char *format, ...)
    JEMALLOC_ATTR(format(printf, 2, 3));
static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all,
    size_t *leak_nctx);
static void prof_ctx_destroy(prof_ctx_t *ctx);
static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt);
static bool prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx,
    prof_bt_t *bt);
static bool prof_dump_maps(bool propagate_err);
static bool prof_dump(bool propagate_err, const char *filename,
    bool leakcheck);
static void prof_dump_filename(char *filename, char v, int64_t vseq);
static void prof_fdump(void);
static void prof_bt_hash(const void *key, size_t r_hash[2]);
static bool prof_bt_keycomp(const void *k1, const void *k2);
static malloc_mutex_t *prof_ctx_mutex_choose(void);

/******************************************************************************/

void
bt_init(prof_bt_t *bt, void **vec)
{

	cassert(config_prof);

	bt->vec = vec;
	bt->len = 0;
}

static void
bt_destroy(prof_bt_t *bt)
{

	cassert(config_prof);

	idalloc(bt);
}

static prof_bt_t *
bt_dup(prof_bt_t *bt)
{
	prof_bt_t *ret;

	cassert(config_prof);

	/*
	 * Create a single allocation that has space for vec immediately
	 * following the prof_bt_t structure. The backtraces that get stored in
	 * the backtrace caches are copied from stack-allocated temporary
	 * variables, so size is known at creation time. Making this a
	 * contiguous object improves cache locality.
	 */
	ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) +
	    (bt->len * sizeof(void *)));
	if (ret == NULL)
		return (NULL);
	ret->vec = (void **)((uintptr_t)ret +
	    QUANTUM_CEILING(sizeof(prof_bt_t)));
	memcpy(ret->vec, bt->vec, bt->len * sizeof(void *));
	ret->len = bt->len;

	return (ret);
}
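
/*
 * Layout of the object returned by bt_dup() (illustrative):
 *
 *	[prof_bt_t header][pad to QUANTUM][vec[0] ... vec[len-1]]
 *	^ ret                             ^ ret->vec
 */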

static inline void
prof_enter(prof_tdata_t *prof_tdata)
{

	cassert(config_prof);

	assert(prof_tdata->enq == false);
	prof_tdata->enq = true;

	malloc_mutex_lock(&bt2ctx_mtx);
}

static inline void
prof_leave(prof_tdata_t *prof_tdata)
{
	bool idump, gdump;

	cassert(config_prof);

	malloc_mutex_unlock(&bt2ctx_mtx);

	assert(prof_tdata->enq);
	prof_tdata->enq = false;
	idump = prof_tdata->enq_idump;
	prof_tdata->enq_idump = false;
	gdump = prof_tdata->enq_gdump;
	prof_tdata->enq_gdump = false;

	if (idump)
		prof_idump();
	if (gdump)
		prof_gdump();
}

#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
	unw_context_t uc;
	unw_cursor_t cursor;
	unsigned i;
	int err;

	cassert(config_prof);
	assert(bt->len == 0);
	assert(bt->vec != NULL);

	unw_getcontext(&uc);
	unw_init_local(&cursor, &uc);

	/* Throw away (nignore+1) stack frames, if that many exist. */
	for (i = 0; i < nignore + 1; i++) {
		err = unw_step(&cursor);
		if (err <= 0)
			return;
	}

	/*
	 * Iterate over stack frames until there are no more, or until no space
	 * remains in bt.
	 */
	for (i = 0; i < PROF_BT_MAX; i++) {
		unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
		bt->len++;
		err = unw_step(&cursor);
		if (err <= 0)
			break;
	}
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{

	cassert(config_prof);

	return (_URC_NO_REASON);
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;

	cassert(config_prof);

	if (data->nignore > 0)
		data->nignore--;
	else {
		data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
		data->bt->len++;
		if (data->bt->len == data->max)
			return (_URC_END_OF_STACK);
	}

	return (_URC_NO_REASON);
}

void
prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
	prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX};

	cassert(config_prof);

	_Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
#define BT_FRAME(i)						\
	if ((i) < nignore + PROF_BT_MAX) {			\
		void *p;					\
		if (__builtin_frame_address(i) == 0)		\
			return;					\
		p = __builtin_return_address(i);		\
		if (p == NULL)					\
			return;					\
		if (i >= nignore) {				\
			bt->vec[(i) - nignore] = p;		\
			bt->len = (i) - nignore + 1;		\
		}						\
	} else							\
		return;

	cassert(config_prof);
	assert(nignore <= 3);
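
	/*
	 * BT_FRAME() is instantiated once per candidate frame index. A sketch
	 * of the long expansion that follows in the full source, assuming the
	 * run covers indices 0 through nignore + PROF_BT_MAX - 1:
	 */
	BT_FRAME(0)
	BT_FRAME(1)
	BT_FRAME(2)
	/* ... one BT_FRAME(i) per remaining index ... */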
	/* Extras to compensate for nignore. */
	BT_FRAME(130)
	BT_FRAME(131)
	BT_FRAME(132)
	BT_FRAME(133)
}
#else
void
prof_backtrace(prof_bt_t *bt, unsigned nignore)
{

	cassert(config_prof);
	assert(false);
}
#endif

prof_thr_cnt_t *
prof_lookup(prof_bt_t *bt)
{
	union {
		prof_thr_cnt_t *p;
		void *v;
	} ret;
	prof_tdata_t *prof_tdata;

	cassert(config_prof);

	prof_tdata = prof_tdata_get(false);
	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
		return (NULL);

	if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
		union {
			prof_bt_t *p;
			void *v;
		} btkey;
		union {
			prof_ctx_t *p;
			void *v;
		} ctx;
		bool new_ctx;

		/*
		 * This thread's cache lacks bt. Look for it in the global
		 * cache.
		 */
		prof_enter(prof_tdata);
		if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
			/* bt has never been seen before. Insert it. */
			ctx.v = imalloc(sizeof(prof_ctx_t));
			if (ctx.v == NULL) {
				prof_leave(prof_tdata);
				return (NULL);
			}
			btkey.p = bt_dup(bt);
			if (btkey.v == NULL) {
				prof_leave(prof_tdata);
				idalloc(ctx.v);
				return (NULL);
			}
			ctx.p->bt = btkey.p;
			ctx.p->lock = prof_ctx_mutex_choose();
			/*
			 * Set nlimbo to 1, in order to avoid a race condition
			 * with prof_ctx_merge()/prof_ctx_destroy().
			 */
			ctx.p->nlimbo = 1;
			memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t));
			ql_new(&ctx.p->cnts_ql);
			if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
				/* OOM. */
				prof_leave(prof_tdata);
				idalloc(btkey.v);
				idalloc(ctx.v);
				return (NULL);
			}
			new_ctx = true;
		} else {
			/*
			 * Increment nlimbo, in order to avoid a race condition
			 * with prof_ctx_merge()/prof_ctx_destroy().
			 */
			malloc_mutex_lock(ctx.p->lock);
			ctx.p->nlimbo++;
			malloc_mutex_unlock(ctx.p->lock);
			new_ctx = false;
		}
		prof_leave(prof_tdata);

		/* Link a prof_thr_cnt_t into ctx for this thread. */
		if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
			assert(ckh_count(&prof_tdata->bt2cnt) > 0);
			/*
			 * Flush the least recently used cnt in order to keep
			 * bt2cnt from becoming too large.
			 */
			ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
			assert(ret.v != NULL);
			if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
			    NULL, NULL))
				assert(false);
			ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
			prof_ctx_merge(ret.p->ctx, ret.p);
			/* ret can now be re-used. */
		} else {
			assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
			/* Allocate and partially initialize a new cnt. */
			ret.v = imalloc(sizeof(prof_thr_cnt_t));
			if (ret.p == NULL) {
				if (new_ctx)
					prof_ctx_destroy(ctx.p);
				return (NULL);
			}
			ql_elm_new(ret.p, cnts_link);
			ql_elm_new(ret.p, lru_link);
		}
		/* Finish initializing ret. */
		ret.p->ctx = ctx.p;
		ret.p->epoch = 0;
		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
		if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
			if (new_ctx)
				prof_ctx_destroy(ctx.p);
			idalloc(ret.v);
			return (NULL);
		}
		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
		malloc_mutex_lock(ctx.p->lock);
		ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
		ctx.p->nlimbo--;
		malloc_mutex_unlock(ctx.p->lock);
	} else {
		/* Move ret to the front of the LRU. */
		ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
	}

	return (ret.p);
}

static bool
prof_flush(bool propagate_err)
{
	bool ret = false;
	ssize_t err;

	cassert(config_prof);

	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
	if (err == -1) {
		if (propagate_err == false) {
			malloc_write("<jemalloc>: write() failed during heap "
			    "profile flush\n");
			if (opt_abort)
				abort();
		}
		ret = true;
	}
	prof_dump_buf_end = 0;

	return (ret);
}

static bool
prof_write(bool propagate_err, const char *s)
{
	unsigned i, slen, n;

	cassert(config_prof);

	i = 0;
	slen = strlen(s);
	while (i < slen) {
		/* Flush the buffer if it is full. */
		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
			if (prof_flush(propagate_err) && propagate_err)
				return (true);

		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
			/* Finish writing. */
			n = slen - i;
		} else {
			/* Write as much of s as will fit. */
			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
		}
		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
		prof_dump_buf_end += n;
		i += n;
	}

	return (false);
}

JEMALLOC_ATTR(format(printf, 2, 3))
static bool
prof_printf(bool propagate_err, const char *format, ...)
{
	bool ret;
	va_list ap;
	char buf[PROF_PRINTF_BUFSIZE];

	va_start(ap, format);
	malloc_vsnprintf(buf, sizeof(buf), format, ap);
	va_end(ap);
	ret = prof_write(propagate_err, buf);

	return (ret);
}

static void
prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
{
	prof_thr_cnt_t *thr_cnt;
	prof_cnt_t tcnt;

	cassert(config_prof);

	malloc_mutex_lock(ctx->lock);

	memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
	ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
		volatile unsigned *epoch = &thr_cnt->epoch;

		while (true) {
			unsigned epoch0 = *epoch;

			/* Make sure epoch is even. */
			if (epoch0 & 1U)
				continue;

			memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));

			/* Terminate if epoch didn't change while reading. */
			if (*epoch == epoch0)
				break;
		}

		ctx->cnt_summed.curobjs += tcnt.curobjs;
		ctx->cnt_summed.curbytes += tcnt.curbytes;
		if (opt_prof_accum) {
			ctx->cnt_summed.accumobjs += tcnt.accumobjs;
			ctx->cnt_summed.accumbytes += tcnt.accumbytes;
		}
	}

	if (ctx->cnt_summed.curobjs != 0)
		(*leak_nctx)++;

	/* Add to cnt_all. */
	cnt_all->curobjs += ctx->cnt_summed.curobjs;
	cnt_all->curbytes += ctx->cnt_summed.curbytes;
	if (opt_prof_accum) {
		cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
		cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
	}

	malloc_mutex_unlock(ctx->lock);
}
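
/*
 * Writer-side counterpart of the epoch loop above (a sketch; the actual
 * updates are performed by the counter-update macros in the prof header).
 * A writer makes epoch odd for the duration of its update, with memory
 * barriers (e.g. mb_write()) on either side of the counter writes:
 *
 *	thr_cnt->epoch++;	(odd: update in progress)
 *	...update thr_cnt->cnts fields...
 *	thr_cnt->epoch++;	(even again: snapshot is consistent)
 *
 * A reader that observes the same even epoch before and after its memcpy()
 * is therefore guaranteed a consistent snapshot without taking a lock.
 */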

static void
prof_ctx_destroy(prof_ctx_t *ctx)
{
	prof_tdata_t *prof_tdata;

	cassert(config_prof);

	/*
	 * Check that ctx is still unused by any thread cache before destroying
	 * it. prof_lookup() increments ctx->nlimbo in order to avoid a race
	 * condition with this function, as does prof_ctx_merge() in order to
	 * avoid a race between the main body of prof_ctx_merge() and entry
	 * into this function.
	 */
	prof_tdata = prof_tdata_get(false);
	assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
	prof_enter(prof_tdata);
	malloc_mutex_lock(ctx->lock);
	if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
	    ctx->nlimbo == 1) {
		assert(ctx->cnt_merged.curbytes == 0);
		assert(ctx->cnt_merged.accumobjs == 0);
		assert(ctx->cnt_merged.accumbytes == 0);
		/* Remove ctx from bt2ctx. */
		if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
			assert(false);
		prof_leave(prof_tdata);
		/* Destroy ctx. */
		malloc_mutex_unlock(ctx->lock);
		bt_destroy(ctx->bt);
		idalloc(ctx);
	} else {
		/*
		 * Compensate for increment in prof_ctx_merge() or
		 * prof_lookup().
		 */
		ctx->nlimbo--;
		malloc_mutex_unlock(ctx->lock);
		prof_leave(prof_tdata);
	}
}

static void
prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
{
	bool destroy;

	cassert(config_prof);

	/* Merge cnt stats and detach from ctx. */
	malloc_mutex_lock(ctx->lock);
	ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
	ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
	ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
	ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
	ql_remove(&ctx->cnts_ql, cnt, cnts_link);
	if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
	    ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
		/*
		 * Increment ctx->nlimbo in order to keep another thread from
		 * winning the race to destroy ctx while this one has ctx->lock
		 * dropped. Without this, it would be possible for another
		 * thread to:
		 *
		 * 1) Sample an allocation associated with ctx.
		 * 2) Deallocate the sampled object.
		 * 3) Successfully prof_ctx_destroy(ctx).
		 *
		 * The result would be that ctx no longer exists by the time
		 * this thread accesses it in prof_ctx_destroy().
		 */
		ctx->nlimbo++;
		destroy = true;
	} else
		destroy = false;
	malloc_mutex_unlock(ctx->lock);
	if (destroy)
		prof_ctx_destroy(ctx);
}

static bool
prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
{
	unsigned i;

	cassert(config_prof);

	/*
	 * Current statistics can sum to 0 as a result of unmerged per thread
	 * statistics. Additionally, interval- and growth-triggered dumps can
	 * occur between the time a ctx is created and when its statistics are
	 * filled in. Avoid dumping any ctx that is an artifact of either
	 * implementation detail.
	 */
	if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
	    (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
		assert(ctx->cnt_summed.curobjs == 0);
		assert(ctx->cnt_summed.curbytes == 0);
		assert(ctx->cnt_summed.accumobjs == 0);
		assert(ctx->cnt_summed.accumbytes == 0);
		return (false);
	}

	if (prof_printf(propagate_err, "%"PRId64": %"PRId64
	    " [%"PRIu64": %"PRIu64"] @",
	    ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
	    ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes))
		return (true);

	for (i = 0; i < bt->len; i++) {
		if (prof_printf(propagate_err, " %#"PRIxPTR,
		    (uintptr_t)bt->vec[i]))
			return (true);
	}

	if (prof_write(propagate_err, "\n"))
		return (true);

	return (false);
}
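
/*
 * For reference, a ctx line emitted by prof_dump_ctx() looks like the
 * following (values illustrative): three live objects totaling 768 bytes,
 * followed by the backtrace PCs:
 *
 *	3: 768 [0: 0] @ 0x4005e4 0x400532
 */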

static bool
prof_dump_maps(bool propagate_err)
{
	int mfd;
	char filename[PATH_MAX + 1];

	cassert(config_prof);

	malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
	    (int)getpid());
	mfd = open(filename, O_RDONLY);
	if (mfd != -1) {
		ssize_t nread;

		if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
		    propagate_err)
			return (true);
		nread = 0;
		do {
			prof_dump_buf_end += nread;
			if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
				/* Make space in prof_dump_buf before read(). */
				if (prof_flush(propagate_err) && propagate_err)
					return (true);
			}
			nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
			    PROF_DUMP_BUFSIZE - prof_dump_buf_end);
		} while (nread > 0);
		close(mfd);
	} else
		return (true);

	return (false);
}

static bool
prof_dump(bool propagate_err, const char *filename, bool leakcheck)
{
	prof_tdata_t *prof_tdata;
	prof_cnt_t cnt_all;
	size_t tabind;
	union {
		prof_bt_t *p;
		void *v;
	} bt;
	union {
		prof_ctx_t *p;
		void *v;
	} ctx;
	size_t leak_nctx;

	cassert(config_prof);

	prof_tdata = prof_tdata_get(false);
	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
		return (true);
	prof_enter(prof_tdata);
	prof_dump_fd = creat(filename, 0644);
	if (prof_dump_fd == -1) {
		if (propagate_err == false) {
			malloc_printf(
			    "<jemalloc>: creat(\"%s\", 0644) failed\n",
			    filename);
			if (opt_abort)
				abort();
		}
		goto label_error;
	}

	/* Merge per thread profile stats, and sum them in cnt_all. */
	memset(&cnt_all, 0, sizeof(prof_cnt_t));
	leak_nctx = 0;
	for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
		prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx);

	/* Dump profile header. */
	if (opt_lg_prof_sample == 0) {
		if (prof_printf(propagate_err,
		    "heap profile: %"PRId64": %"PRId64
		    " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
		    cnt_all.curobjs, cnt_all.curbytes,
		    cnt_all.accumobjs, cnt_all.accumbytes))
			goto label_error;
	} else {
		if (prof_printf(propagate_err,
		    "heap profile: %"PRId64": %"PRId64
		    " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
		    cnt_all.curobjs, cnt_all.curbytes,
		    cnt_all.accumobjs, cnt_all.accumbytes,
		    ((uint64_t)1U << opt_lg_prof_sample)))
			goto label_error;
	}
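
	/*
	 * Example headers (values illustrative): opt_lg_prof_sample == 0
	 * selects the legacy format,
	 *
	 *	heap profile: 3: 768 [0: 0] @ heapprofile
	 *
	 * whereas a non-zero sample interval (e.g. 2^19 bytes) selects the
	 * v2 format:
	 *
	 *	heap profile: 3: 768 [0: 0] @ heap_v2/524288
	 */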

	/* Dump per ctx profile stats. */
	for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v)
	    == false;) {
		if (prof_dump_ctx(propagate_err, ctx.p, bt.p))
			goto label_error;
	}

	/* Dump /proc/<pid>/maps if possible. */
	if (prof_dump_maps(propagate_err))
		goto label_error;

	if (prof_flush(propagate_err))
		goto label_error;
	close(prof_dump_fd);
	prof_leave(prof_tdata);

	if (leakcheck && cnt_all.curbytes != 0) {
		malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
		    PRId64" object%s, %zu context%s\n",
		    cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "",
		    cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "",
		    leak_nctx, (leak_nctx != 1) ? "s" : "");
		malloc_printf(
		    "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
		    filename);
	}

	return (false);
label_error:
	prof_leave(prof_tdata);
	return (true);
}

#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
static void
prof_dump_filename(char *filename, char v, int64_t vseq)
{

	cassert(config_prof);

	if (vseq != UINT64_C(0xffffffffffffffff)) {
		/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"PRIu64".%c%"PRId64".heap",
		    opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
	} else {
		/* "<prefix>.<pid>.<seq>.<v>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"PRIu64".%c.heap",
		    opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
	}
	prof_dump_seq++;
}
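
/*
 * Example (illustrative): assuming the default prefix ("jeprof" via
 * PROF_PREFIX_DEFAULT), pid 12345, and prof_dump_seq 3, an
 * interval-triggered dump with vseq 7 is named
 *
 *	jeprof.12345.3.i7.heap
 *
 * while the final dump (vseq == 0xffffffffffffffff) is named
 *
 *	jeprof.12345.3.f.heap
 */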

static void
prof_fdump(void)
{
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);

	if (prof_booted == false)
		return;

	if (opt_prof_final && opt_prof_prefix[0] != '\0') {
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff));
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		prof_dump(false, filename, opt_prof_leak);
	}
}

void
prof_idump(void)
{
	prof_tdata_t *prof_tdata;
	char filename[PATH_MAX + 1];

	cassert(config_prof);

	if (prof_booted == false)
		return;
	prof_tdata = prof_tdata_get(false);
	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
		return;
	if (prof_tdata->enq) {
		prof_tdata->enq_idump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename, 'i', prof_dump_iseq);
		prof_dump_iseq++;
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		prof_dump(false, filename, false);
	}
}

bool
prof_mdump(const char *filename)
{
	char filename_buf[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);

	if (opt_prof == false || prof_booted == false)
		return (true);

	if (filename == NULL) {
		/* No filename specified, so automatically generate one. */
		if (opt_prof_prefix[0] == '\0')
			return (true);
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
		prof_dump_mseq++;
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		filename = filename_buf;
	}
	return (prof_dump(true, filename, false));
}

void
prof_gdump(void)
{
	prof_tdata_t *prof_tdata;
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);

	if (prof_booted == false)
		return;
	prof_tdata = prof_tdata_get(false);
	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
		return;
	if (prof_tdata->enq) {
		prof_tdata->enq_gdump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename, 'u', prof_dump_useq);
		prof_dump_useq++;
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		prof_dump(false, filename, false);
	}
}

static void
prof_bt_hash(const void *key, size_t r_hash[2])
{
	prof_bt_t *bt = (prof_bt_t *)key;

	cassert(config_prof);

	hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}

static bool
prof_bt_keycomp(const void *k1, const void *k2)
{
	const prof_bt_t *bt1 = (prof_bt_t *)k1;
	const prof_bt_t *bt2 = (prof_bt_t *)k2;

	cassert(config_prof);

	if (bt1->len != bt2->len)
		return (false);
	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}

static malloc_mutex_t *
prof_ctx_mutex_choose(void)
{
	unsigned nctxs = atomic_add_u(&cum_ctxs, 1);

	return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
}
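
/*
 * Example: the first ctx created takes ctx_locks[0], the second
 * ctx_locks[1], and so on, wrapping modulo PROF_NCTX_LOCKS, so contention
 * on ctx locks is spread round-robin across the table.
 */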

prof_tdata_t *
prof_tdata_init(void)
{
	prof_tdata_t *prof_tdata;

	cassert(config_prof);

	/* Initialize an empty cache for this thread. */
	prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
	if (prof_tdata == NULL)
		return (NULL);

	if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS,
	    prof_bt_hash, prof_bt_keycomp)) {
		idalloc(prof_tdata);
		return (NULL);
	}
	ql_new(&prof_tdata->lru_ql);

	prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
	if (prof_tdata->vec == NULL) {
		ckh_delete(&prof_tdata->bt2cnt);
		idalloc(prof_tdata);
		return (NULL);
	}

	prof_tdata->prng_state = 0;
	prof_tdata->threshold = 0;
	prof_tdata->accum = 0;

	prof_tdata->enq = false;
	prof_tdata->enq_idump = false;
	prof_tdata->enq_gdump = false;

	prof_tdata_tsd_set(&prof_tdata);

	return (prof_tdata);
}

void
prof_tdata_cleanup(void *arg)
{
	prof_thr_cnt_t *cnt;
	prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg;

	cassert(config_prof);

	if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) {
		/*
		 * Another destructor deallocated memory after this destructor
		 * was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY
		 * in order to receive another callback.
		 */
		prof_tdata = PROF_TDATA_STATE_PURGATORY;
		prof_tdata_tsd_set(&prof_tdata);
	} else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to PROF_TDATA_STATE_PURGATORY so that other destructors
		 * wouldn't cause re-creation of the prof_tdata. This time, do
		 * nothing, so that the destructor will not be called again.
		 */
	} else if (prof_tdata != NULL) {
		/*
		 * Delete the hash table. All of its contents can still be
		 * iterated over via the LRU.
		 */
		ckh_delete(&prof_tdata->bt2cnt);
		/*
		 * Iteratively merge cnt's into the global stats and delete
		 * them.
		 */
		while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
			ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
			prof_ctx_merge(cnt->ctx, cnt);
			idalloc(cnt);
		}
		idalloc(prof_tdata->vec);
		idalloc(prof_tdata);
		prof_tdata = PROF_TDATA_STATE_PURGATORY;
		prof_tdata_tsd_set(&prof_tdata);
	}
}
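
/*
 * State transitions for the per thread prof_tdata TSD value (a sketch):
 *
 *	NULL -> live prof_tdata		(prof_tdata_init())
 *	live -> PURGATORY		(this destructor: stats merged, freed)
 *	PURGATORY -> REINCARNATED	(a later destructor allocates again)
 *	REINCARNATED -> PURGATORY	(request one final callback)
 */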

void
prof_boot0(void)
{

	cassert(config_prof);

	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
	    sizeof(PROF_PREFIX_DEFAULT));
}

void
prof_boot1(void)
{

	cassert(config_prof);

	/*
	 * opt_prof and prof_promote must be in their final state before any
	 * arenas are initialized, so this function must be executed early.
	 */

	if (opt_prof_leak && opt_prof == false) {
		/*
		 * Enable opt_prof, but in such a way that profiles are never
		 * automatically dumped.
		 */
		opt_prof = true;
		opt_prof_gdump = false;
	} else if (opt_prof) {
		if (opt_lg_prof_interval >= 0) {
			prof_interval = (((uint64_t)1U) <<
			    opt_lg_prof_interval);
		}
	}

	prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
}
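
/*
 * Example (illustrative, assuming 4 KiB pages, i.e. LG_PAGE == 12, and the
 * default lg_prof_sample of 19): prof_promote ends up true, so sampled
 * small allocations are promoted to page size, allowing them to be tracked
 * the same way as large allocations.
 */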

bool
prof_boot2(void)
{

	cassert(config_prof);

	if (opt_prof) {
		unsigned i;

		if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
		    prof_bt_keycomp))
			return (true);
		if (malloc_mutex_init(&bt2ctx_mtx))
			return (true);
		if (prof_tdata_tsd_boot()) {
			malloc_write(
			    "<jemalloc>: Error in pthread_key_create()\n");
			abort();
		}

		if (malloc_mutex_init(&prof_dump_seq_mtx))
			return (true);

		if (atexit(prof_fdump) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}

		ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
		    sizeof(malloc_mutex_t));
		if (ctx_locks == NULL)
			return (true);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			if (malloc_mutex_init(&ctx_locks[i]))
				return (true);
		}
	}

#ifdef JEMALLOC_PROF_LIBGCC
	/*
	 * Cause the backtracing machinery to allocate its internal state
	 * before enabling profiling.
	 */
	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

	prof_booted = true;

	return (false);
}

void
prof_prefork(void)
{

	if (opt_prof) {
		unsigned i;

		malloc_mutex_lock(&bt2ctx_mtx);
		malloc_mutex_lock(&prof_dump_seq_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_lock(&ctx_locks[i]);
	}
}

void
prof_postfork_parent(void)
{

	if (opt_prof) {
		unsigned i;

		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_postfork_parent(&ctx_locks[i]);
		malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
		malloc_mutex_postfork_parent(&bt2ctx_mtx);
	}
}

void
prof_postfork_child(void)
{

	if (opt_prof) {
		unsigned i;

		for (i = 0; i < PROF_NCTX_LOCKS; i++)
			malloc_mutex_postfork_child(&ctx_locks[i]);
		malloc_mutex_postfork_child(&prof_dump_seq_mtx);
		malloc_mutex_postfork_child(&bt2ctx_mtx);
	}
}
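
/*
 * The fork handlers above preserve a fixed lock hierarchy: prof_prefork()
 * acquires bt2ctx_mtx, then prof_dump_seq_mtx, then each ctx lock, and the
 * postfork functions release them in the reverse order.
 */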

/******************************************************************************/