#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char *__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char *je_malloc_conf;
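/*
 * An application may provide a default option string by defining this symbol
 * itself; a sketch (the option values are illustrative only):
 *
 *	const char *je_malloc_conf = "lg_chunk:24,narenas:2";
 *
 * malloc_conf_init() reads this first, then the /etc/malloc.conf symlink,
 * then the MALLOC_CONF environment variable, with later sources taking
 * precedence for a given option.
 */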
bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas_total;
unsigned narenas_auto;
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER ((unsigned long)0)
#  define INITIALIZER pthread_self()
#  define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER false
#  define INITIALIZER true
#  define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif
/* Used to avoid initialization races. */
#ifdef _MSC_VER
static malloc_mutex_t init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

    malloc_mutex_init(&init_lock);
}

#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;

#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
typedef struct {
    void *p;    /* Input pointer (as in realloc(p, s)). */
    size_t s;   /* Request size. */
    void *r;    /* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do { \
    if (opt_utrace) { \
        int utrace_serrno = errno; \
        malloc_utrace_t ut; \
        ut.p = (a); \
        ut.s = (b); \
        ut.r = (c); \
        utrace(&ut, sizeof(ut)); \
        errno = utrace_serrno; \
    } \
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
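/*
 * With the "utrace" option enabled, each entry point below logs one record
 * via utrace(2); e.g. a malloc(4096) that returns p is recorded as
 * UTRACE(0, 4096, p) and the matching free(p) as UTRACE(p, 0, 0), so the
 * trace output pairs requests with results.
 */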
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);
/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
    arena_t *ret;

    ret = (arena_t *)base_alloc(sizeof(arena_t));
    if (ret != NULL && arena_new(ret, ind) == false) {
        arenas[ind] = ret;
        return (ret);
    }
    /* Only reached if there is an OOM error. */

    /*
     * OOM here is quite inconvenient to propagate, since dealing with it
     * would require a check for failure in the fast path.  Instead, punt
     * by using arenas[0].  In practice, this is an extremely unlikely
     * failure.
     */
    malloc_write("<jemalloc>: Error initializing arena\n");
    if (opt_abort)
        abort();

    return (arenas[0]);
}
/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
    arena_t *ret;

    if (narenas_auto > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas_auto;
        malloc_mutex_lock(&arenas_lock);
        assert(arenas[0] != NULL);
        for (i = 1; i < narenas_auto; i++) {
            if (arenas[i] != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arenas[i]->nthreads <
                    arenas[choose]->nthreads)
                    choose = i;
            } else if (first_null == narenas_auto) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arenas[choose]->nthreads == 0
            || first_null == narenas_auto) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arenas[choose];
        } else {
            /* Initialize a new arena. */
            ret = arenas_extend(first_null);
        }
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arenas[0];
        malloc_mutex_lock(&arenas_lock);
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    }

    arenas_tsd_set(&ret);

    return (ret);
}
static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned narenas, i;

        /*
         * Merge stats from extant threads.  This is racy, since
         * individual threads do not lock when recording tcache stats
         * events.  As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
            arena_t *arena = arenas[i];
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}
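/*
 * With the "stats_print" option enabled, the atexit() hook above runs at
 * process exit; the same report can be requested manually at any time.  A
 * sketch (the opts characters follow the malloc_stats_print() docs):
 *
 *	je_malloc_stats_print(NULL, NULL, NULL);    full report to stderr
 *	je_malloc_stats_print(NULL, NULL, "ga");    omit general information
 *	                                            and per-arena statistics
 */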
/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
    unsigned ret;
    long result;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwNumberOfProcessors;
#else
    result = sysconf(_SC_NPROCESSORS_ONLN);
    if (result == -1) {
        /* Error; arbitrarily assume one CPU. */
        result = 1;
    }
#endif
    ret = (unsigned)result;

    return (ret);
}
static void
arenas_cleanup(void *arg)
{
    arena_t *arena = *(arena_t **)arg;

    malloc_mutex_lock(&arenas_lock);
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
}
static JEMALLOC_ATTR(always_inline) void
malloc_thread_init(void)
{

    /*
     * TSD initialization can't be safely done as a side effect of
     * deallocation, because it is possible for a thread to do nothing but
     * deallocate its TLS data via free(), in which case writing to TLS
     * would cause write-after-free memory corruption.  The quarantine
     * facility *only* gets used as a side effect of deallocation, so make
     * a best effort attempt at initializing its TSD by hooking all
     * allocation events.
     */
    if (config_fill && opt_quarantine)
        quarantine_alloc_hook();
}

static JEMALLOC_ATTR(always_inline) bool
malloc_init(void)
{

    if (malloc_initialized == false && malloc_init_hard())
        return (true);
    malloc_thread_init();

    return (false);
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; accept == false;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; accept == false;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}
static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    /*
     * Automatically configure valgrind before processing options.  The
     * valgrind option remains in jemalloc 3.x for compatibility reasons.
     */
    if (config_valgrind) {
        opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
        if (config_fill && opt_valgrind) {
            opt_junk = false;
            assert(opt_zero == false);
            opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
            opt_redzone = true;
        }
        if (config_tcache && opt_valgrind)
            opt_tcache = false;
    }

    for (i = 0; i < 3; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 1: {
#ifndef _WIN32
            int linklen;
            const char *linkname =
#  ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
                "/etc/malloc.conf"
#  endif
                ;

            if ((linklen = readlink(linkname, buf,
                sizeof(buf) - 1)) != -1) {
                /*
                 * Use the contents of the "/etc/malloc.conf"
                 * symbolic link's name.
                 */
                buf[linklen] = '\0';
                opts = buf;
            } else
#endif
            {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } case 2: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if (issetugid() == 0 && (opts = getenv(envname)) !=
                NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            not_reached();
            buf[0] = '\0';
            opts = buf;
        }
        while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
            &vlen) == false) {
#define CONF_HANDLE_BOOL(o, n) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                if (strncmp("true", v, vlen) == 0 && \
                    vlen == sizeof("true")-1) \
                    o = true; \
                else if (strncmp("false", v, vlen) == \
                    0 && vlen == sizeof("false")-1) \
                    o = false; \
                else { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } \
                continue; \
            }
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                uintmax_t um; \
                char *end; \
 \
                set_errno(0); \
                um = malloc_strtoumax(v, &end, 0); \
                if (get_errno() != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (clip) { \
                    if (um < min) \
                        o = min; \
                    else if (um > max) \
                        o = max; \
                    else \
                        o = um; \
                } else { \
                    if (um < min || um > max) { \
                        malloc_conf_error( \
                            "Out-of-range conf value", \
                            k, klen, v, vlen); \
                    } else \
                        o = um; \
                } \
                continue; \
            }
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                long l; \
                char *end; \
 \
                set_errno(0); \
                l = strtol(v, &end, 0); \
                if (get_errno() != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (l < (ssize_t)min || l > \
                    (ssize_t)max) { \
                    malloc_conf_error( \
                        "Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = l; \
                continue; \
            }
#define CONF_HANDLE_CHAR_P(o, n, d) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                size_t cpylen = (vlen <= \
                    sizeof(o)-1) ? vlen : \
                    sizeof(o)-1; \
                strncpy(o, v, cpylen); \
                o[cpylen] = '\0'; \
                continue; \
            }

            CONF_HANDLE_BOOL(opt_abort, "abort")
            /*
             * Chunks always require at least one header page, plus
             * one data page in the absence of redzones, or three
             * pages in the presence of redzones.  In order to
             * simplify options processing, fix the limit based on
             * config_fill.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
                true)
            if (strncmp("dss", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < dss_prec_limit; i++) {
                    if (strncmp(dss_prec_names[i], v, vlen)
                        == 0) {
                        if (chunk_dss_prec_set(i)) {
                            malloc_conf_error(
                                "Error setting dss",
                                k, klen, v, vlen);
                        } else {
                            opt_dss = dss_prec_names[i];
                            match = true;
                            break;
                        }
                    }
                }
                if (match == false) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
                SIZE_T_MAX, false)
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
            if (config_fill) {
                CONF_HANDLE_BOOL(opt_junk, "junk")
                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                    0, SIZE_T_MAX, false)
                CONF_HANDLE_BOOL(opt_redzone, "redzone")
                CONF_HANDLE_BOOL(opt_zero, "zero")
            }
            if (config_utrace) {
                CONF_HANDLE_BOOL(opt_utrace, "utrace")
            }
            if (config_valgrind) {
                CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, "tcache")
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    "lg_tcache_max", -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, "prof")
                CONF_HANDLE_CHAR_P(opt_prof_prefix,
                    "prof_prefix", "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
                CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
                    "lg_prof_sample", 0,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    "lg_prof_interval", -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
                CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
                CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}
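/*
 * Example: the same option string can be supplied through any of the three
 * sources scanned above; via the environment (values illustrative only):
 *
 *	MALLOC_CONF="lg_chunk:24,narenas:2,stats_print:true" ./a.out
 *
 * Each call to malloc_conf_next() yields one key/value pair, and the
 * environment is scanned last, so it overrides the compiled-in string and
 * the /etc/malloc.conf symlink for any option they share.
 */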
static bool
malloc_init_hard(void)
{
    arena_t *init_arenas[1];

    malloc_mutex_lock(&init_lock);
    if (malloc_initialized || IS_INITIALIZER) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (malloc_initialized == false);
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#endif
    malloc_initializer = INITIALIZER;

    malloc_tsd_boot();
    if (config_prof)
        prof_boot0();

    malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
    /* Register fork handlers. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif

    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }

    if (base_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (chunk_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (ctl_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof)
        prof_boot1();

    arena_boot();

    if (config_tcache && tcache_boot0()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (huge_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_mutex_init(&arenas_lock))
        return (true);

    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas_total = narenas_auto = 1;
    arenas = init_arenas;
    memset(arenas, 0, sizeof(arena_t *) * narenas_auto);

    /*
     * Initialize one arena here.  The rest are lazily created in
     * choose_arena_hard().
     */
    arenas_extend(0);
    if (arenas[0] == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Initialize allocation counters before any allocations can occur. */
    if (config_stats && thread_allocated_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (arenas_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_tcache && tcache_boot1()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_fill && quarantine_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Get number of CPUs. */
    malloc_mutex_unlock(&init_lock);
    ncpus = malloc_ncpus();
    malloc_mutex_lock(&init_lock);

    if (mutex_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas_auto = opt_narenas;
    /*
     * Make sure that the arenas array can be allocated.  In practice, this
     * limit is enough to allow the allocator to function, but the ctl
     * machinery will fail to allocate memory at far lower limits.
     */
    if (narenas_auto > chunksize / sizeof(arena_t *)) {
        narenas_auto = chunksize / sizeof(arena_t *);
        malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
            narenas_auto);
    }
    narenas_total = narenas_auto;

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
    if (arenas == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    /*
     * Zero the array.  In practice, this should always be pre-zeroed,
     * since it was just mmap()ed, but let's be sure.
     */
    memset(arenas, 0, sizeof(arena_t *) * narenas_total);
    /* Copy the pointer to the one arena that was already initialized. */
    arenas[0] = init_arenas[0];

    malloc_initialized = true;
    malloc_mutex_unlock(&init_lock);

    return (false);
}
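/*
 * For example, with no explicit "narenas" option on an 8-CPU machine,
 * opt_narenas defaults to 8 << 2 = 32 above, and the arenas beyond arenas[0]
 * are created lazily by choose_arena_hard() as threads first allocate.
 */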
/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */
void *
je_malloc(size_t size)
{
    void *ret;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        ret = NULL;
        goto label_oom;
    }

    if (size == 0)
        size = 1;

    if (config_prof && opt_prof) {
        usize = s2u(size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto label_oom;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            ret = imalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = imalloc(size);
    } else {
        if (config_stats || (config_valgrind && opt_valgrind))
            usize = s2u(size);
        ret = imalloc(size);
    }

label_oom:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
    return (ret);
}
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
    int ret;
    size_t usize;
    void *result;
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    assert(min_alignment != 0);

    if (malloc_init())
        result = NULL;
    else {
        if (size == 0)
            size = 1;

        /* Make sure that alignment is a large enough power of 2. */
        if (((alignment - 1) & alignment) != 0
            || (alignment < min_alignment)) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error allocating "
                    "aligned memory: invalid alignment\n");
                abort();
            }
            result = NULL;
            ret = EINVAL;
            goto label_return;
        }

        usize = sa2u(size, alignment);
        if (usize == 0) {
            result = NULL;
            ret = ENOMEM;
            goto label_return;
        }

        if (config_prof && opt_prof) {
            PROF_ALLOC_PREP(2, usize, cnt);
            if (cnt == NULL) {
                result = NULL;
                ret = EINVAL;
            } else {
                if (prof_promote && (uintptr_t)cnt !=
                    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
                    assert(sa2u(SMALL_MAXCLASS+1,
                        alignment) != 0);
                    result = ipalloc(sa2u(SMALL_MAXCLASS+1,
                        alignment), alignment, false);
                    if (result != NULL) {
                        arena_prof_promoted(result,
                            usize);
                    }
                } else {
                    result = ipalloc(usize, alignment,
                        false);
                }
            }
        } else
            result = ipalloc(usize, alignment, false);
    }

    if (result == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error allocating aligned "
                "memory: out of memory\n");
            abort();
        }
        ret = ENOMEM;
        goto label_return;
    }

    *memptr = result;
    ret = 0;

label_return:
    if (config_stats && result != NULL) {
        assert(usize == isalloc(result, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    if (config_prof && opt_prof && result != NULL)
        prof_malloc(result, usize, cnt);
    UTRACE(0, size, result);
    return (ret);
}
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
    int ret = imemalign(memptr, alignment, size, sizeof(void *));
    JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
        config_prof), false);
    return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
    void *ret;
    int err;

    if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
        ret = NULL;
        set_errno(err);
    }
    JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
        false);
    return (ret);
}
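/*
 * Usage sketch for the two wrappers above (sizes illustrative only): a
 * posix_memalign() failure is reported only through the return value,
 * whereas aligned_alloc() also sets errno via set_errno(err).
 *
 *	void *p;
 *	if (je_posix_memalign(&p, 64, 1024) != 0)
 *		... handle EINVAL or ENOMEM ...
 *	p = je_aligned_alloc(64, 1024);    NULL on failure, errno set
 */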
void *
je_calloc(size_t num, size_t size)
{
    void *ret;
    size_t num_size;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        num_size = 0;
        ret = NULL;
        goto label_return;
    }

    num_size = num * size;
    if (num_size == 0) {
        if (num == 0 || size == 0)
            num_size = 1;
        else {
            ret = NULL;
            goto label_return;
        }
    /*
     * Try to avoid division here.  We know that it isn't possible to
     * overflow during multiplication if neither operand uses any of the
     * most significant half of the bits in a size_t.
     */
    } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
        && (num_size / size != num)) {
        /* size_t overflow. */
        ret = NULL;
        goto label_return;
    }

    if (config_prof && opt_prof) {
        usize = s2u(num_size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto label_return;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
            <= SMALL_MAXCLASS) {
            ret = icalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = icalloc(num_size);
    } else {
        if (config_stats || (config_valgrind && opt_valgrind))
            usize = s2u(num_size);
        ret = icalloc(num_size);
    }

label_return:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in calloc(): out of "
                "memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, num_size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
    return (ret);
}
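/*
 * The overflow check above only divides when at least one operand uses the
 * high half of size_t's bits.  For example, with a 64-bit size_t the mask is
 * SIZE_T_MAX << 32; num = 2^33 and size = 2^32 set mask bits and produce
 * num_size = 0 (mod 2^64), so num_size / size != num detects the overflow,
 * while small operands such as num = 1000, size = 24 skip the division
 * entirely.
 */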
void *
je_realloc(void *ptr, size_t size)
{
    void *ret;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    size_t old_size = 0;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
    prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

    if (size == 0) {
        if (ptr != NULL) {
            /* realloc(ptr, 0) is equivalent to free(ptr). */
            assert(malloc_initialized || IS_INITIALIZER);
            if (config_prof) {
                old_size = isalloc(ptr, true);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = p2rz(ptr);
            } else if (config_stats) {
                old_size = isalloc(ptr, false);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = u2rz(old_size);
            } else if (config_valgrind && opt_valgrind) {
                old_size = isalloc(ptr, false);
                old_rzsize = u2rz(old_size);
            }
            if (config_prof && opt_prof) {
                old_ctx = prof_ctx_get(ptr);
                cnt = NULL;
            }
            iqalloc(ptr);
            ret = NULL;
            goto label_return;
        } else
            size = 1;
    }

    if (ptr != NULL) {
        assert(malloc_initialized || IS_INITIALIZER);
        malloc_thread_init();

        if (config_prof) {
            old_size = isalloc(ptr, true);
            if (config_valgrind && opt_valgrind)
                old_rzsize = p2rz(ptr);
        } else if (config_stats) {
            old_size = isalloc(ptr, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(ptr, false);
            old_rzsize = u2rz(old_size);
        }
        if (config_prof && opt_prof) {
            usize = s2u(size);
            old_ctx = prof_ctx_get(ptr);
            PROF_ALLOC_PREP(1, usize, cnt);
            if (cnt == NULL) {
                old_ctx = NULL;
                ret = NULL;
                goto label_oom;
            }
            if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
                usize <= SMALL_MAXCLASS) {
                ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
                    false, false);
                if (ret != NULL)
                    arena_prof_promoted(ret, usize);
                else
                    old_ctx = NULL;
            } else {
                ret = iralloc(ptr, size, 0, 0, false, false);
                if (ret == NULL)
                    old_ctx = NULL;
            }
        } else {
            if (config_stats || (config_valgrind && opt_valgrind))
                usize = s2u(size);
            ret = iralloc(ptr, size, 0, 0, false, false);
        }

label_oom:
        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            set_errno(ENOMEM);
        }
    } else {
        /* realloc(NULL, size) is equivalent to malloc(size). */
        if (config_prof && opt_prof)
            old_ctx = NULL;
        if (malloc_init()) {
            if (config_prof && opt_prof)
                cnt = NULL;
            ret = NULL;
        } else {
            if (config_prof && opt_prof) {
                usize = s2u(size);
                PROF_ALLOC_PREP(1, usize, cnt);
                if (cnt == NULL)
                    ret = NULL;
                else {
                    if (prof_promote && (uintptr_t)cnt !=
                        (uintptr_t)1U && usize <=
                        SMALL_MAXCLASS) {
                        ret = imalloc(SMALL_MAXCLASS+1);
                        if (ret != NULL) {
                            arena_prof_promoted(ret,
                                usize);
                        }
                    } else
                        ret = imalloc(size);
                }
            } else {
                if (config_stats || (config_valgrind &&
                    opt_valgrind))
                    usize = s2u(size);
                ret = imalloc(size);
            }
        }

        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            set_errno(ENOMEM);
        }
    }

label_return:
    if (config_prof && opt_prof)
        prof_realloc(ret, usize, cnt, old_size, old_ctx);
    if (config_stats && ret != NULL) {
        thread_allocated_t *ta;
        assert(usize == isalloc(ret, config_prof));
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    UTRACE(ptr, size, ret);
    JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
    return (ret);
}
void
je_free(void *ptr)
{

    UTRACE(ptr, 0, 0);
    if (ptr != NULL) {
        size_t usize;
        size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof && opt_prof) {
            usize = isalloc(ptr, config_prof);
            prof_free(ptr, usize);
        } else if (config_stats || config_valgrind)
            usize = isalloc(ptr, config_prof);
        if (config_stats)
            thread_allocated_tsd_get()->deallocated += usize;
        if (config_valgrind && opt_valgrind)
            rzsize = p2rz(ptr);
        iqalloc(ptr);
        JEMALLOC_VALGRIND_FREE(ptr, rzsize);
    }
}
/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, alignment, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, PAGE, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif
/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)
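/*
 * For example, when jemalloc_defs.h contains "#define je_malloc malloc",
 * is_malloc(je_malloc) expands as is_malloc_(malloc) -> malloc_is_malloc -> 1,
 * so the glibc hook definitions below are compiled in; in a prefixed build
 * (je_malloc left unchanged) it expands to the undefined identifier
 * malloc_is_je_malloc, which the #if below evaluates as 0.
 */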
#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */
size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
    size_t ret;

    assert(malloc_initialized || IS_INITIALIZER);
    malloc_thread_init();

    if (config_ivsalloc)
        ret = ivsalloc(ptr, config_prof);
    else
        ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

    return (ret);
}
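/*
 * Usage sketch: the value returned reflects the size class backing the
 * allocation, so it can exceed the requested size:
 *
 *	void *p = je_malloc(100);
 *	size_t n = je_malloc_usable_size(p);    n >= 100
 *
 * Writing up to n bytes is safe, but n varies with jemalloc's size classes,
 * so portable code should not depend on a particular value.
 */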
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

    stats_print(write_cb, cbopaque, opts);
}

int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
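/*
 * The MIB ("Management Information Base") interface avoids repeated name
 * lookups when the same control is queried many times.  A sketch, after the
 * mallctl(3) example (nbins would come from the "arenas.nbins" mallctl):
 *
 *	size_t mib[4], miblen = sizeof(mib) / sizeof(size_t);
 *	unsigned i, nbins;
 *	je_mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	for (i = 0; i < nbins; i++) {
 *		size_t bin_size, len = sizeof(bin_size);
 *		mib[2] = i;
 *		je_mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
 *		... use bin_size ...
 *	}
 */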
/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL
static JEMALLOC_ATTR(always_inline) void *
iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{

    assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
        alignment)));

    if (alignment != 0)
        return (ipallocx(usize, alignment, zero, try_tcache, arena));
    else if (zero)
        return (icallocx(usize, try_tcache, arena));
    else
        return (imallocx(usize, try_tcache, arena));
}
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
    void *p;
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
    arena_t *arena;
    bool try_tcache;

    assert(ptr != NULL);
    assert(size != 0);

    if (malloc_init())
        goto label_oom;

    if (arena_ind != UINT_MAX) {
        arena = arenas[arena_ind];
        try_tcache = false;
    } else {
        arena = NULL;
        try_tcache = true;
    }

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
    if (usize == 0)
        goto label_oom;

    if (config_prof && opt_prof) {
        prof_thr_cnt_t *cnt;

        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            size_t usize_promoted = (alignment == 0) ?
                s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
                alignment);
            assert(usize_promoted != 0);
            p = iallocm(usize_promoted, alignment, zero,
                try_tcache, arena);
            if (p == NULL)
                goto label_oom;
            arena_prof_promoted(p, usize);
        } else {
            p = iallocm(usize, alignment, zero, try_tcache, arena);
            if (p == NULL)
                goto label_oom;
        }
        prof_malloc(p, usize, cnt);
    } else {
        p = iallocm(usize, alignment, zero, try_tcache, arena);
        if (p == NULL)
            goto label_oom;
    }
    if (rsize != NULL)
        *rsize = usize;

    *ptr = p;
    if (config_stats) {
        assert(usize == isalloc(p, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, size, p);
    JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
    return (ALLOCM_SUCCESS);
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in allocm(): "
            "out of memory\n");
        abort();
    }
    *ptr = NULL;
    UTRACE(0, size, 0);
    return (ALLOCM_ERR_OOM);
}
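/*
 * allocm() flag encoding sketch: ALLOCM_LG_ALIGN(la) stores lg(alignment) in
 * the low 6 bits, ALLOCM_ZERO requests zeroed memory, and ALLOCM_ARENA(i)
 * encodes arena index i+1 in the high bits, which the (flags >> 8) - 1
 * computation above recovers.  For example (values illustrative only):
 *
 *	void *p;
 *	size_t rsize;
 *	if (je_allocm(&p, &rsize, 4096,
 *	    ALLOCM_LG_ALIGN(12) | ALLOCM_ZERO) != ALLOCM_SUCCESS)
 *		... handle OOM ...
 */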
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
    void *p, *q;
    size_t usize;
    size_t old_size;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    bool no_move = flags & ALLOCM_NO_MOVE;
    unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
    bool try_tcache_alloc, try_tcache_dalloc;
    arena_t *arena;

    assert(ptr != NULL);
    assert(*ptr != NULL);
    assert(size != 0);
    assert(SIZE_T_MAX - size >= extra);
    assert(malloc_initialized || IS_INITIALIZER);
    malloc_thread_init();

    if (arena_ind != UINT_MAX) {
        arena_chunk_t *chunk;
        try_tcache_alloc = true;
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
        try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
            arenas[arena_ind]);
        arena = arenas[arena_ind];
    } else {
        try_tcache_alloc = true;
        try_tcache_dalloc = true;
        arena = NULL;
    }

    p = *ptr;
    if (config_prof && opt_prof) {
        prof_thr_cnt_t *cnt;

        /*
         * usize isn't knowable before iralloc() returns when extra is
         * non-zero.  Therefore, compute its maximum possible value and
         * use that in PROF_ALLOC_PREP() to decide whether to capture a
         * backtrace.  prof_realloc() will use the actual usize to
         * decide whether to sample.
         */
        size_t max_usize = (alignment == 0) ? s2u(size+extra) :
            sa2u(size+extra, alignment);
        prof_ctx_t *old_ctx = prof_ctx_get(p);
        old_size = isalloc(p, true);
        if (config_valgrind && opt_valgrind)
            old_rzsize = p2rz(p);
        PROF_ALLOC_PREP(1, max_usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        /*
         * Use minimum usize to determine whether promotion may happen.
         */
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
            && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
            <= SMALL_MAXCLASS) {
            q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
                size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
                alignment, zero, no_move, try_tcache_alloc,
                try_tcache_dalloc, arena);
            if (q == NULL)
                goto label_err;
            if (max_usize < PAGE) {
                usize = max_usize;
                arena_prof_promoted(q, usize);
            } else
                usize = isalloc(q, config_prof);
        } else {
            q = irallocx(p, size, extra, alignment, zero, no_move,
                try_tcache_alloc, try_tcache_dalloc, arena);
            if (q == NULL)
                goto label_err;
            usize = isalloc(q, config_prof);
        }
        prof_realloc(q, usize, cnt, old_size, old_ctx);
        if (rsize != NULL)
            *rsize = usize;
    } else {
        if (config_stats) {
            old_size = isalloc(p, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(p, false);
            old_rzsize = u2rz(old_size);
        }
        q = irallocx(p, size, extra, alignment, zero, no_move,
            try_tcache_alloc, try_tcache_dalloc, arena);
        if (q == NULL)
            goto label_err;
        if (config_stats)
            usize = isalloc(q, config_prof);
        if (rsize != NULL) {
            if (config_stats == false)
                usize = isalloc(q, config_prof);
            *rsize = usize;
        }
    }

    *ptr = q;
    if (config_stats) {
        thread_allocated_t *ta;
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    UTRACE(p, size, q);
    JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
    return (ALLOCM_SUCCESS);
label_err:
    if (no_move) {
        UTRACE(p, size, q);
        return (ALLOCM_ERR_NOT_MOVED);
    }
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in rallocm(): "
            "out of memory\n");
        abort();
    }
    UTRACE(p, size, 0);
    return (ALLOCM_ERR_OOM);
}
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
    size_t sz;

    assert(malloc_initialized || IS_INITIALIZER);
    malloc_thread_init();

    if (config_ivsalloc)
        sz = ivsalloc(ptr, config_prof);
    else {
        assert(ptr != NULL);
        sz = isalloc(ptr, config_prof);
    }
    assert(rsize != NULL);
    *rsize = sz;

    return (ALLOCM_SUCCESS);
}
int
je_dallocm(void *ptr, int flags)
{
    size_t usize;
    size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
    unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
    bool try_tcache;

    assert(ptr != NULL);
    assert(malloc_initialized || IS_INITIALIZER);

    if (arena_ind != UINT_MAX) {
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        try_tcache = (chunk == ptr || chunk->arena !=
            arenas[arena_ind]);
    } else
        try_tcache = true;

    UTRACE(ptr, 0, 0);
    if (config_stats || config_valgrind)
        usize = isalloc(ptr, config_prof);
    if (config_prof && opt_prof) {
        if (config_stats == false && config_valgrind == false)
            usize = isalloc(ptr, config_prof);
        prof_free(ptr, usize);
    }
    if (config_stats)
        thread_allocated_tsd_get()->deallocated += usize;
    if (config_valgrind && opt_valgrind)
        rzsize = p2rz(ptr);
    iqallocx(ptr, try_tcache);
    JEMALLOC_VALGRIND_FREE(ptr, rzsize);

    return (ALLOCM_SUCCESS);
}
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));

    assert(size != 0);

    if (malloc_init())
        return (ALLOCM_ERR_OOM);

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
    if (usize == 0)
        return (ALLOCM_ERR_OOM);

    if (rsize != NULL)
        *rsize = usize;
    return (ALLOCM_SUCCESS);
}
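/*
 * nallocm() computes the usable size an allocation would have without
 * performing it, so callers can size containers in advance.  Sketch:
 *
 *	size_t rsize;
 *	if (je_nallocm(&rsize, 100, 0) == ALLOCM_SUCCESS)
 *		... an allocm() of size 100 would yield rsize usable bytes ...
 */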
#endif /* JEMALLOC_EXPERIMENTAL */
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be
 * possible to trigger the deadlock described above, but doing so would
 * involve forking via a library constructor that runs before jemalloc's runs.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

    malloc_init();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
    unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
    if (malloc_initialized == false)
        return;
#endif
    assert(malloc_initialized);

    /* Acquire all mutexes in a safe order. */
    ctl_prefork();
    prof_prefork();
    malloc_mutex_prefork(&arenas_lock);
    for (i = 0; i < narenas_total; i++) {
        if (arenas[i] != NULL)
            arena_prefork(arenas[i]);
    }
    chunk_prefork();
    base_prefork();
    huge_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
    unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
    if (malloc_initialized == false)
        return;
#endif
    assert(malloc_initialized);

    /* Release all mutexes, now that fork() has completed. */
    huge_postfork_parent();
    base_postfork_parent();
    chunk_postfork_parent();
    for (i = 0; i < narenas_total; i++) {
        if (arenas[i] != NULL)
            arena_postfork_parent(arenas[i]);
    }
    malloc_mutex_postfork_parent(&arenas_lock);
    prof_postfork_parent();
    ctl_postfork_parent();
}
void
jemalloc_postfork_child(void)
{
    unsigned i;

    assert(malloc_initialized);

    /* Release all mutexes, now that fork() has completed. */
    huge_postfork_child();
    base_postfork_child();
    chunk_postfork_child();
    for (i = 0; i < narenas_total; i++) {
        if (arenas[i] != NULL)
            arena_postfork_child(arenas[i]);
    }
    malloc_mutex_postfork_child(&arenas_lock);
    prof_postfork_child();
    ctl_postfork_child();
}
void
_malloc_first_thread(void)
{

    (void)malloc_mutex_first_thread();
}
/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */
static void *
a0alloc(size_t size, bool zero)
{

    if (malloc_init())
        return (NULL);

    if (size == 0)
        size = 1;

    if (size <= arena_maxclass)
        return (arena_malloc(arenas[0], size, zero, false));
    else
        return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

    return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

    return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
    arena_chunk_t *chunk;

    if (ptr == NULL)
        return;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr, false);
    else
        huge_dalloc(ptr, true);
}

/******************************************************************************/