#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

const char	*__malloc_options_1_0;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
#  ifdef JEMALLOC_FILL
bool	opt_junk = true;
#  else
bool	opt_junk = false;
#  endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_valgrind = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;
unsigned	ncpus;

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas;
/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
	}								\
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
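/*
 * For example, on FreeBSD the records emitted by UTRACE() can be captured
 * with ktrace(1) and examined with kdump(1); each record pairs the input
 * pointer and request size with the resulting pointer, mirroring
 * realloc(p, s) == r.
 */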
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);
/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}
/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}
static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}
/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */
static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}
void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if (issetugid() == 0 && (opts = getenv(envname)) !=
			    NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}
		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL_HIT(o, n, hit)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				hit = true;				\
			} else						\
				hit = false;
#define	CONF_HANDLE_BOOL(o, n) {					\
			bool hit;					\
			CONF_HANDLE_BOOL_HIT(o, n, hit);		\
			if (hit)					\
				continue;				\
}
#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				errno = 0;				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (um < min || um > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = um;				\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}
			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
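			/*
			 * E.g., with 4 KiB pages the minimum is a 16 KiB
			 * chunk when fill support is configured (one header
			 * page plus three data pages), or 8 KiB otherwise.
			 */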
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				bool hit;
				CONF_HANDLE_BOOL_HIT(opt_valgrind,
				    "valgrind", hit)
				if (config_fill && opt_valgrind && hit) {
					opt_junk = false;
					opt_zero = false;
					if (opt_quarantine == 0) {
						opt_quarantine =
						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
					}
					opt_redzone = true;
				}
				if (hit)
					continue;
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
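/*
 * Example (values illustrative only): any of the three sources above can
 * supply a conf string such as
 *
 *	MALLOC_CONF="narenas:4,lg_chunk:22,stats_print:true" ./a.out
 *	ln -s 'abort:true,quarantine:16384' /etc/malloc.conf
 *
 * Each key is matched against the handlers above; unrecognized pairs are
 * reported via malloc_conf_error() and otherwise ignored.
 */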
static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();
#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();
	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock))
		return (true);

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
		    narenas);
	}
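	/*
	 * For example (assuming the default 4 MiB chunk size and 8-byte
	 * pointers), this limit works out to 524288 arenas, far more than
	 * the rest of the allocator can realistically service.
	 */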
	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}
/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto label_oom;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_oom;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(size);
		ret = imalloc(size);
	}

label_oom:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		errno = err;
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
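	/*
	 * Concretely (assuming a 64-bit size_t): if both num and size fit in
	 * 32 bits, their product fits in 64 bits and cannot wrap, so the
	 * division-based check below only runs when a high bit is set in
	 * either operand.
	 */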
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_size = 0;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof) {
				old_size = isalloc(ptr, true);
				if (config_valgrind && opt_valgrind)
					old_rzsize = p2rz(ptr);
			} else if (config_stats) {
				old_size = isalloc(ptr, false);
				if (config_valgrind && opt_valgrind)
					old_rzsize = u2rz(old_size);
			} else if (config_valgrind && opt_valgrind) {
				old_size = isalloc(ptr, false);
				old_rzsize = u2rz(old_size);
			}
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			iqalloc(ptr);
			ret = NULL;
			goto label_return;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof) {
			old_size = isalloc(ptr, true);
			if (config_valgrind && opt_valgrind)
				old_rzsize = p2rz(ptr);
		} else if (config_stats) {
			old_size = isalloc(ptr, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(ptr, false);
			old_rzsize = u2rz(old_size);
		}
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto label_oom;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

label_oom:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats || (config_valgrind &&
				    opt_valgrind))
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}
label_return:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
	return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL) {
		size_t usize;
		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr, config_prof);
			prof_free(ptr, usize);
		} else if (config_stats || config_valgrind)
			usize = isalloc(ptr, config_prof);
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		if (config_valgrind && opt_valgrind)
			rzsize = p2rz(ptr);
		iqalloc(ptr);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}
/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif
/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)
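/*
 * Expansion walkthrough (a sketch of the preprocessor mechanics): when
 * jemalloc_defs.h defines je_malloc as malloc, is_malloc(je_malloc) expands
 * via is_malloc_(malloc) to malloc_is_malloc, which is defined above as 1.
 * Without that define, the tokens paste to malloc_is_je_malloc, which is
 * undefined and therefore evaluates to 0 in the #if below.
 */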
#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */
JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}
JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}
JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
	    alignment)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		goto label_oom;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto label_oom;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto label_oom;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto label_oom;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (ALLOCM_SUCCESS);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	UTRACE(0, size, 0);
	return (ALLOCM_ERR_OOM);
}
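/*
 * Example usage (illustrative only), requesting a zeroed, 64-byte-aligned
 * allocation and retrieving its real usable size:
 *
 *	void *p;
 *	size_t rsize;
 *	if (je_allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO) !=
 *	    ALLOCM_SUCCESS)
 *		... handle error ...
 */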
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p, true);
		if (config_valgrind && opt_valgrind)
			old_rzsize = p2rz(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
		    <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q, config_prof);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			usize = isalloc(q, config_prof);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats) {
			old_size = isalloc(p, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(p, false);
			old_rzsize = u2rz(old_size);
		}
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto label_err;
		if (config_stats)
			usize = isalloc(q, config_prof);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q, config_prof);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(p, size, q);
	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
	return (ALLOCM_SUCCESS);
label_err:
	if (no_move) {
		UTRACE(p, size, q);
		return (ALLOCM_ERR_NOT_MOVED);
	}
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	UTRACE(p, size, 0);
	return (ALLOCM_ERR_OOM);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		sz = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr, config_prof);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;
	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqalloc(ptr);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);

	return (ALLOCM_SUCCESS);
}
JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}
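/*
 * Example (illustrative only): compute the usable size to which a 100-byte
 * request would be rounded, without performing an allocation:
 *
 *	size_t usize;
 *	if (je_nallocm(&usize, 100, 0) == ALLOCM_SUCCESS)
 *		assert(usize >= 100);
 */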
#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_ATTR(visibility("default"))
void
_malloc_prefork(void)
#endif
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	base_prefork();
	huge_prefork();
	chunk_dss_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_ATTR(visibility("default"))
void
_malloc_postfork(void)
#endif
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_parent();
	huge_postfork_parent();
	base_postfork_parent();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
}
void
jemalloc_postfork_child(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_child();
	huge_postfork_child();
	base_postfork_child();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
}
/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */
static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	return (a0alloc(num * size, true));
}
void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}

/******************************************************************************/