2 #include "jemalloc/internal/jemalloc_internal.h"
4 /******************************************************************************/
7 /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
8 const char *__malloc_options_1_0 = NULL;
9 __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
11 /* Runtime configuration options. */
12 const char *je_malloc_conf JEMALLOC_ATTR(weak);
20 const char *opt_junk =
21 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
28 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
35 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
42 size_t opt_quarantine = ZU(0);
43 bool opt_redzone = false;
44 bool opt_utrace = false;
45 bool opt_xmalloc = false;
46 bool opt_zero = false;
47 size_t opt_narenas = 0;
49 /* Initialized to true if the process is running inside Valgrind. */
54 /* Protects arenas initialization (arenas, narenas_total). */
55 static malloc_mutex_t arenas_lock;
57 * Arenas that are used to service external requests. Not all elements of the
58 * arenas array are necessarily used; arenas are created lazily as needed.
60 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
61 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
62 * takes some action to create them and allocate from them.
64 static arena_t **arenas;
65 static unsigned narenas_total;
66 static arena_t *a0; /* arenas[0]; read-only after initialization. */
67 static unsigned narenas_auto; /* Read-only after initialization. */
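/*
 * Illustrative sketch (names per the jemalloc 4.x public API; the arena
 * index is arbitrary): threads are spread over the automatic arenas in
 * [0..narenas_auto) by arena_choose_hard(), whereas arenas in
 * [narenas_auto..narenas_total) only see traffic when the application asks
 * for them explicitly, e.g. by rebinding itself via the "thread.arena"
 * mallctl:
 *
 *     unsigned ind = 3;
 *     if (mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind)) != 0)
 *         handle_error();            // hypothetical error handling
 *     void *p = malloc(42);          // now served from arenas[3]
 */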
70 malloc_init_uninitialized = 3,
71 malloc_init_a0_initialized = 2,
72 malloc_init_recursible = 1,
73 malloc_init_initialized = 0 /* Common case --> jnz. */
75 static malloc_init_t malloc_init_state = malloc_init_uninitialized;
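/*
 * Presumably the reason malloc_init_initialized is 0 (see "Common case -->
 * jnz" above): the hot-path check reduces to a compare against zero, so the
 * compiler can emit a single test-and-branch for the already-initialized
 * case.  Conceptually (not the literal code path below):
 *
 *     if (unlikely(malloc_init_state != malloc_init_initialized))
 *         return (malloc_init_hard());   // slow path
 *     // fast path: nothing to do
 */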
77 JEMALLOC_ALIGNED(CACHELINE)
78 const size_t index2size_tab[NSIZES] = {
79 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
80 ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
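/*
 * Worked example of the formula above (parameter values are hypothetical,
 * not taken from a real size-class definition): with lg_grp == 4,
 * lg_delta == 2 and ndelta == 3 the entry evaluates to
 * (1 << 4) + (3 << 2) == 16 + 12 == 28 bytes, i.e. the group base plus
 * ndelta deltas of 2^lg_delta bytes each.
 */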
85 JEMALLOC_ALIGNED(CACHELINE)
86 const uint8_t size2index_tab[] = {
88 #warning "Dangerous LG_TINY_MIN"
90 #elif LG_TINY_MIN == 1
91 #warning "Dangerous LG_TINY_MIN"
93 #elif LG_TINY_MIN == 2
94 #warning "Dangerous LG_TINY_MIN"
96 #elif LG_TINY_MIN == 3
98 #elif LG_TINY_MIN == 4
100 #elif LG_TINY_MIN == 5
102 #elif LG_TINY_MIN == 6
104 #elif LG_TINY_MIN == 7
106 #elif LG_TINY_MIN == 8
108 #elif LG_TINY_MIN == 9
110 #elif LG_TINY_MIN == 10
112 #elif LG_TINY_MIN == 11
115 #error "Unsupported LG_TINY_MIN"
118 #define S2B_1(i) S2B_0(i) S2B_0(i)
121 #define S2B_2(i) S2B_1(i) S2B_1(i)
124 #define S2B_3(i) S2B_2(i) S2B_2(i)
127 #define S2B_4(i) S2B_3(i) S2B_3(i)
130 #define S2B_5(i) S2B_4(i) S2B_4(i)
133 #define S2B_6(i) S2B_5(i) S2B_5(i)
136 #define S2B_7(i) S2B_6(i) S2B_6(i)
139 #define S2B_8(i) S2B_7(i) S2B_7(i)
142 #define S2B_9(i) S2B_8(i) S2B_8(i)
145 #define S2B_10(i) S2B_9(i) S2B_9(i)
148 #define S2B_11(i) S2B_10(i) S2B_10(i)
151 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
152 S2B_##lg_delta_lookup(index)
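/*
 * Sketch of how the S2B_*() macros populate size2index_tab (assuming the
 * base macro for the configured LG_TINY_MIN expands to "i," as above): each
 * level doubles the number of emitted entries, so S2B_##lg_delta_lookup(index)
 * repeats "index," once per LG_TINY_MIN-sized granule covered by the class.
 * For example, with LG_TINY_MIN == 3, S2B_5(7) expands to
 *
 *     7, 7, 7, 7,
 *
 * i.e. 2^(5-3) == 4 consecutive 8-byte granules all map to size class 7.
 */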
167 #ifdef JEMALLOC_THREADED_INIT
168 /* Used to let the initializing thread recursively allocate. */
169 # define NO_INITIALIZER ((unsigned long)0)
170 # define INITIALIZER pthread_self()
171 # define IS_INITIALIZER (malloc_initializer == pthread_self())
172 static pthread_t malloc_initializer = NO_INITIALIZER;
174 # define NO_INITIALIZER false
175 # define INITIALIZER true
176 # define IS_INITIALIZER malloc_initializer
177 static bool malloc_initializer = NO_INITIALIZER;
180 /* Used to avoid initialization races. */
182 #if _WIN32_WINNT >= 0x0600
183 static malloc_mutex_t init_lock = SRWLOCK_INIT;
185 static malloc_mutex_t init_lock;
186 static bool init_lock_initialized = false;
188 JEMALLOC_ATTR(constructor)
190 _init_init_lock(void)
193 /* If another constructor in the same binary is using mallctl to
194 * e.g. set up chunk hooks, it may end up running before this one,
195 * and malloc_init_hard will crash trying to lock the uninitialized
196 * lock. So we force an initialization of the lock in
197 * malloc_init_hard as well. We make no attempt to guarantee atomicity
198 * of accesses to the init_lock_initialized boolean, since it
199 * really only matters early in the process creation, before any
200 * separate thread normally starts doing anything. */
201 if (!init_lock_initialized)
202 malloc_mutex_init(&init_lock);
203 init_lock_initialized = true;
207 # pragma section(".CRT$XCU", read)
208 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
209 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
213 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
217 void *p; /* Input pointer (as in realloc(p, s)). */
218 size_t s; /* Request size. */
219 void *r; /* Result pointer. */
222 #ifdef JEMALLOC_UTRACE
223 # define UTRACE(a, b, c) do { \
224 if (unlikely(opt_utrace)) { \
225 int utrace_serrno = errno; \
226 malloc_utrace_t ut; \
230 utrace(&ut, sizeof(ut)); \
231 errno = utrace_serrno; \
235 # define UTRACE(a, b, c)
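/*
 * Sketch of what the macro records when built with --enable-utrace and the
 * "utrace" runtime option is set: each call site passes the input pointer,
 * the request size and the result pointer, e.g. malloc() ends with
 *
 *     UTRACE(0, size, ret);
 *
 * which fills a malloc_utrace_t (p = 0, s = size, r = ret), hands it to
 * utrace(2), and preserves errno across the system call.
 */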
238 /******************************************************************************/
240 * Function prototypes for static functions that are referenced prior to
244 static bool malloc_init_hard_a0(void);
245 static bool malloc_init_hard(void);
247 /******************************************************************************/
249 * Begin miscellaneous support functions.
252 JEMALLOC_ALWAYS_INLINE_C bool
253 malloc_initialized(void)
256 return (malloc_init_state == malloc_init_initialized);
259 JEMALLOC_ALWAYS_INLINE_C void
260 malloc_thread_init(void)
264 * TSD initialization can't be safely done as a side effect of
265 * deallocation, because it is possible for a thread to do nothing but
266 * deallocate its TLS data via free(), in which case writing to TLS
267 * would cause write-after-free memory corruption. The quarantine
268 * facility *only* gets used as a side effect of deallocation, so make
269 * a best-effort attempt at initializing its TSD by hooking all
272 if (config_fill && unlikely(opt_quarantine))
273 quarantine_alloc_hook();
276 JEMALLOC_ALWAYS_INLINE_C bool
280 if (unlikely(malloc_init_state == malloc_init_uninitialized))
281 return (malloc_init_hard_a0());
285 JEMALLOC_ALWAYS_INLINE_C bool
289 if (unlikely(!malloc_initialized()) && malloc_init_hard())
291 malloc_thread_init();
297 * The a0*() functions are used instead of i[mcd]alloc() in situations that
298 * cannot tolerate TLS variable access.
310 a0ialloc(size_t size, bool zero, bool is_metadata)
313 if (unlikely(malloc_init_a0()))
316 return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
320 a0idalloc(void *ptr, bool is_metadata)
323 idalloctm(NULL, ptr, false, is_metadata);
327 a0malloc(size_t size)
330 return (a0ialloc(size, false, true));
337 a0idalloc(ptr, true);
341 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
342 * situations that cannot tolerate TLS variable access (TLS allocation and very
343 * early internal data structure initialization).
347 bootstrap_malloc(size_t size)
350 if (unlikely(size == 0))
353 return (a0ialloc(size, false, false));
357 bootstrap_calloc(size_t num, size_t size)
361 num_size = num * size;
362 if (unlikely(num_size == 0)) {
363 assert(num == 0 || size == 0);
367 return (a0ialloc(num_size, true, false));
371 bootstrap_free(void *ptr)
374 if (unlikely(ptr == NULL))
377 a0idalloc(ptr, false);
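/*
 * Minimal usage sketch (the calling context is hypothetical): libc-internal
 * code that runs while TLS is still being set up can route allocations
 * through these entry points, because they bottom out in
 * a0ialloc()/a0idalloc() and never touch thread-specific data.
 *
 *     void **slots = bootstrap_calloc(nkeys, sizeof(void *));
 *     ...
 *     bootstrap_free(slots);
 */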
380 /* Create a new arena and insert it into the arenas array at index ind. */
382 arena_init_locked(unsigned ind)
386 /* Expand arenas if necessary. */
387 assert(ind <= narenas_total);
388 if (ind > MALLOCX_ARENA_MAX)
390 if (ind == narenas_total) {
391 unsigned narenas_new = narenas_total + 1;
392 arena_t **arenas_new =
393 (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
395 if (arenas_new == NULL)
397 memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
398 arenas_new[ind] = NULL;
400 * Deallocate only if arenas came from a0malloc() (not
403 if (narenas_total != narenas_auto)
406 narenas_total = narenas_new;
410 * Another thread may have already initialized arenas[ind] if it's an
415 assert(ind < narenas_auto);
419 /* Actually initialize the arena. */
420 arena = arenas[ind] = arena_new(ind);
425 arena_init(unsigned ind)
429 malloc_mutex_lock(&arenas_lock);
430 arena = arena_init_locked(ind);
431 malloc_mutex_unlock(&arenas_lock);
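/*
 * Sketch of how a manually created arena reaches this code (mallctl and flag
 * names per the jemalloc 4.x API; the sizes are arbitrary):
 *
 *     unsigned ind;
 *     size_t sz = sizeof(ind);
 *     mallctl("arenas.extend", &ind, &sz, NULL, 0);  // append a new arena
 *     void *p = mallocx(4096, MALLOCX_ARENA(ind));   // allocate from it
 *
 * Both steps end up in arena_init(), which is presumably why
 * arena_init_locked() tolerates ind == narenas_total and grows the arenas
 * array by one slot.
 */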
436 narenas_total_get(void)
440 malloc_mutex_lock(&arenas_lock);
441 narenas = narenas_total;
442 malloc_mutex_unlock(&arenas_lock);
448 arena_bind_locked(tsd_t *tsd, unsigned ind)
455 if (tsd_nominal(tsd))
456 tsd_arena_set(tsd, arena);
460 arena_bind(tsd_t *tsd, unsigned ind)
463 malloc_mutex_lock(&arenas_lock);
464 arena_bind_locked(tsd, ind);
465 malloc_mutex_unlock(&arenas_lock);
469 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
471 arena_t *oldarena, *newarena;
473 malloc_mutex_lock(&arenas_lock);
474 oldarena = arenas[oldind];
475 newarena = arenas[newind];
476 oldarena->nthreads--;
477 newarena->nthreads++;
478 malloc_mutex_unlock(&arenas_lock);
479 tsd_arena_set(tsd, newarena);
483 arena_nbound(unsigned ind)
487 malloc_mutex_lock(&arenas_lock);
488 nthreads = arenas[ind]->nthreads;
489 malloc_mutex_unlock(&arenas_lock);
494 arena_unbind(tsd_t *tsd, unsigned ind)
498 malloc_mutex_lock(&arenas_lock);
501 malloc_mutex_unlock(&arenas_lock);
502 tsd_arena_set(tsd, NULL);
506 arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
509 arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
510 unsigned narenas_cache = tsd_narenas_cache_get(tsd);
511 unsigned narenas_actual = narenas_total_get();
513 /* Deallocate old cache if it's too small. */
514 if (arenas_cache != NULL && narenas_cache < narenas_actual) {
515 a0dalloc(arenas_cache);
518 tsd_arenas_cache_set(tsd, arenas_cache);
519 tsd_narenas_cache_set(tsd, narenas_cache);
522 /* Allocate cache if it's missing. */
523 if (arenas_cache == NULL) {
524 bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
525 assert(ind < narenas_actual || !init_if_missing);
526 narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
528 if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
529 *arenas_cache_bypassp = true;
530 arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
532 *arenas_cache_bypassp = false;
534 if (arenas_cache == NULL) {
536 * This function must always tell the truth, even if
537 * it's slow, so don't let OOM, thread cleanup (note
538 * tsd_nominal check), or recursive allocation
539 * avoidance (note arenas_cache_bypass check) get in the
542 if (ind >= narenas_actual)
544 malloc_mutex_lock(&arenas_lock);
546 malloc_mutex_unlock(&arenas_lock);
549 assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
550 tsd_arenas_cache_set(tsd, arenas_cache);
551 tsd_narenas_cache_set(tsd, narenas_cache);
555 * Copy to cache. It's possible that the actual number of arenas has
556 * increased since narenas_total_get() was called above, but that causes
557 * no correctness issues unless two threads concurrently execute the
558 * arenas.extend mallctl, which we trust mallctl synchronization to
561 malloc_mutex_lock(&arenas_lock);
562 memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
563 malloc_mutex_unlock(&arenas_lock);
564 if (narenas_cache > narenas_actual) {
565 memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
566 (narenas_cache - narenas_actual));
569 /* Read the refreshed cache, and init the arena if necessary. */
570 arena = arenas_cache[ind];
571 if (init_if_missing && arena == NULL)
572 arena = arenas_cache[ind] = arena_init(ind);
576 /* Slow path, called only by arena_choose(). */
578 arena_choose_hard(tsd_t *tsd)
582 if (narenas_auto > 1) {
583 unsigned i, choose, first_null;
586 first_null = narenas_auto;
587 malloc_mutex_lock(&arenas_lock);
588 assert(a0get() != NULL);
589 for (i = 1; i < narenas_auto; i++) {
590 if (arenas[i] != NULL) {
592 * Choose the first arena that has the lowest
593 * number of threads assigned to it.
595 if (arenas[i]->nthreads <
596 arenas[choose]->nthreads)
598 } else if (first_null == narenas_auto) {
600 * Record the index of the first uninitialized
601 * arena, in case all extant arenas are in use.
603 * NB: It is possible for there to be
604 * discontinuities in terms of initialized
605 * versus uninitialized arenas, due to the
606 * "thread.arena" mallctl.
612 if (arenas[choose]->nthreads == 0
613 || first_null == narenas_auto) {
615 * Use an unloaded arena, or the least loaded arena if
616 * all arenas are already initialized.
618 ret = arenas[choose];
620 /* Initialize a new arena. */
622 ret = arena_init_locked(choose);
624 malloc_mutex_unlock(&arenas_lock);
628 arena_bind_locked(tsd, choose);
629 malloc_mutex_unlock(&arenas_lock);
639 thread_allocated_cleanup(tsd_t *tsd)
646 thread_deallocated_cleanup(tsd_t *tsd)
653 arena_cleanup(tsd_t *tsd)
657 arena = tsd_arena_get(tsd);
659 arena_unbind(tsd, arena->ind);
663 arenas_cache_cleanup(tsd_t *tsd)
665 arena_t **arenas_cache;
667 arenas_cache = tsd_arenas_cache_get(tsd);
668 if (arenas_cache != NULL) {
669 tsd_arenas_cache_set(tsd, NULL);
670 a0dalloc(arenas_cache);
675 narenas_cache_cleanup(tsd_t *tsd)
682 arenas_cache_bypass_cleanup(tsd_t *tsd)
689 stats_print_atexit(void)
692 if (config_tcache && config_stats) {
696 * Merge stats from extant threads. This is racy, since
697 * individual threads do not lock when recording tcache stats
698 * events. As a consequence, the final stats may be slightly
699 * out of date by the time they are reported, if other threads
700 * continue to allocate.
702 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
703 arena_t *arena = arenas[i];
708 * tcache_stats_merge() locks bins, so if any
709 * code is introduced that acquires both arena
710 * and bin locks in the opposite order,
711 * deadlocks may result.
713 malloc_mutex_lock(&arena->lock);
714 ql_foreach(tcache, &arena->tcache_ql, link) {
715 tcache_stats_merge(tcache, arena);
717 malloc_mutex_unlock(&arena->lock);
721 je_malloc_stats_print(NULL, NULL, NULL);
725 * End miscellaneous support functions.
727 /******************************************************************************/
729 * Begin initialization functions.
732 #ifndef JEMALLOC_HAVE_SECURE_GETENV
734 secure_getenv(const char *name)
737 # ifdef JEMALLOC_HAVE_ISSETUGID
738 if (issetugid() != 0)
741 return (getenv(name));
753 result = si.dwNumberOfProcessors;
755 result = sysconf(_SC_NPROCESSORS_ONLN);
757 return ((result == -1) ? 1 : (unsigned)result);
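/*
 * Self-contained sketch of the same probe for POSIX systems (the function
 * above additionally handles Windows via GetSystemInfo()):
 *
 *     #include <unistd.h>
 *
 *     static unsigned
 *     ncpus_online(void)
 *     {
 *         long n = sysconf(_SC_NPROCESSORS_ONLN);
 *
 *         return ((n < 1) ? 1 : (unsigned)n);  // fall back to a single CPU
 *     }
 */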
761 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
762 char const **v_p, size_t *vlen_p)
765 const char *opts = *opts_p;
769 for (accept = false; !accept;) {
771 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
772 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
773 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
774 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
776 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
777 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
778 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
779 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
781 case '0': case '1': case '2': case '3': case '4': case '5':
782 case '6': case '7': case '8': case '9':
788 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
793 if (opts != *opts_p) {
794 malloc_write("<jemalloc>: Conf string ends "
799 malloc_write("<jemalloc>: Malformed conf string\n");
804 for (accept = false; !accept;) {
809 * Look ahead one character here, because the next time
810 * this function is called, it will assume that end of
811 * input has been cleanly reached if no input remains,
812 * but we have optimistically already consumed the
813 * comma if one exists.
816 malloc_write("<jemalloc>: Conf string ends "
819 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
823 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
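/*
 * Example of the option string this parser consumes (the option names are
 * real, the particular combination is arbitrary):
 *
 *     "narenas:4,lg_chunk:21,junk:false"
 *
 * Each call peels off one key/value pair; for the first pair it returns
 * (k, klen) == ("narenas", 7) and (v, vlen) == ("4", 1), and advances
 * *opts_p past the trailing comma, if any.
 */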
837 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
841 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
846 malloc_conf_init(void)
849 char buf[PATH_MAX + 1];
850 const char *opts, *k, *v;
854 * Automatically configure valgrind before processing options. The
855 * valgrind option remains in jemalloc 3.x for compatibility reasons.
857 if (config_valgrind) {
858 in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
859 if (config_fill && unlikely(in_valgrind)) {
861 opt_junk_alloc = false;
862 opt_junk_free = false;
864 opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
867 if (config_tcache && unlikely(in_valgrind))
871 for (i = 0; i < 3; i++) {
872 /* Get runtime configuration. */
875 if (je_malloc_conf != NULL) {
877 * Use options that were compiled into the
880 opts = je_malloc_conf;
882 /* No configuration specified. */
890 int saved_errno = errno;
891 const char *linkname =
892 # ifdef JEMALLOC_PREFIX
893 "/etc/"JEMALLOC_PREFIX"malloc.conf"
900 * Try to use the contents of the "/etc/malloc.conf"
901 * symbolic link's name.
903 linklen = readlink(linkname, buf, sizeof(buf) - 1);
905 /* No configuration specified. */
908 set_errno(saved_errno);
915 const char *envname =
916 #ifdef JEMALLOC_PREFIX
917 JEMALLOC_CPREFIX"MALLOC_CONF"
923 if ((opts = secure_getenv(envname)) != NULL) {
925 * Do nothing; opts is already initialized to
926 * the value of the MALLOC_CONF environment
930 /* No configuration specified. */
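/*
 * The three passes above correspond to the documented configuration sources,
 * processed in order so that settings from later sources override earlier
 * ones.  Hypothetical examples of each (option values are arbitrary):
 *
 *     1. A string compiled into the application; for unprefixed builds the
 *        symbol is typically visible as plain malloc_conf:
 *
 *            const char *malloc_conf = "narenas:1";
 *
 *     2. The name (link target text, read via readlink()) of a symbolic
 *        link at /etc/malloc.conf, e.g. a link pointing at
 *        "stats_print:true".
 *
 *     3. The MALLOC_CONF environment variable, e.g.
 *        MALLOC_CONF="junk:true".
 */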
941 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
943 #define CONF_MATCH(n) \
944 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
945 #define CONF_MATCH_VALUE(n) \
946 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
947 #define CONF_HANDLE_BOOL(o, n, cont) \
948 if (CONF_MATCH(n)) { \
949 if (CONF_MATCH_VALUE("true")) \
951 else if (CONF_MATCH_VALUE("false")) \
955 "Invalid conf value", \
961 #define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
962 if (CONF_MATCH(n)) { \
967 um = malloc_strtoumax(v, &end, 0); \
968 if (get_errno() != 0 || (uintptr_t)end -\
969 (uintptr_t)v != vlen) { \
971 "Invalid conf value", \
974 if ((min) != 0 && um < (min)) \
976 else if (um > (max)) \
981 if (((min) != 0 && um < (min)) \
992 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
993 if (CONF_MATCH(n)) { \
998 l = strtol(v, &end, 0); \
999 if (get_errno() != 0 || (uintptr_t)end -\
1000 (uintptr_t)v != vlen) { \
1001 malloc_conf_error( \
1002 "Invalid conf value", \
1003 k, klen, v, vlen); \
1004 } else if (l < (ssize_t)(min) || l > \
1006 malloc_conf_error( \
1007 "Out-of-range conf value", \
1008 k, klen, v, vlen); \
1013 #define CONF_HANDLE_CHAR_P(o, n, d) \
1014 if (CONF_MATCH(n)) { \
1015 size_t cpylen = (vlen <= \
1016 sizeof(o)-1) ? vlen : \
1018 strncpy(o, v, cpylen); \
1023 CONF_HANDLE_BOOL(opt_abort, "abort", true)
1025 * Chunks always require at least one header page,
1026 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
1027 * possibly an additional page in the presence of
1028 * redzones. In order to simplify options processing,
1029 * use a conservative bound that accommodates all these
1032 CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
1033 LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
1034 (sizeof(size_t) << 3) - 1, true)
1035 if (strncmp("dss", k, klen) == 0) {
1038 for (i = 0; i < dss_prec_limit; i++) {
1039 if (strncmp(dss_prec_names[i], v, vlen)
1041 if (chunk_dss_prec_set(i)) {
1043 "Error setting dss",
1054 malloc_conf_error("Invalid conf value",
1059 CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
1061 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
1062 -1, (sizeof(size_t) << 3) - 1)
1063 CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
1065 if (CONF_MATCH("junk")) {
1066 if (CONF_MATCH_VALUE("true")) {
1068 opt_junk_alloc = opt_junk_free =
1070 } else if (CONF_MATCH_VALUE("false")) {
1072 opt_junk_alloc = opt_junk_free =
1074 } else if (CONF_MATCH_VALUE("alloc")) {
1076 opt_junk_alloc = true;
1077 opt_junk_free = false;
1078 } else if (CONF_MATCH_VALUE("free")) {
1080 opt_junk_alloc = false;
1081 opt_junk_free = true;
1084 "Invalid conf value", k,
1089 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
1090 0, SIZE_T_MAX, false)
1091 CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
1092 CONF_HANDLE_BOOL(opt_zero, "zero", true)
1094 if (config_utrace) {
1095 CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
1097 if (config_xmalloc) {
1098 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
1100 if (config_tcache) {
1101 CONF_HANDLE_BOOL(opt_tcache, "tcache",
1102 !config_valgrind || !in_valgrind)
1103 if (CONF_MATCH("tcache")) {
1104 assert(config_valgrind && in_valgrind);
1108 "tcache cannot be enabled "
1109 "while running inside Valgrind",
1114 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
1115 "lg_tcache_max", -1,
1116 (sizeof(size_t) << 3) - 1)
1119 CONF_HANDLE_BOOL(opt_prof, "prof", true)
1120 CONF_HANDLE_CHAR_P(opt_prof_prefix,
1121 "prof_prefix", "jeprof")
1122 CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
1124 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1125 "prof_thread_active_init", true)
1126 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1127 "lg_prof_sample", 0,
1128 (sizeof(uint64_t) << 3) - 1, true)
1129 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
1131 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1132 "lg_prof_interval", -1,
1133 (sizeof(uint64_t) << 3) - 1)
1134 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
1136 CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
1138 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
1141 malloc_conf_error("Invalid conf pair", k, klen, v,
1144 #undef CONF_HANDLE_BOOL
1145 #undef CONF_HANDLE_SIZE_T
1146 #undef CONF_HANDLE_SSIZE_T
1147 #undef CONF_HANDLE_CHAR_P
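/*
 * Rough expansion of one handler, to show the pattern the macros above
 * implement (formatting differs from the generated code):
 *
 *     if (sizeof("abort")-1 == klen && strncmp("abort", k, klen) == 0) {
 *         if (sizeof("true")-1 == vlen && strncmp("true", v, vlen) == 0)
 *             opt_abort = true;
 *         else if (sizeof("false")-1 == vlen &&
 *             strncmp("false", v, vlen) == 0)
 *             opt_abort = false;
 *         else {
 *             malloc_conf_error("Invalid conf value", k, klen, v, vlen);
 *         }
 *         continue;
 *     }
 */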
1152 /* init_lock must be held. */
1154 malloc_init_hard_needed(void)
1157 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1158 malloc_init_recursible)) {
1160 * Another thread initialized the allocator before this one
1161 * acquired init_lock, or this thread is the initializing
1162 * thread, and it is recursively allocating.
1166 #ifdef JEMALLOC_THREADED_INIT
1167 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1168 /* Busy-wait until the initializing thread completes. */
1170 malloc_mutex_unlock(&init_lock);
1172 malloc_mutex_lock(&init_lock);
1173 } while (!malloc_initialized());
1180 /* init_lock must be held. */
1182 malloc_init_hard_a0_locked(void)
1185 malloc_initializer = INITIALIZER;
1190 if (opt_stats_print) {
1191 /* Print statistics at exit. */
1192 if (atexit(stats_print_atexit) != 0) {
1193 malloc_write("<jemalloc>: Error in atexit()\n");
1208 if (config_tcache && tcache_boot())
1210 if (malloc_mutex_init(&arenas_lock))
1213 * Create enough scaffolding to allow recursive allocation in
1216 narenas_total = narenas_auto = 1;
1218 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1220 * Initialize one arena here. The rest are lazily created in
1221 * arena_choose_hard().
1223 if (arena_init(0) == NULL)
1225 malloc_init_state = malloc_init_a0_initialized;
1230 malloc_init_hard_a0(void)
1234 malloc_mutex_lock(&init_lock);
1235 ret = malloc_init_hard_a0_locked();
1236 malloc_mutex_unlock(&init_lock);
1241 * Initialize data structures which may trigger recursive allocation.
1243 * init_lock must be held.
1246 malloc_init_hard_recursible(void)
1249 malloc_init_state = malloc_init_recursible;
1250 malloc_mutex_unlock(&init_lock);
1252 ncpus = malloc_ncpus();
1254 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
1255 && !defined(_WIN32) && !defined(__native_client__))
1256 /* LinuxThreads's pthread_atfork() allocates. */
1257 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1258 jemalloc_postfork_child) != 0) {
1259 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1264 malloc_mutex_lock(&init_lock);
1267 /* init_lock must be held. */
1269 malloc_init_hard_finish(void)
1275 if (opt_narenas == 0) {
1277 * For SMP systems, create more than one arena per CPU by
1281 opt_narenas = ncpus << 2;
1285 narenas_auto = opt_narenas;
1287 * Make sure that the arenas array can be allocated. In practice, this
1288 * limit is enough to allow the allocator to function, but the ctl
1289 * machinery will fail to allocate memory at far lower limits.
1291 if (narenas_auto > chunksize / sizeof(arena_t *)) {
1292 narenas_auto = chunksize / sizeof(arena_t *);
1293 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1296 narenas_total = narenas_auto;
1298 /* Allocate and initialize arenas. */
1299 arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
1303 * Zero the array. In practice, this should always be pre-zeroed,
1304 * since it was just mmap()ed, but let's be sure.
1306 memset(arenas, 0, sizeof(arena_t *) * narenas_total);
1307 /* Copy the pointer to the one arena that was already initialized. */
1310 malloc_init_state = malloc_init_initialized;
1315 malloc_init_hard(void)
1318 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1321 malloc_mutex_lock(&init_lock);
1322 if (!malloc_init_hard_needed()) {
1323 malloc_mutex_unlock(&init_lock);
1327 if (malloc_init_state != malloc_init_a0_initialized &&
1328 malloc_init_hard_a0_locked()) {
1329 malloc_mutex_unlock(&init_lock);
1332 if (malloc_tsd_boot0()) {
1333 malloc_mutex_unlock(&init_lock);
1336 if (config_prof && prof_boot2()) {
1337 malloc_mutex_unlock(&init_lock);
1341 malloc_init_hard_recursible();
1343 if (malloc_init_hard_finish()) {
1344 malloc_mutex_unlock(&init_lock);
1348 malloc_mutex_unlock(&init_lock);
1354 * End initialization functions.
1356 /******************************************************************************/
1358 * Begin malloc(3)-compatible functions.
1362 imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
1368 if (usize <= SMALL_MAXCLASS) {
1369 p = imalloc(tsd, LARGE_MINCLASS);
1372 arena_prof_promoted(p, usize);
1374 p = imalloc(tsd, usize);
1379 JEMALLOC_ALWAYS_INLINE_C void *
1380 imalloc_prof(tsd_t *tsd, size_t usize)
1385 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1386 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1387 p = imalloc_prof_sample(tsd, usize, tctx);
1389 p = imalloc(tsd, usize);
1390 if (unlikely(p == NULL)) {
1391 prof_alloc_rollback(tsd, tctx, true);
1394 prof_malloc(p, usize, tctx);
1399 JEMALLOC_ALWAYS_INLINE_C void *
1400 imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
1403 if (unlikely(malloc_init()))
1407 if (config_prof && opt_prof) {
1409 if (unlikely(*usize == 0))
1411 return (imalloc_prof(*tsd, *usize));
1414 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1416 return (imalloc(*tsd, size));
1419 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1420 void JEMALLOC_NOTHROW *
1421 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
1422 je_malloc(size_t size)
1426 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1431 ret = imalloc_body(size, &tsd, &usize);
1432 if (unlikely(ret == NULL)) {
1433 if (config_xmalloc && unlikely(opt_xmalloc)) {
1434 malloc_write("<jemalloc>: Error in malloc(): "
1440 if (config_stats && likely(ret != NULL)) {
1441 assert(usize == isalloc(ret, config_prof));
1442 *tsd_thread_allocatedp_get(tsd) += usize;
1444 UTRACE(0, size, ret);
1445 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
1450 imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
1457 if (usize <= SMALL_MAXCLASS) {
1458 assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
1459 p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
1462 arena_prof_promoted(p, usize);
1464 p = ipalloc(tsd, usize, alignment, false);
1469 JEMALLOC_ALWAYS_INLINE_C void *
1470 imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
1475 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1476 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1477 p = imemalign_prof_sample(tsd, alignment, usize, tctx);
1479 p = ipalloc(tsd, usize, alignment, false);
1480 if (unlikely(p == NULL)) {
1481 prof_alloc_rollback(tsd, tctx, true);
1484 prof_malloc(p, usize, tctx);
1489 JEMALLOC_ATTR(nonnull(1))
1491 imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
1498 assert(min_alignment != 0);
1500 if (unlikely(malloc_init())) {
1508 /* Make sure that alignment is a large enough power of 2. */
1509 if (unlikely(((alignment - 1) & alignment) != 0
1510 || (alignment < min_alignment))) {
1511 if (config_xmalloc && unlikely(opt_xmalloc)) {
1512 malloc_write("<jemalloc>: Error allocating "
1513 "aligned memory: invalid alignment\n");
1521 usize = sa2u(size, alignment);
1522 if (unlikely(usize == 0)) {
1527 if (config_prof && opt_prof)
1528 result = imemalign_prof(tsd, alignment, usize);
1530 result = ipalloc(tsd, usize, alignment, false);
1531 if (unlikely(result == NULL))
1533 assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
1538 if (config_stats && likely(result != NULL)) {
1539 assert(usize == isalloc(result, config_prof));
1540 *tsd_thread_allocatedp_get(tsd) += usize;
1542 UTRACE(0, size, result);
1545 assert(result == NULL);
1546 if (config_xmalloc && unlikely(opt_xmalloc)) {
1547 malloc_write("<jemalloc>: Error allocating aligned memory: "
1555 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1556 JEMALLOC_ATTR(nonnull(1))
1557 je_posix_memalign(void **memptr, size_t alignment, size_t size)
1559 int ret = imemalign(memptr, alignment, size, sizeof(void *));
1560 JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
1561 config_prof), false);
1565 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1566 void JEMALLOC_NOTHROW *
1567 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
1568 je_aligned_alloc(size_t alignment, size_t size)
1573 if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
1577 JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
1583 icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
1589 if (usize <= SMALL_MAXCLASS) {
1590 p = icalloc(tsd, LARGE_MINCLASS);
1593 arena_prof_promoted(p, usize);
1595 p = icalloc(tsd, usize);
1600 JEMALLOC_ALWAYS_INLINE_C void *
1601 icalloc_prof(tsd_t *tsd, size_t usize)
1606 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1607 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1608 p = icalloc_prof_sample(tsd, usize, tctx);
1610 p = icalloc(tsd, usize);
1611 if (unlikely(p == NULL)) {
1612 prof_alloc_rollback(tsd, tctx, true);
1615 prof_malloc(p, usize, tctx);
1620 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1621 void JEMALLOC_NOTHROW *
1622 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
1623 je_calloc(size_t num, size_t size)
1628 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1630 if (unlikely(malloc_init())) {
1637 num_size = num * size;
1638 if (unlikely(num_size == 0)) {
1639 if (num == 0 || size == 0)
1646 * Try to avoid division here. We know that it isn't possible to
1647 * overflow during multiplication if neither operand uses any of the
1648 * most significant half of the bits in a size_t.
1650 } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
1651 2))) && (num_size / size != num))) {
1652 /* size_t overflow. */
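/*
 * Worked example of the overflow test above, assuming 64-bit size_t (so
 * sizeof(size_t) << 2 == 32): if neither num nor size has any bit set at
 * position 32 or higher, both operands are < 2^32 and num * size is < 2^64,
 * so the division is skipped entirely.  Only when either operand reaches the
 * upper half does (num_size / size != num) decide whether the multiplication
 * actually wrapped.
 */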
1657 if (config_prof && opt_prof) {
1658 usize = s2u(num_size);
1659 if (unlikely(usize == 0)) {
1663 ret = icalloc_prof(tsd, usize);
1665 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1666 usize = s2u(num_size);
1667 ret = icalloc(tsd, num_size);
1671 if (unlikely(ret == NULL)) {
1672 if (config_xmalloc && unlikely(opt_xmalloc)) {
1673 malloc_write("<jemalloc>: Error in calloc(): out of "
1679 if (config_stats && likely(ret != NULL)) {
1680 assert(usize == isalloc(ret, config_prof));
1681 *tsd_thread_allocatedp_get(tsd) += usize;
1683 UTRACE(0, num_size, ret);
1684 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1689 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
1696 if (usize <= SMALL_MAXCLASS) {
1697 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
1700 arena_prof_promoted(p, usize);
1702 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1707 JEMALLOC_ALWAYS_INLINE_C void *
1708 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
1712 prof_tctx_t *old_tctx, *tctx;
1714 prof_active = prof_active_get_unlocked();
1715 old_tctx = prof_tctx_get(old_ptr);
1716 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
1717 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1718 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
1720 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1721 if (unlikely(p == NULL)) {
1722 prof_alloc_rollback(tsd, tctx, true);
1725 prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
1731 JEMALLOC_INLINE_C void
1732 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
1735 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1737 assert(ptr != NULL);
1738 assert(malloc_initialized() || IS_INITIALIZER);
1740 if (config_prof && opt_prof) {
1741 usize = isalloc(ptr, config_prof);
1742 prof_free(tsd, ptr, usize);
1743 } else if (config_stats || config_valgrind)
1744 usize = isalloc(ptr, config_prof);
1746 *tsd_thread_deallocatedp_get(tsd) += usize;
1747 if (config_valgrind && unlikely(in_valgrind))
1749 iqalloc(tsd, ptr, tcache);
1750 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1753 JEMALLOC_INLINE_C void
1754 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
1756 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1758 assert(ptr != NULL);
1759 assert(malloc_initialized() || IS_INITIALIZER);
1761 if (config_prof && opt_prof)
1762 prof_free(tsd, ptr, usize);
1764 *tsd_thread_deallocatedp_get(tsd) += usize;
1765 if (config_valgrind && unlikely(in_valgrind))
1767 isqalloc(tsd, ptr, usize, tcache);
1768 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1771 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1772 void JEMALLOC_NOTHROW *
1773 JEMALLOC_ALLOC_SIZE(2)
1774 je_realloc(void *ptr, size_t size)
1777 tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
1778 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1779 size_t old_usize = 0;
1780 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1782 if (unlikely(size == 0)) {
1784 /* realloc(ptr, 0) is equivalent to free(ptr). */
1787 ifree(tsd, ptr, tcache_get(tsd, false));
1793 if (likely(ptr != NULL)) {
1794 assert(malloc_initialized() || IS_INITIALIZER);
1795 malloc_thread_init();
1798 old_usize = isalloc(ptr, config_prof);
1799 if (config_valgrind && unlikely(in_valgrind))
1800 old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
1802 if (config_prof && opt_prof) {
1804 ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
1805 ptr, old_usize, usize);
1807 if (config_stats || (config_valgrind &&
1808 unlikely(in_valgrind)))
1810 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
1813 /* realloc(NULL, size) is equivalent to malloc(size). */
1814 ret = imalloc_body(size, &tsd, &usize);
1817 if (unlikely(ret == NULL)) {
1818 if (config_xmalloc && unlikely(opt_xmalloc)) {
1819 malloc_write("<jemalloc>: Error in realloc(): "
1825 if (config_stats && likely(ret != NULL)) {
1826 assert(usize == isalloc(ret, config_prof));
1827 *tsd_thread_allocatedp_get(tsd) += usize;
1828 *tsd_thread_deallocatedp_get(tsd) += old_usize;
1830 UTRACE(ptr, size, ret);
1831 JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
1832 old_rzsize, true, false);
1836 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
1841 if (likely(ptr != NULL)) {
1842 tsd_t *tsd = tsd_fetch();
1843 ifree(tsd, ptr, tcache_get(tsd, false));
1848 * End malloc(3)-compatible functions.
1850 /******************************************************************************/
1852 * Begin non-standard override functions.
1855 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
1856 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1857 void JEMALLOC_NOTHROW *
1858 JEMALLOC_ATTR(malloc)
1859 je_memalign(size_t alignment, size_t size)
1861 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1862 if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
1864 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1869 #ifdef JEMALLOC_OVERRIDE_VALLOC
1870 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1871 void JEMALLOC_NOTHROW *
1872 JEMALLOC_ATTR(malloc)
1873 je_valloc(size_t size)
1875 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1876 if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
1878 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1884 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1885 * #define je_malloc malloc
1887 #define malloc_is_malloc 1
1888 #define is_malloc_(a) malloc_is_ ## a
1889 #define is_malloc(a) is_malloc_(a)
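/*
 * Sketch of the token pasting involved: when jemalloc_defs.h maps the public
 * names (e.g. "#define je_malloc malloc"), the test expands as
 *
 *     is_malloc(je_malloc) -> is_malloc_(malloc) -> malloc_is_malloc -> 1
 *
 * because the argument is macro-expanded before being pasted in is_malloc_().
 * Without that mapping the result is malloc_is_je_malloc, an undefined
 * identifier that evaluates to 0 in the #if below.
 */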
1891 #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
1893 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1894 * to inconsistently reference libc's malloc(3)-compatible functions
1895 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1897 * These definitions interpose hooks in glibc. The functions are actually
1898 * passed an extra argument for the caller return address, which will be
1901 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
1902 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
1903 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
1904 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
1905 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
1911 * End non-standard override functions.
1913 /******************************************************************************/
1915 * Begin non-standard functions.
1918 JEMALLOC_ALWAYS_INLINE_C bool
1919 imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
1920 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
1923 if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
1927 *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
1928 *usize = sa2u(size, *alignment);
1930 assert(*usize != 0);
1931 *zero = MALLOCX_ZERO_GET(flags);
1932 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
1933 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
1936 *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
1938 *tcache = tcache_get(tsd, true);
1939 if ((flags & MALLOCX_ARENA_MASK) != 0) {
1940 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
1941 *arena = arena_get(tsd, arena_ind, true, true);
1942 if (unlikely(*arena == NULL))
1949 JEMALLOC_ALWAYS_INLINE_C bool
1950 imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
1951 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
1954 if (likely(flags == 0)) {
1956 assert(*usize != 0);
1959 *tcache = tcache_get(tsd, true);
1963 return (imallocx_flags_decode_hard(tsd, size, flags, usize,
1964 alignment, zero, tcache, arena));
1968 JEMALLOC_ALWAYS_INLINE_C void *
1969 imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
1970 tcache_t *tcache, arena_t *arena)
1973 if (unlikely(alignment != 0))
1974 return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
1976 return (icalloct(tsd, usize, tcache, arena));
1977 return (imalloct(tsd, usize, tcache, arena));
1981 imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
1982 tcache_t *tcache, arena_t *arena)
1986 if (usize <= SMALL_MAXCLASS) {
1987 assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
1988 sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
1989 p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
1993 arena_prof_promoted(p, usize);
1995 p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
2000 JEMALLOC_ALWAYS_INLINE_C void *
2001 imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2010 if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
2011 &zero, &tcache, &arena)))
2013 tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
2014 if (likely((uintptr_t)tctx == (uintptr_t)1U))
2015 p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2016 else if ((uintptr_t)tctx > (uintptr_t)1U) {
2017 p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
2021 if (unlikely(p == NULL)) {
2022 prof_alloc_rollback(tsd, tctx, true);
2025 prof_malloc(p, *usize, tctx);
2027 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2031 JEMALLOC_ALWAYS_INLINE_C void *
2032 imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2040 if (likely(flags == 0)) {
2041 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2043 return (imalloc(tsd, size));
2046 if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
2047 &alignment, &zero, &tcache, &arena)))
2049 p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2050 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2054 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2055 void JEMALLOC_NOTHROW *
2056 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2057 je_mallocx(size_t size, int flags)
2065 if (unlikely(malloc_init()))
2069 if (config_prof && opt_prof)
2070 p = imallocx_prof(tsd, size, flags, &usize);
2072 p = imallocx_no_prof(tsd, size, flags, &usize);
2073 if (unlikely(p == NULL))
2077 assert(usize == isalloc(p, config_prof));
2078 *tsd_thread_allocatedp_get(tsd) += usize;
2081 JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
2084 if (config_xmalloc && unlikely(opt_xmalloc)) {
2085 malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
2093 irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
2094 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2101 if (usize <= SMALL_MAXCLASS) {
2102 p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
2103 zero, tcache, arena);
2106 arena_prof_promoted(p, usize);
2108 p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
2115 JEMALLOC_ALWAYS_INLINE_C void *
2116 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2117 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2122 prof_tctx_t *old_tctx, *tctx;
2124 prof_active = prof_active_get_unlocked();
2125 old_tctx = prof_tctx_get(old_ptr);
2126 tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
2127 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2128 p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
2129 alignment, zero, tcache, arena, tctx);
2131 p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
2134 if (unlikely(p == NULL)) {
2135 prof_alloc_rollback(tsd, tctx, true);
2139 if (p == old_ptr && alignment != 0) {
2141 * The allocation did not move, so it is possible that the size
2142 * class is smaller than would guarantee the requested
2143 * alignment, and that the alignment constraint was
2144 * serendipitously satisfied. Additionally, old_usize may not
2145 * be the same as the current usize because of in-place large
2146 * reallocation. Therefore, query the actual value of usize.
2148 *usize = isalloc(p, config_prof);
2150 prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
2151 old_usize, old_tctx);
2156 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2157 void JEMALLOC_NOTHROW *
2158 JEMALLOC_ALLOC_SIZE(2)
2159 je_rallocx(void *ptr, size_t size, int flags)
2165 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2166 size_t alignment = MALLOCX_ALIGN_GET(flags);
2167 bool zero = flags & MALLOCX_ZERO;
2171 assert(ptr != NULL);
2173 assert(malloc_initialized() || IS_INITIALIZER);
2174 malloc_thread_init();
2177 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2178 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2179 arena = arena_get(tsd, arena_ind, true, true);
2180 if (unlikely(arena == NULL))
2185 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2186 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2189 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2191 tcache = tcache_get(tsd, true);
2193 old_usize = isalloc(ptr, config_prof);
2194 if (config_valgrind && unlikely(in_valgrind))
2195 old_rzsize = u2rz(old_usize);
2197 if (config_prof && opt_prof) {
2198 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
2200 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2201 zero, tcache, arena);
2202 if (unlikely(p == NULL))
2205 p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
2207 if (unlikely(p == NULL))
2209 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2210 usize = isalloc(p, config_prof);
2212 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2215 *tsd_thread_allocatedp_get(tsd) += usize;
2216 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2218 UTRACE(ptr, size, p);
2219 JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
2220 old_rzsize, false, zero);
2223 if (config_xmalloc && unlikely(opt_xmalloc)) {
2224 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2227 UTRACE(ptr, size, 0);
2231 JEMALLOC_ALWAYS_INLINE_C size_t
2232 ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
2233 size_t alignment, bool zero)
2237 if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
2239 usize = isalloc(ptr, config_prof);
2245 ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
2246 size_t alignment, bool zero, prof_tctx_t *tctx)
2252 usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero);
2257 JEMALLOC_ALWAYS_INLINE_C size_t
2258 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2259 size_t extra, size_t alignment, bool zero)
2261 size_t usize_max, usize;
2263 prof_tctx_t *old_tctx, *tctx;
2265 prof_active = prof_active_get_unlocked();
2266 old_tctx = prof_tctx_get(ptr);
2268 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2269 * Therefore, compute its maximum possible value and use that in
2270 * prof_alloc_prep() to decide whether to capture a backtrace.
2271 * prof_realloc() will use the actual usize to decide whether to sample.
2273 usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
2275 assert(usize_max != 0);
2276 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2277 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2278 usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
2279 alignment, zero, tctx);
2281 usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2284 if (usize == old_usize) {
2285 prof_alloc_rollback(tsd, tctx, false);
2288 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2294 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2295 je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2298 size_t usize, old_usize;
2299 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2300 size_t alignment = MALLOCX_ALIGN_GET(flags);
2301 bool zero = flags & MALLOCX_ZERO;
2303 assert(ptr != NULL);
2305 assert(SIZE_T_MAX - size >= extra);
2306 assert(malloc_initialized() || IS_INITIALIZER);
2307 malloc_thread_init();
2310 old_usize = isalloc(ptr, config_prof);
2312 /* Clamp extra if necessary to avoid (size + extra) overflow. */
2313 if (unlikely(size + extra > HUGE_MAXCLASS)) {
2314 /* Check for size overflow. */
2315 if (unlikely(size > HUGE_MAXCLASS)) {
2317 goto label_not_resized;
2319 extra = HUGE_MAXCLASS - size;
2322 if (config_valgrind && unlikely(in_valgrind))
2323 old_rzsize = u2rz(old_usize);
2325 if (config_prof && opt_prof) {
2326 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2329 usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2332 if (unlikely(usize == old_usize))
2333 goto label_not_resized;
2336 *tsd_thread_allocatedp_get(tsd) += usize;
2337 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2339 JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
2340 old_rzsize, false, zero);
2342 UTRACE(ptr, size, ptr);
2346 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2348 je_sallocx(const void *ptr, int flags)
2352 assert(malloc_initialized() || IS_INITIALIZER);
2353 malloc_thread_init();
2355 if (config_ivsalloc)
2356 usize = ivsalloc(ptr, config_prof);
2358 usize = isalloc(ptr, config_prof);
2363 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2364 je_dallocx(void *ptr, int flags)
2369 assert(ptr != NULL);
2370 assert(malloc_initialized() || IS_INITIALIZER);
2373 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2374 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2377 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2379 tcache = tcache_get(tsd, false);
2382 ifree(tsd_fetch(), ptr, tcache);
2385 JEMALLOC_ALWAYS_INLINE_C size_t
2386 inallocx(size_t size, int flags)
2390 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
2393 usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2398 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2399 je_sdallocx(void *ptr, size_t size, int flags)
2405 assert(ptr != NULL);
2406 assert(malloc_initialized() || IS_INITIALIZER);
2407 usize = inallocx(size, flags);
2408 assert(usize == isalloc(ptr, config_prof));
2411 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2412 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2415 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2417 tcache = tcache_get(tsd, false);
2420 isfree(tsd, ptr, usize, tcache);
2423 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2425 je_nallocx(size_t size, int flags)
2430 if (unlikely(malloc_init()))
2433 return (inallocx(size, flags));
2436 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2437 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2441 if (unlikely(malloc_init()))
2444 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
2447 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2448 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
2451 if (unlikely(malloc_init()))
2454 return (ctl_nametomib(name, mibp, miblenp));
2457 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2458 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2459 void *newp, size_t newlen)
2462 if (unlikely(malloc_init()))
2465 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
2468 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2469 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2473 stats_print(write_cb, cbopaque, opts);
2476 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2477 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2481 assert(malloc_initialized() || IS_INITIALIZER);
2482 malloc_thread_init();
2484 if (config_ivsalloc)
2485 ret = ivsalloc(ptr, config_prof);
2487 ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
2493 * End non-standard functions.
2495 /******************************************************************************/
2497 * Begin compatibility functions.
2500 #define ALLOCM_LG_ALIGN(la) (la)
2501 #define ALLOCM_ALIGN(a) (ffsl(a)-1)
2502 #define ALLOCM_ZERO ((int)0x40)
2503 #define ALLOCM_NO_MOVE ((int)0x80)
2505 #define ALLOCM_SUCCESS 0
2506 #define ALLOCM_ERR_OOM 1
2507 #define ALLOCM_ERR_NOT_MOVED 2
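/*
 * Sketch of how the retired experimental API maps onto the current one (the
 * request below is arbitrary): a legacy call such as
 *
 *     int err = allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO);
 *
 * is serviced by je_allocm() below simply forwarding to
 *
 *     p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *
 * reporting the usable size through *rsize, and translating a NULL result
 * into ALLOCM_ERR_OOM.
 */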
2510 je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
2514 assert(ptr != NULL);
2516 p = je_mallocx(size, flags);
2518 return (ALLOCM_ERR_OOM);
2520 *rsize = isalloc(p, config_prof);
2522 return (ALLOCM_SUCCESS);
2526 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
2529 bool no_move = flags & ALLOCM_NO_MOVE;
2531 assert(ptr != NULL);
2532 assert(*ptr != NULL);
2534 assert(SIZE_T_MAX - size >= extra);
2537 size_t usize = je_xallocx(*ptr, size, extra, flags);
2538 ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
2542 void *p = je_rallocx(*ptr, size+extra, flags);
2545 ret = ALLOCM_SUCCESS;
2547 ret = ALLOCM_ERR_OOM;
2549 *rsize = isalloc(*ptr, config_prof);
2555 je_sallocm(const void *ptr, size_t *rsize, int flags)
2558 assert(rsize != NULL);
2559 *rsize = je_sallocx(ptr, flags);
2560 return (ALLOCM_SUCCESS);
2564 je_dallocm(void *ptr, int flags)
2567 je_dallocx(ptr, flags);
2568 return (ALLOCM_SUCCESS);
2572 je_nallocm(size_t *rsize, size_t size, int flags)
2576 usize = je_nallocx(size, flags);
2578 return (ALLOCM_ERR_OOM);
2581 return (ALLOCM_SUCCESS);
2584 #undef ALLOCM_LG_ALIGN
2587 #undef ALLOCM_NO_MOVE
2589 #undef ALLOCM_SUCCESS
2590 #undef ALLOCM_ERR_OOM
2591 #undef ALLOCM_ERR_NOT_MOVED
2594 * End compatibility functions.
2596 /******************************************************************************/
2598 * The following functions are used by threading libraries for protection of
2599 * malloc during fork().
2603 * If an application creates a thread before doing any allocation in the main
2604 * thread, then calls fork(2) in the main thread followed by memory allocation
2605 * in the child process, a race can occur that results in deadlock within the
2606 * child: the main thread may have forked while the created thread had
2607 * partially initialized the allocator. Ordinarily jemalloc prevents
2608 * fork/malloc races via the following functions it registers during
2609 * initialization using pthread_atfork(), but of course that does no good if
2610 * the allocator isn't fully initialized at fork time. The following library
2611 * constructor is a partial solution to this problem. It may still be possible
2612 * to trigger the deadlock described above, but doing so would involve forking
2613 * via a library constructor that runs before jemalloc's runs.
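/*
 * Generic sketch of the pthread_atfork() protection referred to above (the
 * mutex and handler names are illustrative, not jemalloc's):
 *
 *     static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *     static void prefork(void)         { pthread_mutex_lock(&lock); }
 *     static void postfork_parent(void) { pthread_mutex_unlock(&lock); }
 *     static void postfork_child(void)  { pthread_mutex_unlock(&lock); }
 *
 *     pthread_atfork(prefork, postfork_parent, postfork_child);
 *
 * Holding every allocator mutex across fork() ensures the child never
 * inherits a lock that some other thread (which does not exist in the
 * child) held mid-critical-section.
 */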
2615 JEMALLOC_ATTR(constructor)
2617 jemalloc_constructor(void)
2623 #ifndef JEMALLOC_MUTEX_INIT_CB
2625 jemalloc_prefork(void)
2627 JEMALLOC_EXPORT void
2628 _malloc_prefork(void)
2633 #ifdef JEMALLOC_MUTEX_INIT_CB
2634 if (!malloc_initialized())
2637 assert(malloc_initialized());
2639 /* Acquire all mutexes in a safe order. */
2642 malloc_mutex_prefork(&arenas_lock);
2643 for (i = 0; i < narenas_total; i++) {
2644 if (arenas[i] != NULL)
2645 arena_prefork(arenas[i]);
2651 #ifndef JEMALLOC_MUTEX_INIT_CB
2653 jemalloc_postfork_parent(void)
2655 JEMALLOC_EXPORT void
2656 _malloc_postfork(void)
2661 #ifdef JEMALLOC_MUTEX_INIT_CB
2662 if (!malloc_initialized())
2665 assert(malloc_initialized());
2667 /* Release all mutexes, now that fork() has completed. */
2668 base_postfork_parent();
2669 chunk_postfork_parent();
2670 for (i = 0; i < narenas_total; i++) {
2671 if (arenas[i] != NULL)
2672 arena_postfork_parent(arenas[i]);
2674 malloc_mutex_postfork_parent(&arenas_lock);
2675 prof_postfork_parent();
2676 ctl_postfork_parent();
2680 jemalloc_postfork_child(void)
2684 assert(malloc_initialized());
2686 /* Release all mutexes, now that fork() has completed. */
2687 base_postfork_child();
2688 chunk_postfork_child();
2689 for (i = 0; i < narenas_total; i++) {
2690 if (arenas[i] != NULL)
2691 arena_postfork_child(arenas[i]);
2693 malloc_mutex_postfork_child(&arenas_lock);
2694 prof_postfork_child();
2695 ctl_postfork_child();
2699 _malloc_first_thread(void)
2702 (void)malloc_mutex_first_thread();
2705 /******************************************************************************/