#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char *__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
/* Runtime configuration options. */
const char *je_malloc_conf;

const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true";
#else
    "false";
#endif
bool opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true;
#else
    false;
#endif
bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true;
#else
    false;
#endif

size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
unsigned opt_narenas = 0;
/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;

/* Protects arenas initialization. */
static malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests. Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
arena_t **arenas;
static unsigned narenas_total; /* Use narenas_total_*(). */
static arena_t *a0; /* arenas[0]; read-only after initialization. */
unsigned narenas_auto; /* Read-only after initialization. */
typedef enum {
	malloc_init_uninitialized	= 3,
	malloc_init_a0_initialized	= 2,
	malloc_init_recursible		= 1,
	malloc_init_initialized		= 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;
/* False should be the common case. Set to true to trigger initialization. */
static bool malloc_slow = true;

/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
	flag_opt_junk_alloc	= (1U),
	flag_opt_junk_free	= (1U << 1),
	flag_opt_quarantine	= (1U << 2),
	flag_opt_zero		= (1U << 3),
	flag_opt_utrace		= (1U << 4),
	flag_in_valgrind	= (1U << 5),
	flag_opt_xmalloc	= (1U << 6)
};
static uint8_t malloc_slow_flags;
JEMALLOC_ALIGNED(CACHELINE)
const size_t pind2sz_tab[NPSIZES] = {
#define PSZ_yes(lg_grp, ndelta, lg_delta) \
	(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
#define PSZ_no(lg_grp, ndelta, lg_delta)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
	PSZ_##psz(lg_grp, ndelta, lg_delta)
	SIZE_CLASSES
#undef PSZ_yes
#undef PSZ_no
#undef SC
};
JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES] = {
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
	SIZE_CLASSES
#undef SC
};
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define S2B_0(i)	i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i)	i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i)	i,
#elif LG_TINY_MIN == 3
#define S2B_3(i)	i,
#elif LG_TINY_MIN == 4
#define S2B_4(i)	i,
#elif LG_TINY_MIN == 5
#define S2B_5(i)	i,
#elif LG_TINY_MIN == 6
#define S2B_6(i)	i,
#elif LG_TINY_MIN == 7
#define S2B_7(i)	i,
#elif LG_TINY_MIN == 8
#define S2B_8(i)	i,
#elif LG_TINY_MIN == 9
#define S2B_9(i)	i,
#elif LG_TINY_MIN == 10
#define S2B_10(i)	i,
#elif LG_TINY_MIN == 11
#define S2B_11(i)	i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i)	S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i)	S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i)	S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i)	S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i)	S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i)	S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i)	S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i)	S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i)	S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i)	S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i)	S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
	S2B_##lg_delta_lookup(index)
	SIZE_CLASSES
#undef SC
};
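/*
 * Illustration (a sketch, not part of the build): S2B_n(i) expands to 2^n
 * copies of "i," so each size class emits one table byte per LG_TINY_MIN
 * quantum it spans, and a size-to-index lookup reduces to a shift plus a
 * byte load, roughly:
 *
 *	szind_t ind = (szind_t)size2index_tab[(size-1) >> LG_TINY_MIN];
 *
 * e.g. with LG_TINY_MIN == 3, any size in 1..8 indexes slot 0 of the table.
 */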
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif
/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	/*
	 * If another constructor in the same binary is using mallctl to
	 * e.g. set up chunk hooks, it may end up running before this one,
	 * and malloc_init_hard will crash trying to lock the uninitialized
	 * lock. So we force an initialization of the lock in
	 * malloc_init_hard as well. We don't try to care about atomicity
	 * of the accesses to the init_lock_initialized boolean, since it
	 * really only matters early in process creation, before any
	 * separate thread normally starts doing anything.
	 */
	if (!init_lock_initialized)
		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
	init_lock_initialized = true;
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do {						\
	if (unlikely(opt_utrace)) {					\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
# define UTRACE(a, b, c)
#endif
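/*
 * Convention for the records above, following the BSD utrace(2) usage seen
 * throughout this file (sketch): UTRACE(0, size, ret) for malloc-like calls,
 * UTRACE(ptr, size, ret) for realloc-like calls, and UTRACE(ptr, 0, 0) for
 * free-like calls. For example:
 *
 *	void *q = ...;			// result of an allocation
 *	UTRACE(NULL, 4096, q);		// records malloc(4096) -> q
 *	UTRACE(q, 0, 0);		// records free(q)
 */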
/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	malloc_init_hard_a0(void);
static bool	malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
malloc_initialized(void)
{

	return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption. The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && unlikely(opt_quarantine))
		quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init_a0(void)
{

	if (unlikely(malloc_init_state == malloc_init_uninitialized))
		return (malloc_init_hard_a0());
	return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

	if (unlikely(!malloc_initialized()) && malloc_init_hard())
		return (true);
	malloc_thread_init();
	return (false);
}
/*
 * The a0*() functions are used instead of i{d,}alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{

	if (unlikely(malloc_init_a0()))
		return (NULL);

	return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
	    is_metadata, arena_get(TSDN_NULL, 0, true), true));
}

static void
a0idalloc(void *ptr, bool is_metadata)
{

	idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
}

void *
a0malloc(size_t size)
{

	return (a0ialloc(size, false, true));
}

void
a0dalloc(void *ptr)
{

	a0idalloc(ptr, true);
}
/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size)
{

	if (unlikely(size == 0))
		size = 1;

	return (a0ialloc(size, false, false));
}

void *
bootstrap_calloc(size_t num, size_t size)
{
	size_t num_size;

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		assert(num == 0 || size == 0);
		num_size = 1;
	}

	return (a0ialloc(num_size, true, false));
}

void
bootstrap_free(void *ptr)
{

	if (unlikely(ptr == NULL))
		return;

	a0idalloc(ptr, false);
}
static void
arena_set(unsigned ind, arena_t *arena)
{

	atomic_write_p((void **)&arenas[ind], arena);
}

static void
narenas_total_set(unsigned narenas)
{

	atomic_write_u(&narenas_total, narenas);
}

static void
narenas_total_inc(void)
{

	atomic_add_u(&narenas_total, 1);
}

unsigned
narenas_total_get(void)
{

	return (atomic_read_u(&narenas_total));
}
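/*
 * Note on the pattern above (a sketch of the reasoning, not upstream text):
 * arena_set() publishes a fully constructed arena with a single atomic
 * pointer store, so a racing reader doing the equivalent of
 *
 *	arena_t *a = (arena_t *)atomic_read_p((void **)&arenas[ind]);
 *
 * either observes NULL (and falls back to the locked initialization path) or
 * observes a completely initialized arena; narenas_total follows the same
 * atomic read/write discipline.
 */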
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;

	assert(ind <= narenas_total_get());
	if (ind > MALLOCX_ARENA_MAX)
		return (NULL);
	if (ind == narenas_total_get())
		narenas_total_inc();

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arena_get(tsdn, ind, false);
	if (arena != NULL) {
		assert(ind < narenas_auto);
		return (arena);
	}

	/* Actually initialize the arena. */
	arena = arena_new(tsdn, ind);
	arena_set(ind, arena);
	return (arena);
}

arena_t *
arena_init(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;

	malloc_mutex_lock(tsdn, &arenas_lock);
	arena = arena_init_locked(tsdn, ind);
	malloc_mutex_unlock(tsdn, &arenas_lock);
	return (arena);
}
static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal)
{
	arena_t *arena;

	if (!tsd_nominal(tsd))
		return;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_inc(arena, internal);

	if (internal)
		tsd_iarena_set(tsd, arena);
	else
		tsd_arena_set(tsd, arena);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
	arena_t *oldarena, *newarena;

	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
	newarena = arena_get(tsd_tsdn(tsd), newind, false);
	arena_nthreads_dec(oldarena, false);
	arena_nthreads_inc(newarena, false);
	tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
{
	arena_t *arena;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_dec(arena, internal);

	if (internal)
		tsd_iarena_set(tsd, NULL);
	else
		tsd_arena_set(tsd, NULL);
}
arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
{
	arena_tdata_t *tdata, *arenas_tdata_old;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
	unsigned narenas_tdata_old, i;
	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
	unsigned narenas_actual = narenas_total_get();

	/*
	 * Dissociate old tdata array (and set up for deallocation upon return)
	 * if it's too small.
	 */
	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
		arenas_tdata_old = arenas_tdata;
		narenas_tdata_old = narenas_tdata;
		arenas_tdata = NULL;
		narenas_tdata = 0;
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	} else {
		arenas_tdata_old = NULL;
		narenas_tdata_old = 0;
	}

	/* Allocate tdata array if it's missing. */
	if (arenas_tdata == NULL) {
		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
			*arenas_tdata_bypassp = true;
			arenas_tdata = (arena_tdata_t *)a0malloc(
			    sizeof(arena_tdata_t) * narenas_tdata);
			*arenas_tdata_bypassp = false;
		}
		if (arenas_tdata == NULL) {
			tdata = NULL;
			goto label_return;
		}
		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	}

	/*
	 * Copy to tdata array. It's possible that the actual number of arenas
	 * has increased since narenas_total_get() was called above, but that
	 * causes no correctness issues unless two threads concurrently execute
	 * the arenas.extend mallctl, which we trust mallctl synchronization to
	 * prevent.
	 */

	/* Copy/initialize tickers. */
	for (i = 0; i < narenas_actual; i++) {
		if (i < narenas_tdata_old) {
			ticker_copy(&arenas_tdata[i].decay_ticker,
			    &arenas_tdata_old[i].decay_ticker);
		} else {
			ticker_init(&arenas_tdata[i].decay_ticker,
			    DECAY_NTICKS_PER_UPDATE);
		}
	}
	if (narenas_tdata > narenas_actual) {
		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
		    * (narenas_tdata - narenas_actual));
	}

	/* Read the refreshed tdata array. */
	tdata = &arenas_tdata[ind];
label_return:
	if (arenas_tdata_old != NULL)
		a0dalloc(arenas_tdata_old);
	return (tdata);
}
/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal)
{
	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	if (narenas_auto > 1) {
		unsigned i, j, choose[2], first_null;

		/*
		 * Determine binding for both non-internal and internal
		 * allocation.
		 *
		 *   choose[0]: For application allocation.
		 *   choose[1]: For internal metadata allocation.
		 */
		for (j = 0; j < 2; j++)
			choose[j] = 0;

		first_null = narenas_auto;
		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				for (j = 0; j < 2; j++) {
					if (arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), i, false), !!j) <
					    arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), choose[j], false),
					    !!j))
						choose[j] = i;
				}
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		for (j = 0; j < 2; j++) {
			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
			    choose[j], false), !!j) == 0 || first_null ==
			    narenas_auto) {
				/*
				 * Use an unloaded arena, or the least loaded
				 * arena if all arenas are already initialized.
				 */
				if (!!j == internal) {
					ret = arena_get(tsd_tsdn(tsd),
					    choose[j], false);
				}
			} else {
				arena_t *arena;

				/* Initialize a new arena. */
				choose[j] = first_null;
				arena = arena_init_locked(tsd_tsdn(tsd),
				    choose[j]);
				if (arena == NULL) {
					malloc_mutex_unlock(tsd_tsdn(tsd),
					    &arenas_lock);
					return (NULL);
				}
				if (!!j == internal)
					ret = arena;
			}
			arena_bind(tsd, choose[j], !!j);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
	} else {
		ret = arena_get(tsd_tsdn(tsd), 0, false);
		arena_bind(tsd, 0, false);
		arena_bind(tsd, 0, true);
	}

	return (ret);
}
void
thread_allocated_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
thread_deallocated_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
iarena_cleanup(tsd_t *tsd)
{
	arena_t *iarena;

	iarena = tsd_iarena_get(tsd);
	if (iarena != NULL)
		arena_unbind(tsd, iarena->ind, true);
}

void
arena_cleanup(tsd_t *tsd)
{
	arena_t *arena;

	arena = tsd_arena_get(tsd);
	if (arena != NULL)
		arena_unbind(tsd, arena->ind, false);
}

void
arenas_tdata_cleanup(tsd_t *tsd)
{
	arena_tdata_t *arenas_tdata;

	/* Prevent tsd->arenas_tdata from being (re)created. */
	*tsd_arenas_tdata_bypassp_get(tsd) = true;

	arenas_tdata = tsd_arenas_tdata_get(tsd);
	if (arenas_tdata != NULL) {
		tsd_arenas_tdata_set(tsd, NULL);
		a0dalloc(arenas_tdata);
	}
}

void
narenas_tdata_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
arenas_tdata_bypass_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}
static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		tsdn_t *tsdn;
		unsigned narenas, i;

		tsdn = tsdn_fetch();

		/*
		 * Merge stats from extant threads. This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events. As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arena_get(tsdn, i, false);
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(tsdn, &arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tsdn, tcache, arena);
				}
				malloc_mutex_unlock(tsdn, &arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}
/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

#ifndef JEMALLOC_HAVE_SECURE_GETENV
static char *
secure_getenv(const char *name)
{

# ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0)
		return (NULL);
# endif
	return (getenv(name));
}
#endif
static unsigned
malloc_ncpus(void)
{
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
	/*
	 * glibc >= 2.6 has the CPU_COUNT macro.
	 *
	 * glibc's sysconf() uses isspace(). glibc allocates for the first time
	 * *before* setting up the isspace tables. Therefore we need a
	 * different method to get the number of CPUs.
	 */
	{
		cpu_set_t set;

		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
		result = CPU_COUNT(&set);
	}
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_slow_flag_init(void)
{

	/*
	 * Combine the runtime options into malloc_slow for fast path. Called
	 * after processing all the options.
	 */
	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
	    | (opt_junk_free ? flag_opt_junk_free : 0)
	    | (opt_quarantine ? flag_opt_quarantine : 0)
	    | (opt_zero ? flag_opt_zero : 0)
	    | (opt_utrace ? flag_opt_utrace : 0)
	    | (opt_xmalloc ? flag_opt_xmalloc : 0);

	if (config_valgrind)
		malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);

	malloc_slow = (malloc_slow_flags != 0);
}
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options. The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && unlikely(in_valgrind)) {
			opt_junk = "false";
			opt_junk_alloc = false;
			opt_junk_free = false;
			assert(!opt_zero);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && unlikely(in_valgrind))
			opt_tcache = false;
	}

	for (i = 0; i < 4; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			opts = config_malloc_conf;
			break;
		case 1:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 2: {
			ssize_t linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
# ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
			    "/etc/malloc.conf"
# endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* Restore errno. */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 3: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = secure_getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}
		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen)) {
#define CONF_MATCH(n)							\
	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n)						\
	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
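/*
 * Worked example (sketch): for the conf string "abort:true", k points at
 * "abort" with klen == 5 and v points at "true" with vlen == 4. The key and
 * value are unterminated slices of the original string, which is why
 * CONF_MATCH() compares the compile-time length sizeof(n)-1 against klen
 * before calling strncmp(); a plain strcmp() would read past the slice.
 */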
#define CONF_HANDLE_BOOL(o, n, cont)					\
			if (CONF_MATCH(n)) {				\
				if (CONF_MATCH_VALUE("true"))		\
					o = true;			\
				else if (CONF_MATCH_VALUE("false"))	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				if (cont)				\
					continue;			\
			}
#define CONF_MIN_no(um, min)	false
#define CONF_MIN_yes(um, min)	((um) < (min))
#define CONF_MAX_no(um, max)	false
#define CONF_MAX_yes(um, max)	((um) > (max))
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
			if (CONF_MATCH(n)) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if (CONF_MIN_##check_min(um,	\
					    (t)(min)))			\
						o = (t)(min);		\
					else if (CONF_MAX_##check_max(	\
					    um, (t)(max)))		\
						o = (t)(max);		\
					else				\
						o = (t)um;		\
				} else {				\
					if (CONF_MIN_##check_min(um,	\
					    (t)(min)) ||		\
					    CONF_MAX_##check_max(um,	\
					    (t)(max))) {		\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else				\
						o = (t)um;		\
				}					\
				continue;				\
			}
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
    clip)								\
			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
			    check_min, check_max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
			CONF_HANDLE_T_U(size_t, o, n, min, max,		\
			    check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (CONF_MATCH(n)) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)(min) || l >	\
				    (ssize_t)(max)) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define CONF_HANDLE_CHAR_P(o, n, d)					\
			if (CONF_MATCH(n)) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}
			CONF_HANDLE_BOOL(opt_abort, "abort", true)
			/*
			 * Chunks always require at least one header page,
			 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
			 * possibly an additional page in the presence of
			 * redzones. In order to simplify options processing,
			 * use a conservative bound that accommodates all these
			 * constraints.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
			    (sizeof(size_t) << 3) - 1, yes, yes, true)
1152 if (strncmp("dss", k, klen) == 0) {
1155 for (i = 0; i < dss_prec_limit; i++) {
1156 if (strncmp(dss_prec_names[i], v, vlen)
1158 if (chunk_dss_prec_set(i)) {
1160 "Error setting dss",
1171 malloc_conf_error("Invalid conf value",
1176 CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1177 UINT_MAX, yes, no, false)
1178 if (strncmp("purge", k, klen) == 0) {
1181 for (i = 0; i < purge_mode_limit; i++) {
1182 if (strncmp(purge_mode_names[i], v,
1184 opt_purge = (purge_mode_t)i;
1190 malloc_conf_error("Invalid conf value",
1195 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
1196 -1, (sizeof(size_t) << 3) - 1)
1197 CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
1199 CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
1201 if (CONF_MATCH("junk")) {
1202 if (CONF_MATCH_VALUE("true")) {
1203 if (config_valgrind &&
1204 unlikely(in_valgrind)) {
1206 "Deallocation-time "
1207 "junk filling cannot "
1210 "Valgrind", k, klen, v,
1214 opt_junk_alloc = true;
1215 opt_junk_free = true;
1217 } else if (CONF_MATCH_VALUE("false")) {
1219 opt_junk_alloc = opt_junk_free =
1221 } else if (CONF_MATCH_VALUE("alloc")) {
1223 opt_junk_alloc = true;
1224 opt_junk_free = false;
1225 } else if (CONF_MATCH_VALUE("free")) {
1226 if (config_valgrind &&
1227 unlikely(in_valgrind)) {
1229 "Deallocation-time "
1230 "junk filling cannot "
1233 "Valgrind", k, klen, v,
1237 opt_junk_alloc = false;
1238 opt_junk_free = true;
1242 "Invalid conf value", k,
1247 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
1248 0, SIZE_T_MAX, no, no, false)
1249 CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
1250 CONF_HANDLE_BOOL(opt_zero, "zero", true)
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache",
				    !config_valgrind || !in_valgrind)
				if (CONF_MATCH("tcache")) {
					assert(config_valgrind && in_valgrind);
					if (opt_tcache) {
						opt_tcache = false;
						malloc_conf_error(
						    "tcache cannot be enabled "
						    "while running inside Valgrind",
						    k, klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof", true)
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
				    true)
				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
				    "prof_thread_active_init", true)
				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
				    - 1, no, yes, true)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
				    true)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
				    true)
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
				    true)
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
				    true)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_MATCH
#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL
#undef CONF_MIN_no
#undef CONF_MIN_yes
#undef CONF_MAX_no
#undef CONF_MAX_yes
#undef CONF_HANDLE_T_U
#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
static bool
malloc_init_hard_needed(void)
{

	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
	    malloc_init_recursible)) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
		spin_t spinner;

		/* Busy-wait until the initializing thread completes. */
		spin_init(&spinner);
		do {
			malloc_mutex_unlock(TSDN_NULL, &init_lock);
			spin_adaptive(&spinner);
			malloc_mutex_lock(TSDN_NULL, &init_lock);
		} while (!malloc_initialized());
		return (false);
	}
#endif
	return (true);
}
static bool
malloc_init_hard_a0_locked()
{

	malloc_initializer = INITIALIZER;

	if (config_prof)
		prof_boot0();
	malloc_conf_init();
	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (config_tcache && tcache_boot(TSDN_NULL))
		return (true);
	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
		return (true);
	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_auto = 1;
	narenas_total_set(narenas_auto);
	arenas = &a0;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
	/*
	 * Initialize one arena here. The rest are lazily created in
	 * arena_choose_hard().
	 */
	if (arena_init(TSDN_NULL, 0) == NULL)
		return (true);

	malloc_init_state = malloc_init_a0_initialized;

	return (false);
}
static bool
malloc_init_hard_a0(void)
{
	bool ret;

	malloc_mutex_lock(TSDN_NULL, &init_lock);
	ret = malloc_init_hard_a0_locked();
	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	return (ret);
}
/* Initialize data structures which may trigger recursive allocation. */
static bool
malloc_init_hard_recursible(void)
{

	malloc_init_state = malloc_init_recursible;

	ncpus = malloc_ncpus();

#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
    && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
    !defined(__native_client__))
	/* LinuxThreads' pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
		return (true);
	}
#endif

	return (false);
}
static bool
malloc_init_hard_finish(tsdn_t *tsdn)
{

	if (malloc_mutex_boot())
		return (true);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
	 */
	if (narenas_auto > MALLOCX_ARENA_MAX) {
		narenas_auto = MALLOCX_ARENA_MAX;
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total_set(narenas_auto);

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
	    (MALLOCX_ARENA_MAX+1));
	if (arenas == NULL)
		return (true);
	/* Copy the pointer to the one arena that was already initialized. */
	arena_set(0, a0);

	malloc_init_state = malloc_init_initialized;
	malloc_slow_flag_init();

	return (false);
}
static bool
malloc_init_hard(void)
{
	tsd_t *tsd;

#if defined(_WIN32) && _WIN32_WINNT < 0x0600
	_init_init_lock();
#endif
	malloc_mutex_lock(TSDN_NULL, &init_lock);
	if (!malloc_init_hard_needed()) {
		malloc_mutex_unlock(TSDN_NULL, &init_lock);
		return (false);
	}

	if (malloc_init_state != malloc_init_a0_initialized &&
	    malloc_init_hard_a0_locked()) {
		malloc_mutex_unlock(TSDN_NULL, &init_lock);
		return (true);
	}

	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	/* Recursive allocation relies on functional tsd. */
	tsd = malloc_tsd_boot0();
	if (tsd == NULL)
		return (true);
	if (malloc_init_hard_recursible())
		return (true);
	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);

	if (config_prof && prof_boot2(tsd)) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
		return (true);
	}

	if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
		return (true);
	}

	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
	malloc_tsd_boot1();

	return (false);
}
/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */
static void *
ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
    prof_tctx_t *tctx, bool slow_path)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		szind_t ind_large = size2index(LARGE_MINCLASS);
		p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = ialloc(tsd, usize, ind, zero, slow_path);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
	else
		p = ialloc(tsd, usize, ind, zero, slow_path);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, usize, tctx);

	return (p);
}
/*
 * ialloc_body() is inlined so that fast and slow paths are generated separately
 * with statically known slow_path.
 *
 * This function guarantees that *tsdn is non-NULL on success.
 */
JEMALLOC_ALWAYS_INLINE_C void *
ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;
	szind_t ind;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	ind = size2index(size);
	if (unlikely(ind >= NSIZES))
		return (NULL);

	if (config_stats || (config_prof && opt_prof) || (slow_path &&
	    config_valgrind && unlikely(in_valgrind))) {
		*usize = index2size(ind);
		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
	}

	if (config_prof && opt_prof)
		return (ialloc_prof(tsd, *usize, ind, zero, slow_path));

	return (ialloc(tsd, size, ind, zero, slow_path));
}
JEMALLOC_ALWAYS_INLINE_C void
ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
    bool update_errno, bool slow_path)
{

	assert(!tsdn_null(tsdn) || ret == NULL);

	if (unlikely(ret == NULL)) {
		if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_printf("<jemalloc>: Error in %s(): out of "
			    "memory\n", func);
			abort();
		}
		if (update_errno)
			set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		assert(usize == isalloc(tsdn, ret, config_prof));
		*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
	}
	witness_assert_lockless(tsdn);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0)
		size = 1;

	if (likely(!malloc_slow)) {
		ret = ialloc_body(size, false, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
	} else {
		ret = ialloc_body(size, false, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
		UTRACE(0, size, ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
	}

	return (ret);
}
static void *
imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = ipalloc(tsd, usize, alignment, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
	else
		p = ipalloc(tsd, usize, alignment, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, usize, tctx);

	return (p);
}
JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
	int ret;
	tsd_t *tsd;
	size_t usize;
	void *result;

	assert(min_alignment != 0);

	if (unlikely(malloc_init())) {
		tsd = NULL;
		result = NULL;
		goto label_oom;
	}
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (size == 0)
		size = 1;

	/* Make sure that alignment is a large enough power of 2. */
	if (unlikely(((alignment - 1) & alignment) != 0
	    || (alignment < min_alignment))) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error allocating "
			    "aligned memory: invalid alignment\n");
			abort();
		}
		result = NULL;
		ret = EINVAL;
		goto label_return;
	}

	usize = sa2u(size, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
		result = NULL;
		goto label_oom;
	}

	if (config_prof && opt_prof)
		result = imemalign_prof(tsd, alignment, usize);
	else
		result = ipalloc(tsd, usize, alignment, false);
	if (unlikely(result == NULL))
		goto label_oom;
	assert(((uintptr_t)result & (alignment - 1)) == ZU(0));

	*memptr = result;
	ret = 0;
label_return:
	if (config_stats && likely(result != NULL)) {
		assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
		*tsd_thread_allocatedp_get(tsd) += usize;
	}
	UTRACE(0, size, result);
	JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
	    false);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
label_oom:
	assert(result == NULL);
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error allocating aligned memory: "
		    "out of memory\n");
		abort();
	}
	ret = ENOMEM;
	witness_assert_lockless(tsd_tsdn(tsd));
	goto label_return;
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret;

	ret = imemalign(memptr, alignment, size, sizeof(void *));

	return (ret);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
		ret = NULL;
		set_errno(err);
	}

	return (ret);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		if (num == 0 || size == 0)
			num_size = 1;
		else
			num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
	/*
	 * Try to avoid division here. We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
	    2))) && (num_size / size != num)))
		num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
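	/*
	 * Worked example of the check above (sketch, assuming 64-bit size_t):
	 * SIZE_T_MAX << 32 masks the upper 32 bits, so if (num | size) has
	 * none of those bits set, both operands are < 2^32 and num * size
	 * cannot overflow 2^64; the division is skipped entirely. The
	 * relatively expensive num_size / size != num test runs only when at
	 * least one operand is large.
	 */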

	if (likely(!malloc_slow)) {
		ret = ialloc_body(num_size, true, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
	} else {
		ret = ialloc_body(num_size, true, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
		UTRACE(0, num_size, ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
	}

	return (ret);
}
static void *
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
	else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
	    old_tctx);

	return (p);
}
JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		prof_free(tsd, ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;

	if (likely(!slow_path))
		iqalloc(tsd, ptr, tcache, false);
	else {
		if (config_valgrind && unlikely(in_valgrind))
			rzsize = p2rz(tsd_tsdn(tsd), ptr);
		iqalloc(tsd, ptr, tcache, true);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
{
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof)
		prof_free(tsd, ptr, usize);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;

	if (config_valgrind && unlikely(in_valgrind))
		rzsize = p2rz(tsd_tsdn(tsd), ptr);
	isqalloc(tsd, ptr, usize, tcache, slow_path);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
	void *ret;
	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

	if (unlikely(size == 0)) {
		if (ptr != NULL) {
			tsd_t *tsd;

			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			tsd = tsd_fetch();
			ifree(tsd, ptr, tcache_get(tsd, false), true);
			return (NULL);
		}
		size = 1;
	}

	if (likely(ptr != NULL)) {
		tsd_t *tsd;

		assert(malloc_initialized() || IS_INITIALIZER);
		malloc_thread_init();
		tsd = tsd_fetch();

		witness_assert_lockless(tsd_tsdn(tsd));

		old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		if (config_valgrind && unlikely(in_valgrind)) {
			old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
			    u2rz(old_usize);
		}

		if (config_prof && opt_prof) {
			usize = s2u(size);
			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
			    NULL : irealloc_prof(tsd, ptr, old_usize, usize);
		} else {
			if (config_stats || (config_valgrind &&
			    unlikely(in_valgrind)))
				usize = s2u(size);
			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
		}
		tsdn = tsd_tsdn(tsd);
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (likely(!malloc_slow))
			ret = ialloc_body(size, false, &tsdn, &usize, false);
		else
			ret = ialloc_body(size, false, &tsdn, &usize, true);
		assert(!tsdn_null(tsdn) || ret == NULL);
	}

	if (unlikely(ret == NULL)) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		tsd_t *tsd;

		assert(usize == isalloc(tsdn, ret, config_prof));
		tsd = tsdn_tsd(tsdn);
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
	    old_usize, old_rzsize, maybe, false);
	witness_assert_lockless(tsdn);
	return (ret);
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (likely(ptr != NULL)) {
		tsd_t *tsd = tsd_fetch();
		witness_assert_lockless(tsd_tsdn(tsd));
		if (likely(!malloc_slow))
			ifree(tsd, ptr, tcache_get(tsd, false), false);
		else
			ifree(tsd, ptr, tcache_get(tsd, false), true);
		witness_assert_lockless(tsd_tsdn(tsd));
	}
}
/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif
/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)
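/*
 * Expansion sketch: when jemalloc_defs.h contains "#define je_malloc malloc",
 * is_malloc(je_malloc) first expands its argument, yielding
 * is_malloc_(malloc) -> malloc_is_malloc -> 1; without that define it yields
 * malloc_is_je_malloc, which is undefined and so evaluates to 0 in the #if
 * below.
 */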
#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc. The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
# endif
# ifdef __GLIBC__
/*
 * To enable static linking with glibc, the libc specific malloc interface must
 * be implemented also, so none of glibc's malloc.o functions are added to the
 * link.
 */
# define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
/* To force macro expansion of je_ prefix before stringification. */
# define PREALIAS(je_fn) ALIAS(je_fn)
void *__libc_malloc(size_t size) PREALIAS(je_malloc);
void __libc_free(void* ptr) PREALIAS(je_free);
void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
void *__libc_valloc(size_t size) PREALIAS(je_valloc);
int __posix_memalign(void** r, size_t a, size_t s)
	PREALIAS(je_posix_memalign);
# undef PREALIAS
# undef ALIAS
# endif
#endif
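/*
 * Expansion sketch: with "#define je_malloc malloc" in effect, PREALIAS lets
 * the preprocessor rewrite je_malloc to malloc before ALIAS stringifies it,
 * so __libc_malloc becomes __attribute__((alias ("malloc"), used)); calling
 * ALIAS() directly would stringify the unexpanded token and produce
 * alias ("je_malloc") instead.
 */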
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */
JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
		*alignment = 0;
		*usize = s2u(size);
	} else {
		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
		*usize = sa2u(size, *alignment);
	}
	if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
		return (true);
	*zero = MALLOCX_ZERO_GET(flags);
	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			*tcache = NULL;
		else
			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		*tcache = tcache_get(tsd, true);
	if ((flags & MALLOCX_ARENA_MASK) != 0) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		*arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(*arena == NULL))
			return (true);
	} else
		*arena = NULL;
	return (false);
}
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	szind_t ind;

	if (unlikely(alignment != 0))
		return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
	ind = size2index(usize);
	assert(ind < NSIZES);
	return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
	    slow_path));
}

static void *
imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	void *p;

	if (usize <= SMALL_MAXCLASS) {
		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
		p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
		    tcache, arena, slow_path);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsdn, p, usize);
	} else {
		p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
		    slow_path);
	}

	return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;
	prof_tctx_t *tctx;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
	if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
		p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else if ((uintptr_t)tctx > (uintptr_t)1U) {
		p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else
		p = NULL;
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);

	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
    bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
	    arena, slow_path);
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}
/* This function guarantees that *tsdn is non-NULL on success. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	if (likely(flags == 0)) {
		szind_t ind = size2index(size);
		if (unlikely(ind >= NSIZES))
			return (NULL);
		if (config_stats || (config_prof && opt_prof) || (slow_path &&
		    config_valgrind && unlikely(in_valgrind))) {
			*usize = index2size(ind);
			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
		}

		if (config_prof && opt_prof) {
			return (ialloc_prof(tsd, *usize, ind, false,
			    slow_path));
		}

		return (ialloc(tsd, size, ind, false, slow_path));
	}

	if (config_prof && opt_prof)
		return (imallocx_prof(tsd, size, flags, usize, slow_path));

	return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
	tsdn_t *tsdn;
	void *p;
	size_t usize;

	assert(size != 0);

	if (likely(!malloc_slow)) {
		p = imallocx_body(size, flags, &tsdn, &usize, false);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
	} else {
		p = imallocx_body(size, flags, &tsdn, &usize, true);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
		UTRACE(0, size, p);
		JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
		    MALLOCX_ZERO_GET(flags));
	}

	return (p);
}
static void *
irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
		    zero, tcache, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
		    tcache, arena);
	}

	return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
		    alignment, zero, tcache, arena, tctx);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
		    tcache, arena);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, false);
		return (NULL);
	}

	if (p == old_ptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied. Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation. Therefore, query the actual value of usize.
		 */
		*usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);

	return (p);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(arena == NULL))
			goto label_oom;
	} else
		arena = NULL;

	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, true);

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			goto label_oom;
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
	} else {
		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
		    tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
			usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
	    old_usize, old_rzsize, no, zero);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (p);
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (NULL);
}
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize;

	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(tsdn, ptr, config_prof);

	return (usize);
}

static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
{
	size_t usize;

	if (tctx == NULL)
		return (old_usize);
	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
	    zero);

	return (usize);
}
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize_max, usize;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
	/*
	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
	 * Therefore, compute its maximum possible value and use that in
	 * prof_alloc_prep() to decide whether to capture a backtrace.
	 * prof_realloc() will use the actual usize to decide whether to sample.
	 */
	if (alignment == 0) {
		usize_max = s2u(size+extra);
		assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
	} else {
		usize_max = sa2u(size+extra, alignment);
		if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
			/*
			 * usize_max is out of range, and chances are that
			 * allocation will fail, but use the maximum possible
			 * value and carry on with prof_alloc_prep(), just in
			 * case allocation succeeds.
			 */
			usize_max = HUGE_MAXCLASS;
		}
	}
	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
		    size, extra, alignment, zero, tctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (usize == old_usize) {
		prof_alloc_rollback(tsd, tctx, false);
		return (usize);
	}
	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
	    old_tctx);

	return (usize);
}
2518 je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2521 size_t usize, old_usize;
2522 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2523 size_t alignment = MALLOCX_ALIGN_GET(flags);
2524 bool zero = flags & MALLOCX_ZERO;
2526 assert(ptr != NULL);
2528 assert(SIZE_T_MAX - size >= extra);
2529 assert(malloc_initialized() || IS_INITIALIZER);
2530 malloc_thread_init();
2532 witness_assert_lockless(tsd_tsdn(tsd));
2534 old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
2537 * The API explicitly absolves itself of protecting against (size +
2538 * extra) numerical overflow, but we may need to clamp extra to avoid
2539 * exceeding HUGE_MAXCLASS.
2541 * Ordinarily, size limit checking is handled deeper down, but here we
2542 * have to check as part of (size + extra) clamping, since we need the
2543 * clamped value in the above helper functions.
2545 if (unlikely(size > HUGE_MAXCLASS)) {
2547 goto label_not_resized;
2549 if (unlikely(HUGE_MAXCLASS - size < extra))
2550 extra = HUGE_MAXCLASS - size;
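	/*
	 * Worked example of the clamp above (sketch): if size is
	 * HUGE_MAXCLASS - 16 and the caller passes extra == 64, then
	 * HUGE_MAXCLASS - size == 16 < 64, so extra is reduced to 16 and
	 * size + extra lands exactly on HUGE_MAXCLASS rather than exceeding
	 * the class limit.
	 */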

	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (unlikely(usize == old_usize))
		goto label_not_resized;

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
	    old_usize, old_rzsize, no, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (usize);
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	if (config_ivsalloc)
		usize = ivsalloc(tsdn, ptr, config_prof);
	else
		usize = isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (usize);
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		ifree(tsd, ptr, tcache, false);
	else
		ifree(tsd, ptr, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}
JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(tsdn_t *tsdn, size_t size, int flags)
{
	size_t usize;

	witness_assert_lockless(tsdn);

	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
		usize = s2u(size);
	else
		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
	witness_assert_lockless(tsdn);
	return (usize);
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	usize = inallocx(tsd_tsdn(tsd), size, flags);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));

	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		isfree(tsd, ptr, usize, tcache, false);
	else
		isfree(tsd, ptr, usize, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}

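/*
 * Illustrative sketch (not part of this file): sdallocx() is the sized
 * variant of dallocx().  The size/flags pair passed at deallocation must
 * describe the allocation so that the usize assertion above holds; this
 * spares a metadata size lookup on the fast path.  Assumes the public API
 * from <jemalloc/jemalloc.h>.
 */
#if 0
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t size = 1000;
	void *p = mallocx(size, 0);

	if (p != NULL) {
		/* Any size in [size, nallocx(size, 0)] is acceptable here. */
		sdallocx(p, size, 0);
	}
	return (0);
}
#endif
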
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(size != 0);

	if (unlikely(malloc_init()))
		return (0);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	usize = inallocx(tsdn, size, flags);
	if (unlikely(usize > HUGE_MAXCLASS))
		return (0);

	witness_assert_lockless(tsdn);
	return (usize);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}

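/*
 * Illustrative sketch (not part of this file): a minimal mallctl() read of a
 * scalar control, assuming the public API from <jemalloc/jemalloc.h>.
 */
#if 0
#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void)
{
	unsigned narenas;
	size_t sz = sizeof(narenas);

	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) == 0)
		printf("arenas.narenas: %u\n", narenas);
	return (0);
}
#endif
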
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
	int ret;
	tsdn_t *tsdn;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	ret = ctl_nametomib(tsdn, name, mibp, miblenp);
	witness_assert_lockless(tsdn);
	return (ret);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}

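/*
 * Illustrative sketch (not part of this file): translate a name to a MIB once
 * via mallctlnametomib(), then issue repeated queries through mallctlbymib()
 * to avoid re-parsing the name.  Assumes the public API from
 * <jemalloc/jemalloc.h>.
 */
#if 0
#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void)
{
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);

	if (mallctlnametomib("arenas.narenas", mib, &miblen) == 0) {
		unsigned narenas;
		size_t sz = sizeof(narenas);

		if (mallctlbymib(mib, miblen, &narenas, &sz, NULL, 0) == 0)
			printf("arenas.narenas: %u\n", narenas);
	}
	return (0);
}
#endif
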
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	stats_print(write_cb, cbopaque, opts);
	witness_assert_lockless(tsdn);
}

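/*
 * Illustrative sketch (not part of this file): with a NULL write callback,
 * output goes through the default write path (stderr); characters in opts
 * suppress portions of the report, e.g. 'a' omits per-arena statistics.
 * Assumes the public API from <jemalloc/jemalloc.h>.
 */
#if 0
#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* Dump the full statistics report to stderr. */
	malloc_stats_print(NULL, NULL, NULL);
	return (0);
}
#endif
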
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	if (config_ivsalloc)
		ret = ivsalloc(tsdn, ptr, config_prof);
	else
		ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (ret);
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin compatibility functions.
 */

#define	ALLOCM_LG_ALIGN(la)	(la)
#define	ALLOCM_ALIGN(a)		(ffsl(a)-1)
#define	ALLOCM_ZERO		((int)0x40)
#define	ALLOCM_NO_MOVE		((int)0x80)

#define	ALLOCM_SUCCESS		0
#define	ALLOCM_ERR_OOM		1
#define	ALLOCM_ERR_NOT_MOVED	2

int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;

	assert(ptr != NULL);

	p = je_mallocx(size, flags);
	if (p == NULL)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = isalloc(tsdn_fetch(), p, config_prof);
	*ptr = p;
	return (ALLOCM_SUCCESS);
}

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	int ret;
	bool no_move = flags & ALLOCM_NO_MOVE;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);

	if (no_move) {
		size_t usize = je_xallocx(*ptr, size, extra, flags);
		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
		if (rsize != NULL)
			*rsize = usize;
	} else {
		void *p = je_rallocx(*ptr, size+extra, flags);
		if (p != NULL) {
			*ptr = p;
			ret = ALLOCM_SUCCESS;
		} else
			ret = ALLOCM_ERR_OOM;
		if (rsize != NULL)
			*rsize = isalloc(tsdn_fetch(), *ptr, config_prof);
	}
	return (ret);
}

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	assert(rsize != NULL);
	*rsize = je_sallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{
	je_dallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;

	usize = je_nallocx(size, flags);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

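/*
 * Illustrative sketch (not part of this file): the legacy experimental API
 * above is a thin shim over the *allocx() functions; new code should call
 * *allocx() directly.  This assumes a build whose <jemalloc/jemalloc.h>
 * still declares the experimental allocm()/dallocm() symbols and flags.
 */
#if 0
#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p;
	size_t rsize;

	/* Zeroed allocation of at least 100 bytes; rsize gets usable size. */
	if (allocm(&p, &rsize, 100, ALLOCM_ZERO) == ALLOCM_SUCCESS)
		dallocm(p, 0);
	return (0);
}
#endif
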
#undef ALLOCM_LG_ALIGN
#undef ALLOCM_ALIGN
#undef ALLOCM_ZERO
#undef ALLOCM_NO_MOVE

#undef ALLOCM_SUCCESS
#undef ALLOCM_ERR_OOM
#undef ALLOCM_ERR_NOT_MOVED

/*
 * End compatibility functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{
	malloc_init();
}
#endif

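/*
 * Sketch of the registration that pairs with the handlers below (the actual
 * call is made during initialization, elsewhere in this file); shown here
 * only to illustrate the mechanism, assuming !JEMALLOC_MUTEX_INIT_CB:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 */
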
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, j, narenas;
	arena_t *arena;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	narenas = narenas_total_get();

	witness_prefork(tsd);
	/* Acquire all mutexes in a safe order. */
	ctl_prefork(tsd_tsdn(tsd));
	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
	prof_prefork0(tsd_tsdn(tsd));
	for (i = 0; i < 3; i++) {
		for (j = 0; j < narenas; j++) {
			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
			    NULL) {
				switch (i) {
				case 0:
					arena_prefork0(tsd_tsdn(tsd), arena);
					break;
				case 1:
					arena_prefork1(tsd_tsdn(tsd), arena);
					break;
				case 2:
					arena_prefork2(tsd_tsdn(tsd), arena);
					break;
				default: not_reached();
				}
			}
		}
	}
	base_prefork(tsd_tsdn(tsd));
	for (i = 0; i < narenas; i++) {
		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_prefork3(tsd_tsdn(tsd), arena);
	}
	prof_prefork1(tsd_tsdn(tsd));
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_parent(tsd);
	/* Release all mutexes, now that fork() has completed. */
	base_postfork_parent(tsd_tsdn(tsd));
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_parent(tsd_tsdn(tsd), arena);
	}
	prof_postfork_parent(tsd_tsdn(tsd));
	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_parent(tsd_tsdn(tsd));
}

void
jemalloc_postfork_child(void)
{
	tsd_t *tsd;
	unsigned i, narenas;

	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_child(tsd);
	/* Release all mutexes, now that fork() has completed. */
	base_postfork_child(tsd_tsdn(tsd));
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_child(tsd_tsdn(tsd), arena);
	}
	prof_postfork_child(tsd_tsdn(tsd));
	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_child(tsd_tsdn(tsd));
}

void
_malloc_first_thread(void)
{
	(void)malloc_mutex_first_thread();
}

/******************************************************************************/