2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/atomic.h"
7 #include "jemalloc/internal/ctl.h"
8 #include "jemalloc/internal/extent_dss.h"
9 #include "jemalloc/internal/extent_mmap.h"
10 #include "jemalloc/internal/jemalloc_internal_types.h"
11 #include "jemalloc/internal/malloc_io.h"
12 #include "jemalloc/internal/mutex.h"
13 #include "jemalloc/internal/rtree.h"
14 #include "jemalloc/internal/size_classes.h"
15 #include "jemalloc/internal/spin.h"
16 #include "jemalloc/internal/sz.h"
17 #include "jemalloc/internal/ticker.h"
18 #include "jemalloc/internal/util.h"
20 /******************************************************************************/
23 /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
24 const char *__malloc_options_1_0 = NULL;
25 __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
27 /* Runtime configuration options. */
28 const char *je_malloc_conf
47 const char *opt_junk =
48 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
55 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
62 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
69 bool opt_utrace = false;
70 bool opt_xmalloc = false;
71 bool opt_zero = false;
72 unsigned opt_narenas = 0;
76 /* Protects arenas initialization. */
77 malloc_mutex_t arenas_lock;
79 * Arenas that are used to service external requests. Not all elements of the
80 * arenas array are necessarily used; arenas are created lazily as needed.
82 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
83 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
84 * takes some action to create them and allocate from them.
86 * Points to an arena_t.
88 JEMALLOC_ALIGNED(CACHELINE)
89 atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
90 static atomic_u_t narenas_total; /* Use narenas_total_*(). */
91 static arena_t *a0; /* arenas[0]; read-only after initialization. */
92 unsigned narenas_auto; /* Read-only after initialization. */
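/*
 * For illustration: the manually created arenas mentioned above normally come
 * from the "arenas.create" mallctl.  A minimal application-side sketch (not
 * part of this file), assuming the public <jemalloc/jemalloc.h> API:
 */
#if 0	/* Illustrative sketch; not compiled. */
#include <jemalloc/jemalloc.h>

static void *
alloc_from_private_arena(size_t size) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);

	/* Create a fresh arena; its index lands beyond narenas_auto. */
	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
		return NULL;
	}
	/* Allocate explicitly from that arena. */
	return mallocx(size, MALLOCX_ARENA(arena_ind));
}
#endif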
95 malloc_init_uninitialized = 3,
96 malloc_init_a0_initialized = 2,
97 malloc_init_recursible = 1,
98 malloc_init_initialized = 0 /* Common case --> jnz. */
100 static malloc_init_t malloc_init_state = malloc_init_uninitialized;
102 /* False should be the common case; initialized to true so that the slow path is taken until initialization completes. */
103 bool malloc_slow = true;
105 /* When malloc_slow is true, set the corresponding bits for sanity check. */
107 flag_opt_junk_alloc = (1U),
108 flag_opt_junk_free = (1U << 1),
109 flag_opt_zero = (1U << 2),
110 flag_opt_utrace = (1U << 3),
111 flag_opt_xmalloc = (1U << 4)
113 static uint8_t malloc_slow_flags;
115 #ifdef JEMALLOC_THREADED_INIT
116 /* Used to let the initializing thread recursively allocate. */
117 # define NO_INITIALIZER ((unsigned long)0)
118 # define INITIALIZER pthread_self()
119 # define IS_INITIALIZER (malloc_initializer == pthread_self())
120 static pthread_t malloc_initializer = NO_INITIALIZER;
122 # define NO_INITIALIZER false
123 # define INITIALIZER true
124 # define IS_INITIALIZER malloc_initializer
125 static bool malloc_initializer = NO_INITIALIZER;
128 /* Used to avoid initialization races. */
130 #if _WIN32_WINNT >= 0x0600
131 static malloc_mutex_t init_lock = SRWLOCK_INIT;
133 static malloc_mutex_t init_lock;
134 static bool init_lock_initialized = false;
136 JEMALLOC_ATTR(constructor)
138 _init_init_lock(void) {
140 * If another constructor in the same binary is using mallctl to e.g.
141 * set up extent hooks, it may end up running before this one, and
142 * malloc_init_hard will crash trying to lock the uninitialized lock. So
143 * we force an initialization of the lock in malloc_init_hard as well.
144 * We don't try to care about atomicity of the accesses to the
145 * init_lock_initialized boolean, since it really only matters early in
146 * the process creation, before any separate thread normally starts
149 if (!init_lock_initialized) {
150 malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
151 malloc_mutex_rank_exclusive);
153 init_lock_initialized = true;
157 # pragma section(".CRT$XCU", read)
158 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
159 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
163 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
167 void *p; /* Input pointer (as in realloc(p, s)). */
168 size_t s; /* Request size. */
169 void *r; /* Result pointer. */
172 #ifdef JEMALLOC_UTRACE
173 # define UTRACE(a, b, c) do { \
174 if (unlikely(opt_utrace)) { \
175 int utrace_serrno = errno; \
176 malloc_utrace_t ut; \
180 utrace(&ut, sizeof(ut)); \
181 errno = utrace_serrno; \
185 # define UTRACE(a, b, c)
188 /* Whether we encountered any invalid config options. */
189 static bool had_conf_error = false;
191 /******************************************************************************/
193 * Function prototypes for static functions that are referenced prior to
197 static bool malloc_init_hard_a0(void);
198 static bool malloc_init_hard(void);
200 /******************************************************************************/
202 * Begin miscellaneous support functions.
206 malloc_initialized(void) {
207 return (malloc_init_state == malloc_init_initialized);
210 JEMALLOC_ALWAYS_INLINE bool
211 malloc_init_a0(void) {
212 if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
213 return malloc_init_hard_a0();
218 JEMALLOC_ALWAYS_INLINE bool
220 if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
227 * The a0*() functions are used instead of i{d,}alloc() in situations that
228 * cannot tolerate TLS variable access.
232 a0ialloc(size_t size, bool zero, bool is_internal) {
233 if (unlikely(malloc_init_a0())) {
237 return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
238 is_internal, arena_get(TSDN_NULL, 0, true), true);
242 a0idalloc(void *ptr, bool is_internal) {
243 idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
247 a0malloc(size_t size) {
248 return a0ialloc(size, false, true);
252 a0dalloc(void *ptr) {
253 a0idalloc(ptr, true);
257 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
258 * situations that cannot tolerate TLS variable access (TLS allocation and very
259 * early internal data structure initialization).
263 bootstrap_malloc(size_t size) {
264 if (unlikely(size == 0)) {
268 return a0ialloc(size, false, false);
272 bootstrap_calloc(size_t num, size_t size) {
275 num_size = num * size;
276 if (unlikely(num_size == 0)) {
277 assert(num == 0 || size == 0);
281 return a0ialloc(num_size, true, false);
285 bootstrap_free(void *ptr) {
286 if (unlikely(ptr == NULL)) {
290 a0idalloc(ptr, false);
294 arena_set(unsigned ind, arena_t *arena) {
295 atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
299 narenas_total_set(unsigned narenas) {
300 atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
304 narenas_total_inc(void) {
305 atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
309 narenas_total_get(void) {
310 return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
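/*
 * For illustration: the release stores above (arena_set(), narenas_total_set(),
 * narenas_total_inc()) paired with the acquire load in narenas_total_get()
 * form the usual C11 publication pattern.  A minimal stand-alone sketch of the
 * same idiom, with hypothetical names:
 */
#if 0	/* Illustrative sketch; not compiled. */
#include <stdatomic.h>

typedef struct { int value; } widget_t;
static _Atomic(widget_t *) published;

static void
publish(widget_t *w) {
	w->value = 42;			/* Fully initialize first... */
	atomic_store_explicit(&published, w, memory_order_release);
}

static widget_t *
lookup(void) {
	/* ...so an acquire load observes a fully initialized object. */
	return atomic_load_explicit(&published, memory_order_acquire);
}
#endif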
313 /* Create a new arena and insert it into the arenas array at index ind. */
315 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
318 assert(ind <= narenas_total_get());
319 if (ind >= MALLOCX_ARENA_LIMIT) {
322 if (ind == narenas_total_get()) {
327 * Another thread may have already initialized arenas[ind] if it's an
330 arena = arena_get(tsdn, ind, false);
332 assert(ind < narenas_auto);
336 /* Actually initialize the arena. */
337 arena = arena_new(tsdn, ind, extent_hooks);
343 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
347 if (have_background_thread) {
349 malloc_mutex_lock(tsdn, &background_thread_lock);
350 err = background_thread_create(tsdn_tsd(tsdn), ind);
351 malloc_mutex_unlock(tsdn, &background_thread_lock);
353 malloc_printf("<jemalloc>: error in background thread "
354 "creation for arena %u. Abort.\n", ind);
361 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
364 malloc_mutex_lock(tsdn, &arenas_lock);
365 arena = arena_init_locked(tsdn, ind, extent_hooks);
366 malloc_mutex_unlock(tsdn, &arenas_lock);
368 arena_new_create_background_thread(tsdn, ind);
374 arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
375 arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
376 arena_nthreads_inc(arena, internal);
379 tsd_iarena_set(tsd, arena);
381 tsd_arena_set(tsd, arena);
386 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
387 arena_t *oldarena, *newarena;
389 oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
390 newarena = arena_get(tsd_tsdn(tsd), newind, false);
391 arena_nthreads_dec(oldarena, false);
392 arena_nthreads_inc(newarena, false);
393 tsd_arena_set(tsd, newarena);
397 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
400 arena = arena_get(tsd_tsdn(tsd), ind, false);
401 arena_nthreads_dec(arena, internal);
404 tsd_iarena_set(tsd, NULL);
406 tsd_arena_set(tsd, NULL);
411 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
412 arena_tdata_t *tdata, *arenas_tdata_old;
413 arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
414 unsigned narenas_tdata_old, i;
415 unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
416 unsigned narenas_actual = narenas_total_get();
419 * Dissociate old tdata array (and set up for deallocation upon return)
422 if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
423 arenas_tdata_old = arenas_tdata;
424 narenas_tdata_old = narenas_tdata;
427 tsd_arenas_tdata_set(tsd, arenas_tdata);
428 tsd_narenas_tdata_set(tsd, narenas_tdata);
430 arenas_tdata_old = NULL;
431 narenas_tdata_old = 0;
434 /* Allocate tdata array if it's missing. */
435 if (arenas_tdata == NULL) {
436 bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
437 narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
439 if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
440 *arenas_tdata_bypassp = true;
441 arenas_tdata = (arena_tdata_t *)a0malloc(
442 sizeof(arena_tdata_t) * narenas_tdata);
443 *arenas_tdata_bypassp = false;
445 if (arenas_tdata == NULL) {
449 assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
450 tsd_arenas_tdata_set(tsd, arenas_tdata);
451 tsd_narenas_tdata_set(tsd, narenas_tdata);
455 * Copy to tdata array. It's possible that the actual number of arenas
456 * has increased since narenas_total_get() was called above, but that
457 * causes no correctness issues unless two threads concurrently execute
458 * the arenas.create mallctl, which we trust mallctl synchronization to
462 /* Copy/initialize tickers. */
463 for (i = 0; i < narenas_actual; i++) {
464 if (i < narenas_tdata_old) {
465 ticker_copy(&arenas_tdata[i].decay_ticker,
466 &arenas_tdata_old[i].decay_ticker);
468 ticker_init(&arenas_tdata[i].decay_ticker,
469 DECAY_NTICKS_PER_UPDATE);
472 if (narenas_tdata > narenas_actual) {
473 memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
474 * (narenas_tdata - narenas_actual));
477 /* Read the refreshed tdata array. */
478 tdata = &arenas_tdata[ind];
480 if (arenas_tdata_old != NULL) {
481 a0dalloc(arenas_tdata_old);
486 /* Slow path, called only by arena_choose(). */
488 arena_choose_hard(tsd_t *tsd, bool internal) {
489 arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
491 if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
492 unsigned choose = percpu_arena_choose();
493 ret = arena_get(tsd_tsdn(tsd), choose, true);
495 arena_bind(tsd, arena_ind_get(ret), false);
496 arena_bind(tsd, arena_ind_get(ret), true);
501 if (narenas_auto > 1) {
502 unsigned i, j, choose[2], first_null;
503 bool is_new_arena[2];
506 * Determine binding for both non-internal and internal
509 * choose[0]: For application allocation.
510 * choose[1]: For internal metadata allocation.
513 for (j = 0; j < 2; j++) {
515 is_new_arena[j] = false;
518 first_null = narenas_auto;
519 malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
520 assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
521 for (i = 1; i < narenas_auto; i++) {
522 if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
524 * Choose the arena with the fewest threads assigned to
525 * it; ties go to the lowest-index arena.
527 for (j = 0; j < 2; j++) {
528 if (arena_nthreads_get(arena_get(
529 tsd_tsdn(tsd), i, false), !!j) <
530 arena_nthreads_get(arena_get(
531 tsd_tsdn(tsd), choose[j], false),
536 } else if (first_null == narenas_auto) {
538 * Record the index of the first uninitialized
539 * arena, in case all extant arenas are in use.
541 * NB: It is possible for there to be
542 * discontinuities in terms of initialized
543 * versus uninitialized arenas, due to the
544 * "thread.arena" mallctl.
550 for (j = 0; j < 2; j++) {
551 if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
552 choose[j], false), !!j) == 0 || first_null ==
555 * Use an unloaded arena, or the least loaded
556 * arena if all arenas are already initialized.
558 if (!!j == internal) {
559 ret = arena_get(tsd_tsdn(tsd),
565 /* Initialize a new arena. */
566 choose[j] = first_null;
567 arena = arena_init_locked(tsd_tsdn(tsd),
569 (extent_hooks_t *)&extent_hooks_default);
571 malloc_mutex_unlock(tsd_tsdn(tsd),
575 is_new_arena[j] = true;
576 if (!!j == internal) {
580 arena_bind(tsd, choose[j], !!j);
582 malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
584 for (j = 0; j < 2; j++) {
585 if (is_new_arena[j]) {
586 assert(choose[j] > 0);
587 arena_new_create_background_thread(
588 tsd_tsdn(tsd), choose[j]);
593 ret = arena_get(tsd_tsdn(tsd), 0, false);
594 arena_bind(tsd, 0, false);
595 arena_bind(tsd, 0, true);
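/*
 * For illustration: the "thread.arena" mallctl referenced in the comments
 * above is how an application re-binds the calling thread, which is also how
 * index gaps among initialized arenas can appear.  A minimal application-side
 * sketch, assuming the public <jemalloc/jemalloc.h> API:
 */
#if 0	/* Illustrative sketch; not compiled. */
#include <jemalloc/jemalloc.h>

static int
bind_current_thread_to_arena(unsigned arena_ind) {
	/* Write the new index; the previous one could be read back via oldp. */
	return mallctl("thread.arena", NULL, NULL, &arena_ind,
	    sizeof(arena_ind));
}
#endif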
602 iarena_cleanup(tsd_t *tsd) {
605 iarena = tsd_iarena_get(tsd);
606 if (iarena != NULL) {
607 arena_unbind(tsd, arena_ind_get(iarena), true);
612 arena_cleanup(tsd_t *tsd) {
615 arena = tsd_arena_get(tsd);
617 arena_unbind(tsd, arena_ind_get(arena), false);
622 arenas_tdata_cleanup(tsd_t *tsd) {
623 arena_tdata_t *arenas_tdata;
625 /* Prevent tsd->arenas_tdata from being (re)created. */
626 *tsd_arenas_tdata_bypassp_get(tsd) = true;
628 arenas_tdata = tsd_arenas_tdata_get(tsd);
629 if (arenas_tdata != NULL) {
630 tsd_arenas_tdata_set(tsd, NULL);
631 a0dalloc(arenas_tdata);
636 stats_print_atexit(void) {
644 * Merge stats from extant threads. This is racy, since
645 * individual threads do not lock when recording tcache stats
646 * events. As a consequence, the final stats may be slightly
647 * out of date by the time they are reported, if other threads
648 * continue to allocate.
650 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
651 arena_t *arena = arena_get(tsdn, i, false);
655 malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
656 ql_foreach(tcache, &arena->tcache_ql, link) {
657 tcache_stats_merge(tsdn, tcache, arena);
659 malloc_mutex_unlock(tsdn,
660 &arena->tcache_ql_mtx);
664 je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
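/*
 * For illustration: the same report is available to the application on demand,
 * not only through this atexit() hook.  A minimal sketch, assuming the public
 * malloc_stats_print() entry point (NULL write callback and cookie mean the
 * output goes to the default malloc_message sink):
 */
#if 0	/* Illustrative sketch; not compiled. */
#include <jemalloc/jemalloc.h>

static void
dump_allocator_stats(void) {
	/* Human-readable report with default options. */
	malloc_stats_print(NULL, NULL, NULL);
	/* "J" requests JSON output; other option characters filter sections. */
	malloc_stats_print(NULL, NULL, "J");
}
#endif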
668 * Ensure that we don't hold any locks upon entry to or exit from allocator
669 * code (in a "broad" sense that doesn't count a reentrant allocation as an
672 JEMALLOC_ALWAYS_INLINE void
673 check_entry_exit_locking(tsdn_t *tsdn) {
677 if (tsdn_null(tsdn)) {
680 tsd_t *tsd = tsdn_tsd(tsdn);
682 * It's possible we hold locks at entry/exit if we're in a nested
685 int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
686 if (reentrancy_level != 0) {
689 witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
693 * End miscellaneous support functions.
695 /******************************************************************************/
697 * Begin initialization functions.
701 jemalloc_secure_getenv(const char *name) {
702 #ifdef JEMALLOC_HAVE_SECURE_GETENV
703 return secure_getenv(name);
705 # ifdef JEMALLOC_HAVE_ISSETUGID
706 if (issetugid() != 0) {
721 result = si.dwNumberOfProcessors;
722 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
724 * glibc >= 2.6 has the CPU_COUNT macro.
726 * glibc's sysconf() uses isspace(). glibc allocates for the first time
727 * *before* setting up the isspace tables. Therefore we need a
728 * different method to get the number of CPUs.
733 pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
734 result = CPU_COUNT(&set);
737 result = sysconf(_SC_NPROCESSORS_ONLN);
739 return ((result == -1) ? 1 : (unsigned)result);
743 init_opt_stats_print_opts(const char *v, size_t vlen) {
744 size_t opts_len = strlen(opt_stats_print_opts);
745 assert(opts_len <= stats_print_tot_num_options);
747 for (size_t i = 0; i < vlen; i++) {
749 #define OPTION(o, v, d, s) case o: break;
755 if (strchr(opt_stats_print_opts, v[i]) != NULL) {
756 /* Ignore repeated. */
760 opt_stats_print_opts[opts_len++] = v[i];
761 opt_stats_print_opts[opts_len] = '\0';
762 assert(opts_len <= stats_print_tot_num_options);
764 assert(opts_len == strlen(opt_stats_print_opts));
768 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
769 char const **v_p, size_t *vlen_p) {
771 const char *opts = *opts_p;
775 for (accept = false; !accept;) {
777 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
778 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
779 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
780 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
782 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
783 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
784 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
785 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
787 case '0': case '1': case '2': case '3': case '4': case '5':
788 case '6': case '7': case '8': case '9':
794 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
799 if (opts != *opts_p) {
800 malloc_write("<jemalloc>: Conf string ends "
805 malloc_write("<jemalloc>: Malformed conf string\n");
810 for (accept = false; !accept;) {
815 * Look ahead one character here, because the next time
816 * this function is called, it will assume that end of
817 * input has been cleanly reached if no input remains,
818 * but we have optimistically already consumed the
819 * comma if one exists.
822 malloc_write("<jemalloc>: Conf string ends "
825 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
829 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
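/*
 * For illustration: the grammar consumed above is a comma-separated list of
 * <key>:<value> pairs.  A minimal sketch of supplying such a string from the
 * application (assuming an unprefixed build, where the public symbol is
 * spelled malloc_conf); the same text could instead be placed in the
 * MALLOC_CONF environment variable:
 */
#if 0	/* Illustrative sketch; not compiled. */
/* Parsed at startup into, e.g., opt_narenas and opt_dirty_decay_ms. */
const char *malloc_conf = "narenas:4,dirty_decay_ms:10000";
#endif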
843 malloc_abort_invalid_conf(void) {
844 assert(opt_abort_conf);
845 malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
846 "value (see above).\n");
851 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
853 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
855 had_conf_error = true;
856 if (opt_abort_conf) {
857 malloc_abort_invalid_conf();
862 malloc_slow_flag_init(void) {
864 * Combine the runtime options into malloc_slow for the fast path. Called
865 * after processing all the options.
867 malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
868 | (opt_junk_free ? flag_opt_junk_free : 0)
869 | (opt_zero ? flag_opt_zero : 0)
870 | (opt_utrace ? flag_opt_utrace : 0)
871 | (opt_xmalloc ? flag_opt_xmalloc : 0);
873 malloc_slow = (malloc_slow_flags != 0);
877 malloc_conf_init(void) {
879 char buf[PATH_MAX + 1];
880 const char *opts, *k, *v;
883 for (i = 0; i < 4; i++) {
884 /* Get runtime configuration. */
887 opts = config_malloc_conf;
890 if (je_malloc_conf != NULL) {
892 * Use options that were compiled into the
895 opts = je_malloc_conf;
897 /* No configuration specified. */
905 int saved_errno = errno;
906 const char *linkname =
907 # ifdef JEMALLOC_PREFIX
908 "/etc/"JEMALLOC_PREFIX"malloc.conf"
915 * Try to use the contents of the "/etc/malloc.conf"
916 * symbolic link's name.
918 linklen = readlink(linkname, buf, sizeof(buf) - 1);
920 /* No configuration specified. */
923 set_errno(saved_errno);
930 const char *envname =
931 #ifdef JEMALLOC_PREFIX
932 JEMALLOC_CPREFIX"MALLOC_CONF"
938 if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
940 * Do nothing; opts is already initialized to
941 * the value of the MALLOC_CONF environment
945 /* No configuration specified. */
956 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
958 #define CONF_MATCH(n) \
959 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
960 #define CONF_MATCH_VALUE(n) \
961 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
962 #define CONF_HANDLE_BOOL(o, n) \
963 if (CONF_MATCH(n)) { \
964 if (CONF_MATCH_VALUE("true")) { \
966 } else if (CONF_MATCH_VALUE("false")) { \
970 "Invalid conf value", \
975 #define CONF_MIN_no(um, min) false
976 #define CONF_MIN_yes(um, min) ((um) < (min))
977 #define CONF_MAX_no(um, max) false
978 #define CONF_MAX_yes(um, max) ((um) > (max))
979 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
980 if (CONF_MATCH(n)) { \
985 um = malloc_strtoumax(v, &end, 0); \
986 if (get_errno() != 0 || (uintptr_t)end -\
987 (uintptr_t)v != vlen) { \
989 "Invalid conf value", \
992 if (CONF_MIN_##check_min(um, \
996 CONF_MAX_##check_max(um, \
1003 if (CONF_MIN_##check_min(um, \
1005 CONF_MAX_##check_max(um, \
1007 malloc_conf_error( \
1010 k, klen, v, vlen); \
1017 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
1019 CONF_HANDLE_T_U(unsigned, o, n, min, max, \
1020 check_min, check_max, clip)
1021 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
1022 CONF_HANDLE_T_U(size_t, o, n, min, max, \
1023 check_min, check_max, clip)
1024 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
1025 if (CONF_MATCH(n)) { \
1030 l = strtol(v, &end, 0); \
1031 if (get_errno() != 0 || (uintptr_t)end -\
1032 (uintptr_t)v != vlen) { \
1033 malloc_conf_error( \
1034 "Invalid conf value", \
1035 k, klen, v, vlen); \
1036 } else if (l < (ssize_t)(min) || l > \
1038 malloc_conf_error( \
1039 "Out-of-range conf value", \
1040 k, klen, v, vlen); \
1046 #define CONF_HANDLE_CHAR_P(o, n, d) \
1047 if (CONF_MATCH(n)) { \
1048 size_t cpylen = (vlen <= \
1049 sizeof(o)-1) ? vlen : \
1051 strncpy(o, v, cpylen); \
1056 CONF_HANDLE_BOOL(opt_abort, "abort")
1057 CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1058 if (opt_abort_conf && had_conf_error) {
1059 malloc_abort_invalid_conf();
1061 CONF_HANDLE_BOOL(opt_retain, "retain")
1062 if (strncmp("dss", k, klen) == 0) {
1065 for (i = 0; i < dss_prec_limit; i++) {
1066 if (strncmp(dss_prec_names[i], v, vlen)
1068 if (extent_dss_prec_set(i)) {
1070 "Error setting dss",
1081 malloc_conf_error("Invalid conf value",
1086 CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1087 UINT_MAX, yes, no, false)
1088 CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1089 "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1090 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1092 CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1093 "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1094 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1096 CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1097 if (CONF_MATCH("stats_print_opts")) {
1098 init_opt_stats_print_opts(v, vlen);
1102 if (CONF_MATCH("junk")) {
1103 if (CONF_MATCH_VALUE("true")) {
1105 opt_junk_alloc = opt_junk_free =
1107 } else if (CONF_MATCH_VALUE("false")) {
1109 opt_junk_alloc = opt_junk_free =
1111 } else if (CONF_MATCH_VALUE("alloc")) {
1113 opt_junk_alloc = true;
1114 opt_junk_free = false;
1115 } else if (CONF_MATCH_VALUE("free")) {
1117 opt_junk_alloc = false;
1118 opt_junk_free = true;
1121 "Invalid conf value", k,
1126 CONF_HANDLE_BOOL(opt_zero, "zero")
1128 if (config_utrace) {
1129 CONF_HANDLE_BOOL(opt_utrace, "utrace")
1131 if (config_xmalloc) {
1132 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1134 CONF_HANDLE_BOOL(opt_tcache, "tcache")
1135 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1136 -1, (sizeof(size_t) << 3) - 1)
1137 if (strncmp("percpu_arena", k, klen) == 0) {
1140 for (i = percpu_arena_mode_names_base; i <
1141 percpu_arena_mode_names_limit; i++) {
1142 if (strncmp(percpu_arena_mode_names[i],
1144 if (!have_percpu_arena) {
1146 "No getcpu support",
1149 opt_percpu_arena = i;
1155 malloc_conf_error("Invalid conf value",
1160 CONF_HANDLE_BOOL(opt_background_thread,
1161 "background_thread");
1163 CONF_HANDLE_BOOL(opt_prof, "prof")
1164 CONF_HANDLE_CHAR_P(opt_prof_prefix,
1165 "prof_prefix", "jeprof")
1166 CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1167 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1168 "prof_thread_active_init")
1169 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1170 "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1172 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1173 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1174 "lg_prof_interval", -1,
1175 (sizeof(uint64_t) << 3) - 1)
1176 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1177 CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1178 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1180 malloc_conf_error("Invalid conf pair", k, klen, v,
1183 #undef CONF_MATCH_VALUE
1184 #undef CONF_HANDLE_BOOL
1189 #undef CONF_HANDLE_T_U
1190 #undef CONF_HANDLE_UNSIGNED
1191 #undef CONF_HANDLE_SIZE_T
1192 #undef CONF_HANDLE_SSIZE_T
1193 #undef CONF_HANDLE_CHAR_P
1199 malloc_init_hard_needed(void) {
1200 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1201 malloc_init_recursible)) {
1203 * Another thread initialized the allocator before this one
1204 * acquired init_lock, or this thread is the initializing
1205 * thread, and it is recursively allocating.
1209 #ifdef JEMALLOC_THREADED_INIT
1210 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1211 /* Busy-wait until the initializing thread completes. */
1212 spin_t spinner = SPIN_INITIALIZER;
1214 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1215 spin_adaptive(&spinner);
1216 malloc_mutex_lock(TSDN_NULL, &init_lock);
1217 } while (!malloc_initialized());
1225 malloc_init_hard_a0_locked() {
1226 malloc_initializer = INITIALIZER;
1232 if (opt_stats_print) {
1233 /* Print statistics at exit. */
1234 if (atexit(stats_print_atexit) != 0) {
1235 malloc_write("<jemalloc>: Error in atexit()\n");
1244 if (base_boot(TSDN_NULL)) {
1247 if (extent_boot()) {
1257 if (tcache_boot(TSDN_NULL)) {
1260 if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1261 malloc_mutex_rank_exclusive)) {
1265 * Create enough scaffolding to allow recursive allocation in
1269 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1271 * Initialize one arena here. The rest are lazily created in
1272 * arena_choose_hard().
1274 if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
1278 a0 = arena_get(TSDN_NULL, 0, false);
1279 malloc_init_state = malloc_init_a0_initialized;
1285 malloc_init_hard_a0(void) {
1288 malloc_mutex_lock(TSDN_NULL, &init_lock);
1289 ret = malloc_init_hard_a0_locked();
1290 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1294 /* Initialize data structures which may trigger recursive allocation. */
1296 malloc_init_hard_recursible(void) {
1297 malloc_init_state = malloc_init_recursible;
1299 ncpus = malloc_ncpus();
1301 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1302 && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1303 !defined(__native_client__))
1304 /* LinuxThreads' pthread_atfork() allocates. */
1305 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1306 jemalloc_postfork_child) != 0) {
1307 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1315 if (background_thread_boot0()) {
1323 malloc_narenas_default(void) {
1326 * For SMP systems, create more than one arena per CPU by
1336 static percpu_arena_mode_t
1337 percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1338 assert(!malloc_initialized());
1339 assert(mode <= percpu_arena_disabled);
1341 if (mode != percpu_arena_disabled) {
1342 mode += percpu_arena_mode_enabled_base;
1349 malloc_init_narenas(void) {
1352 if (opt_percpu_arena != percpu_arena_disabled) {
1353 if (!have_percpu_arena || malloc_getcpu() < 0) {
1354 opt_percpu_arena = percpu_arena_disabled;
1355 malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1356 "available. Setting narenas to %u.\n", opt_narenas ?
1357 opt_narenas : malloc_narenas_default());
1362 if (ncpus >= MALLOCX_ARENA_LIMIT) {
1363 malloc_printf("<jemalloc>: narenas w/ percpu"
1364 "arena beyond limit (%d)\n", ncpus);
1370 /* NB: opt_percpu_arena isn't fully initialized yet. */
1371 if (percpu_arena_as_initialized(opt_percpu_arena) ==
1372 per_phycpu_arena && ncpus % 2 != 0) {
1373 malloc_printf("<jemalloc>: invalid "
1374 "configuration -- per physical CPU arena "
1375 "with odd number (%u) of CPUs (no hyper "
1376 "threading?).\n", ncpus);
1380 unsigned n = percpu_arena_ind_limit(
1381 percpu_arena_as_initialized(opt_percpu_arena));
1382 if (opt_narenas < n) {
1384 * If narenas is specified with percpu_arena
1385 * enabled, actual narenas is set as the greater
1386 * of the two. percpu_arena_choose will be free
1387 * to use any of the arenas based on CPU
1388 * id. This is conservative (at a small cost)
1389 * but ensures correctness.
1391 * If for some reason the ncpus determined at
1392 * boot is not the actual number (e.g. because
1393 * of affinity setting from numactl), reserving
1394 * narenas this way provides a workaround for
1401 if (opt_narenas == 0) {
1402 opt_narenas = malloc_narenas_default();
1404 assert(opt_narenas > 0);
1406 narenas_auto = opt_narenas;
1408 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1410 if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1411 narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1412 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1415 narenas_total_set(narenas_auto);
1421 malloc_init_percpu(void) {
1422 opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
1426 malloc_init_hard_finish(void) {
1427 if (malloc_mutex_boot()) {
1431 malloc_init_state = malloc_init_initialized;
1432 malloc_slow_flag_init();
1438 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1439 malloc_mutex_assert_owner(tsdn, &init_lock);
1440 malloc_mutex_unlock(tsdn, &init_lock);
1441 if (reentrancy_set) {
1442 assert(!tsdn_null(tsdn));
1443 tsd_t *tsd = tsdn_tsd(tsdn);
1444 assert(tsd_reentrancy_level_get(tsd) > 0);
1445 post_reentrancy(tsd);
1450 malloc_init_hard(void) {
1453 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1456 malloc_mutex_lock(TSDN_NULL, &init_lock);
1458 #define UNLOCK_RETURN(tsdn, ret, reentrancy) \
1459 malloc_init_hard_cleanup(tsdn, reentrancy); \
1462 if (!malloc_init_hard_needed()) {
1463 UNLOCK_RETURN(TSDN_NULL, false, false)
1466 if (malloc_init_state != malloc_init_a0_initialized &&
1467 malloc_init_hard_a0_locked()) {
1468 UNLOCK_RETURN(TSDN_NULL, true, false)
1471 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1472 /* Recursive allocation relies on functional tsd. */
1473 tsd = malloc_tsd_boot0();
1477 if (malloc_init_hard_recursible()) {
1481 malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1482 /* Set reentrancy level to 1 during init. */
1483 pre_reentrancy(tsd, NULL);
1484 /* Initialize narenas before prof_boot2 (for allocation). */
1485 if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
1486 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1488 if (config_prof && prof_boot2(tsd)) {
1489 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1492 malloc_init_percpu();
1494 if (malloc_init_hard_finish()) {
1495 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1497 post_reentrancy(tsd);
1498 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1501 /* Update TSD after tsd_boot1. */
1503 if (opt_background_thread) {
1504 assert(have_background_thread);
1506 * Need to finish init & unlock first before creating background
1507 * threads (pthread_create depends on malloc).
1509 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1510 bool err = background_thread_create(tsd, 0);
1511 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1516 #undef UNLOCK_RETURN
1521 * End initialization functions.
1523 /******************************************************************************/
1525 * Begin allocation-path internal functions and data structures.
1529 * Settings determined by the documented behavior of the allocation functions.
1531 typedef struct static_opts_s static_opts_t;
1532 struct static_opts_s {
1533 /* Whether or not allocation size may overflow. */
1535 /* Whether or not allocations of size 0 should be treated as size 1. */
1536 bool bump_empty_alloc;
1538 * Whether to assert that allocations are not of size 0 (after any
1541 bool assert_nonempty_alloc;
1544 * Whether or not to modify the 'result' argument to malloc in case of
1547 bool null_out_result_on_error;
1548 /* Whether to set errno when we encounter an error condition. */
1549 bool set_errno_on_error;
1552 * The minimum valid alignment for functions requesting aligned storage.
1554 size_t min_alignment;
1556 /* The error string to use if we oom. */
1557 const char *oom_string;
1558 /* The error string to use if the passed-in alignment is invalid. */
1559 const char *invalid_alignment_string;
1562 * False if we're configured to skip some time-consuming operations.
1564 * This isn't really a malloc "behavior", but it acts as a useful
1565 * summary of several other static (or at least, static after program
1566 * initialization) options.
1571 JEMALLOC_ALWAYS_INLINE void
1572 static_opts_init(static_opts_t *static_opts) {
1573 static_opts->may_overflow = false;
1574 static_opts->bump_empty_alloc = false;
1575 static_opts->assert_nonempty_alloc = false;
1576 static_opts->null_out_result_on_error = false;
1577 static_opts->set_errno_on_error = false;
1578 static_opts->min_alignment = 0;
1579 static_opts->oom_string = "";
1580 static_opts->invalid_alignment_string = "";
1581 static_opts->slow = false;
1585 * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
1586 * should have one constant here per magic value there. Note however that the
1587 * representations need not be related.
1589 #define TCACHE_IND_NONE ((unsigned)-1)
1590 #define TCACHE_IND_AUTOMATIC ((unsigned)-2)
1591 #define ARENA_IND_AUTOMATIC ((unsigned)-1)
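/*
 * For illustration: the application-side spellings of these magic values are
 * the MALLOCX_*() flag macros; je_mallocx() below translates, e.g.,
 * MALLOCX_TCACHE_NONE into TCACHE_IND_NONE and an explicit MALLOCX_ARENA()
 * into a concrete arena index in place of ARENA_IND_AUTOMATIC.  A minimal
 * sketch, assuming the public <jemalloc/jemalloc.h> API:
 */
#if 0	/* Illustrative sketch; not compiled. */
#include <jemalloc/jemalloc.h>

static void *
alloc_bypassing_tcache(size_t size) {
	/* Skip the thread cache entirely for this allocation. */
	return mallocx(size, MALLOCX_TCACHE_NONE);
}
#endif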
1593 typedef struct dynamic_opts_s dynamic_opts_t;
1594 struct dynamic_opts_s {
1600 unsigned tcache_ind;
1604 JEMALLOC_ALWAYS_INLINE void
1605 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1606 dynamic_opts->result = NULL;
1607 dynamic_opts->num_items = 0;
1608 dynamic_opts->item_size = 0;
1609 dynamic_opts->alignment = 0;
1610 dynamic_opts->zero = false;
1611 dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1612 dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
1615 /* ind is ignored if dopts->alignment > 0. */
1616 JEMALLOC_ALWAYS_INLINE void *
1617 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1618 size_t size, size_t usize, szind_t ind) {
1622 /* Fill in the tcache. */
1623 if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
1624 if (likely(!sopts->slow)) {
1625 /* Getting tcache ptr unconditionally. */
1626 tcache = tsd_tcachep_get(tsd);
1627 assert(tcache == tcache_get(tsd));
1629 tcache = tcache_get(tsd);
1631 } else if (dopts->tcache_ind == TCACHE_IND_NONE) {
1634 tcache = tcaches_get(tsd, dopts->tcache_ind);
1637 /* Fill in the arena. */
1638 if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
1640 * In case of automatic arena management, we defer arena
1641 * computation until as late as we can, hoping to fill the
1642 * allocation out of the tcache.
1646 arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
1649 if (unlikely(dopts->alignment != 0)) {
1650 return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
1651 dopts->zero, tcache, arena);
1654 return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
1655 arena, sopts->slow);
1658 JEMALLOC_ALWAYS_INLINE void *
1659 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1660 size_t usize, szind_t ind) {
1664 * For small allocations, sampling bumps the usize. If so, we allocate
1665 * from the ind_large bucket.
1668 size_t bumped_usize = usize;
1670 if (usize <= SMALL_MAXCLASS) {
1671 assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
1672 sz_sa2u(LARGE_MINCLASS, dopts->alignment))
1674 ind_large = sz_size2index(LARGE_MINCLASS);
1675 bumped_usize = sz_s2u(LARGE_MINCLASS);
1676 ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1677 bumped_usize, ind_large);
1678 if (unlikely(ret == NULL)) {
1681 arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1683 ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1690 * Returns true if the allocation will overflow, and false otherwise. Sets
1691 * *size to the product either way.
1693 JEMALLOC_ALWAYS_INLINE bool
1694 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1697 * This function is just num_items * item_size, except that we may have
1698 * to check for overflow.
1701 if (!may_overflow) {
1702 assert(dopts->num_items == 1);
1703 *size = dopts->item_size;
1707 /* A size_t with its high-half bits all set to 1. */
1708 static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1710 *size = dopts->item_size * dopts->num_items;
1712 if (unlikely(*size == 0)) {
1713 return (dopts->num_items != 0 && dopts->item_size != 0);
1717 * We got a non-zero size, but we don't know if we overflowed to get
1718 * there. To avoid having to do a divide, we'll be clever and note that
1719 * if both A and B can be represented in N/2 bits, then their product
1720 * can be represented in N bits (without the possibility of overflow).
1722 if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1725 if (likely(*size / dopts->item_size == dopts->num_items)) {
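/*
 * For illustration: the half-width trick above, written out as a stand-alone
 * helper.  A minimal sketch with hypothetical names; if either factor has any
 * of its high-half bits set, one division detects wraparound:
 */
#if 0	/* Illustrative sketch; not compiled. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
mul_overflows(size_t a, size_t b, size_t *prod) {
	const size_t high_bits = SIZE_MAX << (sizeof(size_t) * 8 / 2);

	*prod = a * b;
	if ((high_bits & (a | b)) == 0) {
		/* Both factors fit in the low half; no overflow possible. */
		return false;
	}
	/* b == 0 cannot overflow; otherwise an exact product divides back. */
	return b != 0 && *prod / b != a;
}
#endif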
1731 JEMALLOC_ALWAYS_INLINE int
1732 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1733 /* Where the actual allocated memory will live. */
1734 void *allocation = NULL;
1735 /* Filled in by compute_size_with_overflow below. */
1738 * For unaligned allocations, we need only ind. For aligned
1739 * allocations, or in case of stats or profiling, we need usize.
1741 * These are actually dead stores, in that their values are reset before
1742 * any branch on their value is taken. Sometimes though, it's
1743 * convenient to pass them as arguments before this point. To avoid
1744 * undefined behavior then, we initialize them with dummy stores.
1749 /* Reentrancy is only checked on slow path. */
1750 int8_t reentrancy_level;
1752 /* Compute the amount of memory the user wants. */
1753 if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1758 /* Validate the user input. */
1759 if (sopts->bump_empty_alloc) {
1760 if (unlikely(size == 0)) {
1765 if (sopts->assert_nonempty_alloc) {
1769 if (unlikely(dopts->alignment < sopts->min_alignment
1770 || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
1771 goto label_invalid_alignment;
1774 /* This is the beginning of the "core" algorithm. */
1776 if (dopts->alignment == 0) {
1777 ind = sz_size2index(size);
1778 if (unlikely(ind >= NSIZES)) {
1781 if (config_stats || (config_prof && opt_prof)) {
1782 usize = sz_index2size(ind);
1783 assert(usize > 0 && usize <= LARGE_MAXCLASS);
1786 usize = sz_sa2u(size, dopts->alignment);
1787 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1792 check_entry_exit_locking(tsd_tsdn(tsd));
1795 * If we need to handle reentrancy, we can do it out of a
1796 * known-initialized arena (i.e. arena 0).
1798 reentrancy_level = tsd_reentrancy_level_get(tsd);
1799 if (sopts->slow && unlikely(reentrancy_level > 0)) {
1801 * We should never specify particular arenas or tcaches from
1802 * within our internal allocations.
1804 assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
1805 dopts->tcache_ind == TCACHE_IND_NONE);
1806 assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
1807 dopts->tcache_ind = TCACHE_IND_NONE;
1808 /* We know that arena 0 has already been initialized. */
1809 dopts->arena_ind = 0;
1812 /* If profiling is on, get our profiling context. */
1813 if (config_prof && opt_prof) {
1815 * Note that if we're going down this path, usize must have been
1816 * initialized in the previous if statement.
1818 prof_tctx_t *tctx = prof_alloc_prep(
1819 tsd, usize, prof_active_get_unlocked(), true);
1821 alloc_ctx_t alloc_ctx;
1822 if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1823 alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
1824 allocation = imalloc_no_sample(
1825 sopts, dopts, tsd, usize, usize, ind);
1826 } else if ((uintptr_t)tctx > (uintptr_t)1U) {
1828 * Note that ind might still be 0 here. This is fine;
1829 * imalloc_sample ignores ind if dopts->alignment > 0.
1831 allocation = imalloc_sample(
1832 sopts, dopts, tsd, usize, ind);
1833 alloc_ctx.slab = false;
1838 if (unlikely(allocation == NULL)) {
1839 prof_alloc_rollback(tsd, tctx, true);
1842 prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
1845 * If dopts->alignment > 0, then ind is still 0, but usize was
1846 * computed in the previous if statement. Down the positive
1847 * alignment path, imalloc_no_sample ignores ind and size
1848 * (relying only on usize).
1850 allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
1852 if (unlikely(allocation == NULL)) {
1858 * Allocation has been done at this point. We still have some
1859 * post-allocation work to do though.
1861 assert(dopts->alignment == 0
1862 || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
1865 assert(usize == isalloc(tsd_tsdn(tsd), allocation));
1866 *tsd_thread_allocatedp_get(tsd) += usize;
1870 UTRACE(0, size, allocation);
1874 check_entry_exit_locking(tsd_tsdn(tsd));
1875 *dopts->result = allocation;
1879 if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
1880 malloc_write(sopts->oom_string);
1885 UTRACE(NULL, size, NULL);
1888 check_entry_exit_locking(tsd_tsdn(tsd));
1890 if (sopts->set_errno_on_error) {
1894 if (sopts->null_out_result_on_error) {
1895 *dopts->result = NULL;
1901 * This label is only jumped to by one goto; we move it out of line
1902 * anyway to avoid obscuring the non-error paths, and for symmetry with
1905 label_invalid_alignment:
1906 if (config_xmalloc && unlikely(opt_xmalloc)) {
1907 malloc_write(sopts->invalid_alignment_string);
1911 if (sopts->set_errno_on_error) {
1916 UTRACE(NULL, size, NULL);
1919 check_entry_exit_locking(tsd_tsdn(tsd));
1921 if (sopts->null_out_result_on_error) {
1922 *dopts->result = NULL;
1928 /* Returns the errno-style error code of the allocation. */
1929 JEMALLOC_ALWAYS_INLINE int
1930 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
1931 if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
1932 if (config_xmalloc && unlikely(opt_xmalloc)) {
1933 malloc_write(sopts->oom_string);
1936 UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
1938 *dopts->result = NULL;
1943 /* We always need the tsd. Let's grab it right away. */
1944 tsd_t *tsd = tsd_fetch();
1946 if (likely(tsd_fast(tsd))) {
1947 /* Fast and common path. */
1948 tsd_assert_fast(tsd);
1949 sopts->slow = false;
1950 return imalloc_body(sopts, dopts, tsd);
1953 return imalloc_body(sopts, dopts, tsd);
1956 /******************************************************************************/
1958 * Begin malloc(3)-compatible functions.
1961 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1962 void JEMALLOC_NOTHROW *
1963 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
1964 je_malloc(size_t size) {
1966 static_opts_t sopts;
1967 dynamic_opts_t dopts;
1969 static_opts_init(&sopts);
1970 dynamic_opts_init(&dopts);
1972 sopts.bump_empty_alloc = true;
1973 sopts.null_out_result_on_error = true;
1974 sopts.set_errno_on_error = true;
1975 sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
1977 dopts.result = &ret;
1978 dopts.num_items = 1;
1979 dopts.item_size = size;
1981 imalloc(&sopts, &dopts);
1986 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1987 JEMALLOC_ATTR(nonnull(1))
1988 je_posix_memalign(void **memptr, size_t alignment, size_t size) {
1990 static_opts_t sopts;
1991 dynamic_opts_t dopts;
1993 static_opts_init(&sopts);
1994 dynamic_opts_init(&dopts);
1996 sopts.bump_empty_alloc = true;
1997 sopts.min_alignment = sizeof(void *);
1999 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2000 sopts.invalid_alignment_string =
2001 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2003 dopts.result = memptr;
2004 dopts.num_items = 1;
2005 dopts.item_size = size;
2006 dopts.alignment = alignment;
2008 ret = imalloc(&sopts, &dopts);
2012 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2013 void JEMALLOC_NOTHROW *
2014 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2015 je_aligned_alloc(size_t alignment, size_t size) {
2018 static_opts_t sopts;
2019 dynamic_opts_t dopts;
2021 static_opts_init(&sopts);
2022 dynamic_opts_init(&dopts);
2024 sopts.bump_empty_alloc = true;
2025 sopts.null_out_result_on_error = true;
2026 sopts.set_errno_on_error = true;
2027 sopts.min_alignment = 1;
2029 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2030 sopts.invalid_alignment_string =
2031 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2033 dopts.result = &ret;
2034 dopts.num_items = 1;
2035 dopts.item_size = size;
2036 dopts.alignment = alignment;
2038 imalloc(&sopts, &dopts);
2042 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2043 void JEMALLOC_NOTHROW *
2044 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2045 je_calloc(size_t num, size_t size) {
2047 static_opts_t sopts;
2048 dynamic_opts_t dopts;
2050 static_opts_init(&sopts);
2051 dynamic_opts_init(&dopts);
2053 sopts.may_overflow = true;
2054 sopts.bump_empty_alloc = true;
2055 sopts.null_out_result_on_error = true;
2056 sopts.set_errno_on_error = true;
2057 sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2059 dopts.result = &ret;
2060 dopts.num_items = num;
2061 dopts.item_size = size;
2064 imalloc(&sopts, &dopts);
2070 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2071 prof_tctx_t *tctx) {
2077 if (usize <= SMALL_MAXCLASS) {
2078 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
2082 arena_prof_promote(tsd_tsdn(tsd), p, usize);
2084 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2090 JEMALLOC_ALWAYS_INLINE void *
2091 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2092 alloc_ctx_t *alloc_ctx) {
2095 prof_tctx_t *old_tctx, *tctx;
2097 prof_active = prof_active_get_unlocked();
2098 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2099 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
2100 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2101 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
2103 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2105 if (unlikely(p == NULL)) {
2106 prof_alloc_rollback(tsd, tctx, true);
2109 prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
2115 JEMALLOC_ALWAYS_INLINE void
2116 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2118 tsd_assert_fast(tsd);
2120 check_entry_exit_locking(tsd_tsdn(tsd));
2121 if (tsd_reentrancy_level_get(tsd) != 0) {
2125 assert(ptr != NULL);
2126 assert(malloc_initialized() || IS_INITIALIZER);
2128 alloc_ctx_t alloc_ctx;
2129 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2130 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2131 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2132 assert(alloc_ctx.szind != NSIZES);
2135 if (config_prof && opt_prof) {
2136 usize = sz_index2size(alloc_ctx.szind);
2137 prof_free(tsd, ptr, usize, &alloc_ctx);
2138 } else if (config_stats) {
2139 usize = sz_index2size(alloc_ctx.szind);
2142 *tsd_thread_deallocatedp_get(tsd) += usize;
2145 if (likely(!slow_path)) {
2146 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2149 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2154 JEMALLOC_ALWAYS_INLINE void
2155 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2157 tsd_assert_fast(tsd);
2159 check_entry_exit_locking(tsd_tsdn(tsd));
2160 if (tsd_reentrancy_level_get(tsd) != 0) {
2164 assert(ptr != NULL);
2165 assert(malloc_initialized() || IS_INITIALIZER);
2167 alloc_ctx_t alloc_ctx, *ctx;
2168 if (config_prof && opt_prof) {
2169 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2170 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2171 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2172 assert(alloc_ctx.szind == sz_size2index(usize));
2174 prof_free(tsd, ptr, usize, ctx);
2180 *tsd_thread_deallocatedp_get(tsd) += usize;
2183 if (likely(!slow_path)) {
2184 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
2186 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
2190 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2191 void JEMALLOC_NOTHROW *
2192 JEMALLOC_ALLOC_SIZE(2)
2193 je_realloc(void *ptr, size_t size) {
2195 tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
2196 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
2197 size_t old_usize = 0;
2199 if (unlikely(size == 0)) {
2201 /* realloc(ptr, 0) is equivalent to free(ptr). */
2204 tsd_t *tsd = tsd_fetch();
2205 if (tsd_reentrancy_level_get(tsd) == 0) {
2206 tcache = tcache_get(tsd);
2210 ifree(tsd, ptr, tcache, true);
2216 if (likely(ptr != NULL)) {
2217 assert(malloc_initialized() || IS_INITIALIZER);
2218 tsd_t *tsd = tsd_fetch();
2220 check_entry_exit_locking(tsd_tsdn(tsd));
2222 alloc_ctx_t alloc_ctx;
2223 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2224 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2225 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2226 assert(alloc_ctx.szind != NSIZES);
2227 old_usize = sz_index2size(alloc_ctx.szind);
2228 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2229 if (config_prof && opt_prof) {
2230 usize = sz_s2u(size);
2231 ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
2232 NULL : irealloc_prof(tsd, ptr, old_usize, usize,
2236 usize = sz_s2u(size);
2238 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
2240 tsdn = tsd_tsdn(tsd);
2242 /* realloc(NULL, size) is equivalent to malloc(size). */
2243 return je_malloc(size);
2246 if (unlikely(ret == NULL)) {
2247 if (config_xmalloc && unlikely(opt_xmalloc)) {
2248 malloc_write("<jemalloc>: Error in realloc(): "
2254 if (config_stats && likely(ret != NULL)) {
2257 assert(usize == isalloc(tsdn, ret));
2258 tsd = tsdn_tsd(tsdn);
2259 *tsd_thread_allocatedp_get(tsd) += usize;
2260 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2262 UTRACE(ptr, size, ret);
2263 check_entry_exit_locking(tsdn);
2267 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2268 je_free(void *ptr) {
2270 if (likely(ptr != NULL)) {
2272 * We avoid setting up tsd fully (e.g. tcache, arena binding)
2273 * based on only free() calls -- other activities trigger the
2274 * minimal to full transition. This is because free() may
2275 * happen during thread shutdown after tls deallocation: if a
2276 * thread never had any malloc activities until then, a
2277 * fully-setup tsd won't be destructed properly.
2279 tsd_t *tsd = tsd_fetch_min();
2280 check_entry_exit_locking(tsd_tsdn(tsd));
2283 if (likely(tsd_fast(tsd))) {
2284 tsd_assert_fast(tsd);
2285 /* Unconditionally get tcache ptr on fast path. */
2286 tcache = tsd_tcachep_get(tsd);
2287 ifree(tsd, ptr, tcache, false);
2289 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2290 tcache = tcache_get(tsd);
2294 ifree(tsd, ptr, tcache, true);
2296 check_entry_exit_locking(tsd_tsdn(tsd));
2301 * End malloc(3)-compatible functions.
2303 /******************************************************************************/
2305 * Begin non-standard override functions.
2308 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
2309 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2310 void JEMALLOC_NOTHROW *
2311 JEMALLOC_ATTR(malloc)
2312 je_memalign(size_t alignment, size_t size) {
2314 static_opts_t sopts;
2315 dynamic_opts_t dopts;
2317 static_opts_init(&sopts);
2318 dynamic_opts_init(&dopts);
2320 sopts.bump_empty_alloc = true;
2321 sopts.min_alignment = 1;
2323 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2324 sopts.invalid_alignment_string =
2325 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2326 sopts.null_out_result_on_error = true;
2328 dopts.result = &ret;
2329 dopts.num_items = 1;
2330 dopts.item_size = size;
2331 dopts.alignment = alignment;
2333 imalloc(&sopts, &dopts);
2338 #ifdef JEMALLOC_OVERRIDE_VALLOC
2339 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2340 void JEMALLOC_NOTHROW *
2341 JEMALLOC_ATTR(malloc)
2342 je_valloc(size_t size) {
2345 static_opts_t sopts;
2346 dynamic_opts_t dopts;
2348 static_opts_init(&sopts);
2349 dynamic_opts_init(&dopts);
2351 sopts.bump_empty_alloc = true;
2352 sopts.null_out_result_on_error = true;
2353 sopts.min_alignment = PAGE;
2355 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2356 sopts.invalid_alignment_string =
2357 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2359 dopts.result = &ret;
2360 dopts.num_items = 1;
2361 dopts.item_size = size;
2362 dopts.alignment = PAGE;
2364 imalloc(&sopts, &dopts);
2370 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2372 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
2373 * to inconsistently reference libc's malloc(3)-compatible functions
2374 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2376 * These definitions interpose hooks in glibc. The functions are actually
2377 * passed an extra argument for the caller return address, which will be
2380 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2381 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2382 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2383 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2384 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2390 * To enable static linking with glibc, the libc-specific malloc interface must
2391 * be implemented also, so none of glibc's malloc.o functions are added to the
2394 # define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
2395 /* To force macro expansion of je_ prefix before stringification. */
2396 # define PREALIAS(je_fn) ALIAS(je_fn)
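/*
 * For illustration of why the PREALIAS() pass-through is needed: the #
 * operator suppresses macro expansion of its argument, so an extra level lets
 * a je_-prefixed name expand to its public spelling before stringification.
 * A minimal stand-alone preprocessor sketch with hypothetical names:
 */
#if 0	/* Illustrative sketch; not compiled. */
#define STR_NOEXPAND(x) #x
#define STR(x) STR_NOEXPAND(x)
#define je_example public_example
/* STR_NOEXPAND(je_example) yields "je_example"; STR(je_example) yields "public_example". */
#endif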
2397 # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2398 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2400 # ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2401 void __libc_free(void* ptr) PREALIAS(je_free);
2403 # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2404 void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2406 # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2407 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2409 # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2410 void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
2412 # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2413 void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2415 # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2416 int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2424 * End non-standard override functions.
2426 /******************************************************************************/
2428 * Begin non-standard functions.
2431 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2432 void JEMALLOC_NOTHROW *
2433 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2434 je_mallocx(size_t size, int flags) {
2436 static_opts_t sopts;
2437 dynamic_opts_t dopts;
2439 static_opts_init(&sopts);
2440 dynamic_opts_init(&dopts);
2442 sopts.assert_nonempty_alloc = true;
2443 sopts.null_out_result_on_error = true;
2444 sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2446 dopts.result = &ret;
2447 dopts.num_items = 1;
2448 dopts.item_size = size;
2449 if (unlikely(flags != 0)) {
2450 if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2451 dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2454 dopts.zero = MALLOCX_ZERO_GET(flags);
2456 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2457 if ((flags & MALLOCX_TCACHE_MASK)
2458 == MALLOCX_TCACHE_NONE) {
2459 dopts.tcache_ind = TCACHE_IND_NONE;
2461 dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2464 dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2467 if ((flags & MALLOCX_ARENA_MASK) != 0)
2468 dopts.arena_ind = MALLOCX_ARENA_GET(flags);
2471 imalloc(&sopts, &dopts);
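/*
 * For illustration: how the flag bits decoded above are typically composed by
 * a caller.  A minimal application-side sketch, assuming the public
 * <jemalloc/jemalloc.h> API:
 */
#if 0	/* Illustrative sketch; not compiled. */
#include <jemalloc/jemalloc.h>

static void *
alloc_aligned_zeroed(size_t size) {
	/* 64-byte aligned, zero-filled, default tcache and arena selection. */
	return mallocx(size, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
}
#endif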
2476 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2477 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2478 prof_tctx_t *tctx) {
2484 if (usize <= SMALL_MAXCLASS) {
2485 p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2486 alignment, zero, tcache, arena);
2490 arena_prof_promote(tsdn, p, usize);
2492 p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2499 JEMALLOC_ALWAYS_INLINE void *
2500 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2501 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2502 arena_t *arena, alloc_ctx_t *alloc_ctx) {
2505 prof_tctx_t *old_tctx, *tctx;
2507 prof_active = prof_active_get_unlocked();
2508 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2509 tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
2510 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2511 p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2512 *usize, alignment, zero, tcache, arena, tctx);
2514 p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2515 zero, tcache, arena);
2517 if (unlikely(p == NULL)) {
2518 prof_alloc_rollback(tsd, tctx, false);
2522 if (p == old_ptr && alignment != 0) {
2524 * The allocation did not move, so it is possible that the size
2525 * class is smaller than would guarantee the requested
2526 * alignment, and that the alignment constraint was
2527 * serendipitously satisfied. Additionally, old_usize may not
2528 * be the same as the current usize because of in-place large
2529 * reallocation. Therefore, query the actual value of usize.
2531 *usize = isalloc(tsd_tsdn(tsd), p);
2533 prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
2534 old_usize, old_tctx);
2539 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2540 void JEMALLOC_NOTHROW *
2541 JEMALLOC_ALLOC_SIZE(2)
2542 je_rallocx(void *ptr, size_t size, int flags) {
2547 size_t alignment = MALLOCX_ALIGN_GET(flags);
2548 bool zero = flags & MALLOCX_ZERO;
2552 assert(ptr != NULL);
2554 assert(malloc_initialized() || IS_INITIALIZER);
2556 check_entry_exit_locking(tsd_tsdn(tsd));
2558 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2559 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2560 arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2561 if (unlikely(arena == NULL)) {
2568 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2569 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2572 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2575 tcache = tcache_get(tsd);
2578 alloc_ctx_t alloc_ctx;
2579 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2580 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2581 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2582 assert(alloc_ctx.szind != NSIZES);
2583 old_usize = sz_index2size(alloc_ctx.szind);
2584 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2585 if (config_prof && opt_prof) {
2586 usize = (alignment == 0) ?
2587 sz_s2u(size) : sz_sa2u(size, alignment);
2588 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2591 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2592 zero, tcache, arena, &alloc_ctx);
2593 if (unlikely(p == NULL)) {
2597 p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2598 zero, tcache, arena);
2599 if (unlikely(p == NULL)) {
2603 usize = isalloc(tsd_tsdn(tsd), p);
2606 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2609 *tsd_thread_allocatedp_get(tsd) += usize;
2610 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2612 UTRACE(ptr, size, p);
2613 check_entry_exit_locking(tsd_tsdn(tsd));
2616 if (config_xmalloc && unlikely(opt_xmalloc)) {
2617 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2620 UTRACE(ptr, size, 0);
2621 check_entry_exit_locking(tsd_tsdn(tsd));
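/*
 * Usage sketch (editorial): rallocx() with an explicit arena, matching the
 * MALLOCX_ARENA/MALLOCX_TCACHE decoding above.  The arena index is
 * hypothetical; a caller would normally obtain one via the "arenas.create"
 * mallctl.  Not compiled (#if 0).
 */
#if 0
#include <jemalloc/jemalloc.h>
void *
example_rallocx(void *p, unsigned arena_ind) {
	/* Grow to 1 MiB in the given arena, skipping the thread cache. */
	return rallocx(p, 1 << 20, MALLOCX_ARENA(arena_ind) |
	    MALLOCX_TCACHE_NONE);
}
#endif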
2625 JEMALLOC_ALWAYS_INLINE size_t
2626 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2627 size_t extra, size_t alignment, bool zero) {
2630 if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2633 usize = isalloc(tsdn, ptr);
2639 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2640 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2646 usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2652 JEMALLOC_ALWAYS_INLINE size_t
2653 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2654 size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2655 size_t usize_max, usize;
2657 prof_tctx_t *old_tctx, *tctx;
2659 prof_active = prof_active_get_unlocked();
2660 old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2662 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2663 * Therefore, compute its maximum possible value and use that in
2664 * prof_alloc_prep() to decide whether to capture a backtrace.
2665 * prof_realloc() will use the actual usize to decide whether to sample.
2667 if (alignment == 0) {
2668 usize_max = sz_s2u(size+extra);
2669 assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2671 usize_max = sz_sa2u(size+extra, alignment);
2672 if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2674 * usize_max is out of range, and chances are that
2675 * allocation will fail, but use the maximum possible
2676 * value and carry on with prof_alloc_prep(), just in
2677 * case allocation succeeds.
2679 usize_max = LARGE_MAXCLASS;
2682 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2684 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2685 usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2686 size, extra, alignment, zero, tctx);
2688 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2689 extra, alignment, zero);
2691 if (usize == old_usize) {
2692 prof_alloc_rollback(tsd, tctx, false);
2695 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2701 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2702 je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2704 size_t usize, old_usize;
2705 size_t alignment = MALLOCX_ALIGN_GET(flags);
2706 bool zero = flags & MALLOCX_ZERO;
2708 assert(ptr != NULL);
2710 assert(SIZE_T_MAX - size >= extra);
2711 assert(malloc_initialized() || IS_INITIALIZER);
2713 check_entry_exit_locking(tsd_tsdn(tsd));
2715 alloc_ctx_t alloc_ctx;
2716 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2717 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2718 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2719 assert(alloc_ctx.szind != NSIZES);
2720 old_usize = sz_index2size(alloc_ctx.szind);
2721 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2723 * The API explicitly absolves itself of protecting against (size +
2724 * extra) numerical overflow, but we may need to clamp extra to avoid
2725 * exceeding LARGE_MAXCLASS.
2727 * Ordinarily, size limit checking is handled deeper down, but here we
2728 * have to check as part of (size + extra) clamping, since we need the
2729 * clamped value in the above helper functions.
2731 if (unlikely(size > LARGE_MAXCLASS)) {
2733 goto label_not_resized;
2735 if (unlikely(LARGE_MAXCLASS - size < extra)) {
2736 extra = LARGE_MAXCLASS - size;
2739 if (config_prof && opt_prof) {
2740 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2741 alignment, zero, &alloc_ctx);
2743 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2744 extra, alignment, zero);
2746 if (unlikely(usize == old_usize)) {
2747 goto label_not_resized;
2751 *tsd_thread_allocatedp_get(tsd) += usize;
2752 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2755 UTRACE(ptr, size, ptr);
2756 check_entry_exit_locking(tsd_tsdn(tsd));
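/*
 * Usage sketch (editorial): xallocx() attempts an in-place resize and returns
 * the resulting usable size, so the caller compares the result against the
 * requested size to learn whether the request was satisfied.  Not compiled.
 */
#if 0
#include <jemalloc/jemalloc.h>
int
example_xallocx_grow(void *p, size_t want) {
	size_t usize = xallocx(p, want, 0, 0);
	return usize >= want;	/* 0: could not grow in place; fall back to rallocx(). */
}
#endif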
2760 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2762 je_sallocx(const void *ptr, int flags) {
2766 assert(malloc_initialized() || IS_INITIALIZER);
2767 assert(ptr != NULL);
2769 tsdn = tsdn_fetch();
2770 check_entry_exit_locking(tsdn);
2772 if (config_debug || force_ivsalloc) {
2773 usize = ivsalloc(tsdn, ptr);
2774 assert(force_ivsalloc || usize != 0);
2776 usize = isalloc(tsdn, ptr);
2779 check_entry_exit_locking(tsdn);
2783 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2784 je_dallocx(void *ptr, int flags) {
2785 assert(ptr != NULL);
2786 assert(malloc_initialized() || IS_INITIALIZER);
2788 tsd_t *tsd = tsd_fetch();
2789 bool fast = tsd_fast(tsd);
2790 check_entry_exit_locking(tsd_tsdn(tsd));
2793 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2794 /* Not allowed to be reentrant and specify a custom tcache. */
2795 assert(tsd_reentrancy_level_get(tsd) == 0);
2796 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2799 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2803 tcache = tsd_tcachep_get(tsd);
2804 assert(tcache == tcache_get(tsd));
2806 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2807 tcache = tcache_get(tsd);
2816 tsd_assert_fast(tsd);
2817 ifree(tsd, ptr, tcache, false);
2819 ifree(tsd, ptr, tcache, true);
2821 check_entry_exit_locking(tsd_tsdn(tsd));
2824 JEMALLOC_ALWAYS_INLINE size_t
2825 inallocx(tsdn_t *tsdn, size_t size, int flags) {
2826 check_entry_exit_locking(tsdn);
2829 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
2830 usize = sz_s2u(size);
2832 usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2834 check_entry_exit_locking(tsdn);
2838 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2839 je_sdallocx(void *ptr, size_t size, int flags) {
2840 assert(ptr != NULL);
2841 assert(malloc_initialized() || IS_INITIALIZER);
2843 tsd_t *tsd = tsd_fetch();
2844 bool fast = tsd_fast(tsd);
2845 size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
2846 assert(usize == isalloc(tsd_tsdn(tsd), ptr));
2847 check_entry_exit_locking(tsd_tsdn(tsd));
2850 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2851 /* Not allowed to be reentrant and specify a custom tcache. */
2852 assert(tsd_reentrancy_level_get(tsd) == 0);
2853 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2856 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2860 tcache = tsd_tcachep_get(tsd);
2861 assert(tcache == tcache_get(tsd));
2863 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2864 tcache = tcache_get(tsd);
2873 tsd_assert_fast(tsd);
2874 isfree(tsd, ptr, usize, tcache, false);
2876 isfree(tsd, ptr, usize, tcache, true);
2878 check_entry_exit_locking(tsd_tsdn(tsd));
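/*
 * Usage sketch (editorial): sdallocx() is the sized counterpart of dallocx();
 * passing the original request size (or the value nallocx()/sallocx()
 * reported) lets the size lookup above be verified cheaply.  Not compiled.
 */
#if 0
#include <jemalloc/jemalloc.h>
void
example_sized_dalloc(void) {
	void *p = mallocx(200, 0);
	if (p != NULL) {
		sdallocx(p, 200, 0);	/* Size matches the original request. */
	}
}
#endif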
2881 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2883 je_nallocx(size_t size, int flags) {
2889 if (unlikely(malloc_init())) {
2893 tsdn = tsdn_fetch();
2894 check_entry_exit_locking(tsdn);
2896 usize = inallocx(tsdn, size, flags);
2897 if (unlikely(usize > LARGE_MAXCLASS)) {
2901 check_entry_exit_locking(tsdn);
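/*
 * Usage sketch (editorial): nallocx() reports the usable size that mallocx()
 * would produce for a given size/flags pair, without allocating.  Not
 * compiled.
 */
#if 0
#include <jemalloc/jemalloc.h>
size_t
example_nallocx(void) {
	/* Real usable size of a 100-byte, 16-byte-aligned request. */
	return nallocx(100, MALLOCX_ALIGN(16));
}
#endif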
2905 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2906 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2911 if (unlikely(malloc_init())) {
2916 check_entry_exit_locking(tsd_tsdn(tsd));
2917 ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
2918 check_entry_exit_locking(tsd_tsdn(tsd));
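/*
 * Usage sketch (editorial): reading and writing options through mallctl().
 * The names used here ("stats.allocated", "background_thread") are from the
 * documented mallctl namespace.  Not compiled.
 */
#if 0
#include <stdbool.h>
#include <jemalloc/jemalloc.h>
void
example_mallctl(void) {
	size_t allocated, sz = sizeof(allocated);
	bool enable = true;
	(void)mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	(void)mallctl("background_thread", NULL, NULL, &enable, sizeof(enable));
}
#endif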
2922 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2923 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
2926 if (unlikely(malloc_init())) {
2930 tsd_t *tsd = tsd_fetch();
2931 check_entry_exit_locking(tsd_tsdn(tsd));
2932 ret = ctl_nametomib(tsd, name, mibp, miblenp);
2933 check_entry_exit_locking(tsd_tsdn(tsd));
2937 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2938 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2939 void *newp, size_t newlen) {
2943 if (unlikely(malloc_init())) {
2948 check_entry_exit_locking(tsd_tsdn(tsd));
2949 ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
2950 check_entry_exit_locking(tsd_tsdn(tsd));
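/*
 * Usage sketch (editorial): the mib variants cache a name lookup so repeated
 * queries avoid re-parsing the string, following the documented
 * mallctlnametomib()/mallctlbymib() pattern.  Not compiled.
 */
#if 0
#include <jemalloc/jemalloc.h>
void
example_mallctlbymib(void) {
	size_t mib[2], miblen = 2;
	if (mallctlnametomib("stats.allocated", mib, &miblen) == 0) {
		size_t allocated, sz = sizeof(allocated);
		(void)mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0);
	}
}
#endif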
2954 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2955 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2959 tsdn = tsdn_fetch();
2960 check_entry_exit_locking(tsdn);
2961 stats_print(write_cb, cbopaque, opts);
2962 check_entry_exit_locking(tsdn);
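/*
 * Usage sketch (editorial): malloc_stats_print() with an opts string; per the
 * manual, "J" selects JSON-formatted output.  Not compiled.
 */
#if 0
#include <jemalloc/jemalloc.h>
void
example_stats(void) {
	malloc_stats_print(NULL, NULL, "J");
}
#endif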
2965 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2966 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
2970 assert(malloc_initialized() || IS_INITIALIZER);
2972 tsdn = tsdn_fetch();
2973 check_entry_exit_locking(tsdn);
2975 if (unlikely(ptr == NULL)) {
2978 if (config_debug || force_ivsalloc) {
2979 ret = ivsalloc(tsdn, ptr);
2980 assert(force_ivsalloc || ret != 0);
2982 ret = isalloc(tsdn, ptr);
2986 check_entry_exit_locking(tsdn);
2990 /*
2991 * End non-standard functions.
2992 */
2993 /******************************************************************************/
2994 /*
2995 * Begin compatibility functions.
2996 */
2998 #define ALLOCM_LG_ALIGN(la) (la)
2999 #define ALLOCM_ALIGN(a) (ffsl(a)-1)
3000 #define ALLOCM_ZERO ((int)0x40)
3001 #define ALLOCM_NO_MOVE ((int)0x80)
3003 #define ALLOCM_SUCCESS 0
3004 #define ALLOCM_ERR_OOM 1
3005 #define ALLOCM_ERR_NOT_MOVED 2
3008 je_allocm(void **ptr, size_t *rsize, size_t size, int flags) {
3009 assert(ptr != NULL);
3011 void *p = je_mallocx(size, flags);
3013 return (ALLOCM_ERR_OOM);
3015 if (rsize != NULL) {
3016 *rsize = isalloc(tsdn_fetch(), p);
3019 return ALLOCM_SUCCESS;
3023 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) {
3024 assert(ptr != NULL);
3025 assert(*ptr != NULL);
3027 assert(SIZE_T_MAX - size >= extra);
3030 bool no_move = flags & ALLOCM_NO_MOVE;
3033 size_t usize = je_xallocx(*ptr, size, extra, flags);
3034 ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
3035 if (rsize != NULL) {
3039 void *p = je_rallocx(*ptr, size+extra, flags);
3042 ret = ALLOCM_SUCCESS;
3044 ret = ALLOCM_ERR_OOM;
3046 if (rsize != NULL) {
3047 *rsize = isalloc(tsdn_fetch(), *ptr);
3054 je_sallocm(const void *ptr, size_t *rsize, int flags) {
3055 assert(rsize != NULL);
3056 *rsize = je_sallocx(ptr, flags);
3057 return ALLOCM_SUCCESS;
3061 je_dallocm(void *ptr, int flags) {
3062 je_dallocx(ptr, flags);
3063 return ALLOCM_SUCCESS;
3067 je_nallocm(size_t *rsize, size_t size, int flags) {
3068 size_t usize = je_nallocx(size, flags);
3070 return ALLOCM_ERR_OOM;
3072 if (rsize != NULL) {
3075 return ALLOCM_SUCCESS;
3078 #undef ALLOCM_LG_ALIGN
3081 #undef ALLOCM_NO_MOVE
3083 #undef ALLOCM_SUCCESS
3084 #undef ALLOCM_ERR_OOM
3085 #undef ALLOCM_ERR_NOT_MOVED
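/*
 * Usage sketch (editorial): the removed experimental API above is a thin shim
 * over the *allocx() API; an old allocm() call can be replaced by mallocx()
 * plus sallocx() for the usable size.  Return values mirror the ALLOCM_*
 * constants defined above.  Not compiled.
 */
#if 0
#include <jemalloc/jemalloc.h>
int
example_compat_replacement(void **ptr, size_t *rsize) {
	/* was: allocm(&p, rsize, 4096, ALLOCM_ZERO) */
	void *p = mallocx(4096, MALLOCX_ZERO);
	if (p == NULL) {
		return 1;	/* was: ALLOCM_ERR_OOM */
	}
	*ptr = p;
	if (rsize != NULL) {
		*rsize = sallocx(p, 0);
	}
	return 0;	/* was: ALLOCM_SUCCESS */
}
#endif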
3087 /*
3088 * End compatibility functions.
3089 */
3090 /******************************************************************************/
3091 /*
3092 * The following functions are used by threading libraries for protection of
3093 * malloc during fork().
3094 */
3096 /*
3097 * If an application creates a thread before doing any allocation in the main
3098 * thread, then calls fork(2) in the main thread followed by memory allocation
3099 * in the child process, a race can occur that results in deadlock within the
3100 * child: the main thread may have forked while the created thread had
3101 * partially initialized the allocator. Ordinarily jemalloc prevents
3102 * fork/malloc races via the following functions it registers during
3103 * initialization using pthread_atfork(), but of course that does no good if
3104 * the allocator isn't fully initialized at fork time. The following library
3105 * constructor is a partial solution to this problem. It may still be possible
3106 * to trigger the deadlock described above, but doing so would involve forking
3107 * via a library constructor that runs before jemalloc's runs.
3108 */
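/*
 * Editorial sketch: the pthread_atfork() registration the comment above
 * refers to is performed during initialization, roughly as below (simplified;
 * in this codebase the actual call site is in the initialization path and is
 * platform-conditional).  Not compiled.
 */
#if 0
static void
example_register_fork_handlers(void) {
	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child);
}
#endif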
3109 #ifndef JEMALLOC_JET
3110 JEMALLOC_ATTR(constructor)
3112 jemalloc_constructor(void) {
3117 #ifndef JEMALLOC_MUTEX_INIT_CB
3119 jemalloc_prefork(void)
3121 JEMALLOC_EXPORT void
3122 _malloc_prefork(void)
3126 unsigned i, j, narenas;
3129 #ifdef JEMALLOC_MUTEX_INIT_CB
3130 if (!malloc_initialized()) {
3134 assert(malloc_initialized());
3138 narenas = narenas_total_get();
3140 witness_prefork(tsd_witness_tsdp_get(tsd));
3141 /* Acquire all mutexes in a safe order. */
3142 ctl_prefork(tsd_tsdn(tsd));
3143 tcache_prefork(tsd_tsdn(tsd));
3144 malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3145 if (have_background_thread) {
3146 background_thread_prefork0(tsd_tsdn(tsd));
3148 prof_prefork0(tsd_tsdn(tsd));
3149 if (have_background_thread) {
3150 background_thread_prefork1(tsd_tsdn(tsd));
3152 /* Break arena prefork into stages to preserve lock order. */
3153 for (i = 0; i < 8; i++) {
3154 for (j = 0; j < narenas; j++) {
3155 if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3156 NULL) {
3157 switch (i) {
3159 case 0: arena_prefork0(tsd_tsdn(tsd), arena); break;
3162 case 1: arena_prefork1(tsd_tsdn(tsd), arena); break;
3165 case 2: arena_prefork2(tsd_tsdn(tsd), arena); break;
3168 case 3: arena_prefork3(tsd_tsdn(tsd), arena); break;
3171 case 4: arena_prefork4(tsd_tsdn(tsd), arena); break;
3174 case 5: arena_prefork5(tsd_tsdn(tsd), arena); break;
3177 case 6: arena_prefork6(tsd_tsdn(tsd), arena); break;
3180 case 7: arena_prefork7(tsd_tsdn(tsd), arena); break;
3182 default: not_reached();
3183 }
3184 }
3185 }
3186 }
3187 prof_prefork1(tsd_tsdn(tsd));
3190 #ifndef JEMALLOC_MUTEX_INIT_CB
3192 jemalloc_postfork_parent(void)
3194 JEMALLOC_EXPORT void
3195 _malloc_postfork(void)
3199 unsigned i, narenas;
3201 #ifdef JEMALLOC_MUTEX_INIT_CB
3202 if (!malloc_initialized()) {
3206 assert(malloc_initialized());
3210 witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3211 /* Release all mutexes, now that fork() has completed. */
3212 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3215 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3216 arena_postfork_parent(tsd_tsdn(tsd), arena);
3219 prof_postfork_parent(tsd_tsdn(tsd));
3220 if (have_background_thread) {
3221 background_thread_postfork_parent(tsd_tsdn(tsd));
3223 malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3224 tcache_postfork_parent(tsd_tsdn(tsd));
3225 ctl_postfork_parent(tsd_tsdn(tsd));
3229 jemalloc_postfork_child(void) {
3231 unsigned i, narenas;
3233 assert(malloc_initialized());
3237 witness_postfork_child(tsd_witness_tsdp_get(tsd));
3238 /* Release all mutexes, now that fork() has completed. */
3239 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3242 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3243 arena_postfork_child(tsd_tsdn(tsd), arena);
3246 prof_postfork_child(tsd_tsdn(tsd));
3247 if (have_background_thread) {
3248 background_thread_postfork_child(tsd_tsdn(tsd));
3250 malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3251 tcache_postfork_child(tsd_tsdn(tsd));
3252 ctl_postfork_child(tsd_tsdn(tsd));
3256 _malloc_first_thread(void)
3259 (void)malloc_mutex_first_thread();
3262 /******************************************************************************/