2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/atomic.h"
7 #include "jemalloc/internal/ctl.h"
8 #include "jemalloc/internal/extent_dss.h"
9 #include "jemalloc/internal/extent_mmap.h"
10 #include "jemalloc/internal/jemalloc_internal_types.h"
11 #include "jemalloc/internal/log.h"
12 #include "jemalloc/internal/malloc_io.h"
13 #include "jemalloc/internal/mutex.h"
14 #include "jemalloc/internal/rtree.h"
15 #include "jemalloc/internal/size_classes.h"
16 #include "jemalloc/internal/spin.h"
17 #include "jemalloc/internal/sz.h"
18 #include "jemalloc/internal/ticker.h"
19 #include "jemalloc/internal/util.h"
21 /******************************************************************************/
24 /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
25 const char *__malloc_options_1_0 = NULL;
26 __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
28 /* Runtime configuration options. */
29 const char *je_malloc_conf
48 const char *opt_junk =
49 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
56 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
63 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
70 bool opt_utrace = false;
71 bool opt_xmalloc = false;
72 bool opt_zero = false;
73 unsigned opt_narenas = 0;
77 /* Protects arenas initialization. */
78 malloc_mutex_t arenas_lock;
80 * Arenas that are used to service external requests. Not all elements of the
81 * arenas array are necessarily used; arenas are created lazily as needed.
83 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
84 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
85 * takes some action to create them and allocate from them.
87 * Points to an arena_t.
89 JEMALLOC_ALIGNED(CACHELINE)
90 atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
91 static atomic_u_t narenas_total; /* Use narenas_total_*(). */
92 static arena_t *a0; /* arenas[0]; read-only after initialization. */
93 unsigned narenas_auto; /* Read-only after initialization. */
96 malloc_init_uninitialized = 3,
97 malloc_init_a0_initialized = 2,
98 malloc_init_recursible = 1,
99 malloc_init_initialized = 0 /* Common case --> jnz. */
101 static malloc_init_t malloc_init_state = malloc_init_uninitialized;
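/*
 * Added commentary: because malloc_init_initialized is 0, the common-case
 * test (malloc_init_state == malloc_init_initialized) is a compare against
 * zero, which compilers typically emit as a single test/jnz -- hence the
 * "Common case --> jnz" note above.
 */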
103 /* False should be the common case. Set to true to trigger initialization. */
104 bool malloc_slow = true;
106 /* When malloc_slow is true, set the corresponding bits for sanity check. */
108 flag_opt_junk_alloc = (1U),
109 flag_opt_junk_free = (1U << 1),
110 flag_opt_zero = (1U << 2),
111 flag_opt_utrace = (1U << 3),
112 flag_opt_xmalloc = (1U << 4)
114 static uint8_t malloc_slow_flags;
116 #ifdef JEMALLOC_THREADED_INIT
117 /* Used to let the initializing thread recursively allocate. */
118 # define NO_INITIALIZER ((unsigned long)0)
119 # define INITIALIZER pthread_self()
120 # define IS_INITIALIZER (malloc_initializer == pthread_self())
121 static pthread_t malloc_initializer = NO_INITIALIZER;
123 # define NO_INITIALIZER false
124 # define INITIALIZER true
125 # define IS_INITIALIZER malloc_initializer
126 static bool malloc_initializer = NO_INITIALIZER;
129 /* Used to avoid initialization races. */
131 #if _WIN32_WINNT >= 0x0600
132 static malloc_mutex_t init_lock = SRWLOCK_INIT;
134 static malloc_mutex_t init_lock;
135 static bool init_lock_initialized = false;
137 JEMALLOC_ATTR(constructor)
139 _init_init_lock(void) {
141 * If another constructor in the same binary is using mallctl to e.g.
142 * set up extent hooks, it may end up running before this one, and
143 * malloc_init_hard will crash trying to lock the uninitialized lock. So
144 * we force an initialization of the lock in malloc_init_hard as well.
145 * We make no attempt to guarantee atomicity of the accesses to the
146 * init_lock_initialized boolean, since it really only matters early in
147 * the process creation, before any separate thread normally starts
150 if (!init_lock_initialized) {
151 malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
152 malloc_mutex_rank_exclusive);
154 init_lock_initialized = true;
158 # pragma section(".CRT$XCU", read)
159 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
160 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
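/*
 * Added commentary: placing a pointer to _init_init_lock in the ".CRT$XCU"
 * section makes the MSVC C runtime invoke it during CRT startup, before
 * main(), mirroring the __attribute__((constructor)) path used on other
 * platforms.
 */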
164 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
168 void *p; /* Input pointer (as in realloc(p, s)). */
169 size_t s; /* Request size. */
170 void *r; /* Result pointer. */
173 #ifdef JEMALLOC_UTRACE
174 # define UTRACE(a, b, c) do { \
175 if (unlikely(opt_utrace)) { \
176 int utrace_serrno = errno; \
177 malloc_utrace_t ut; \
181 utrace(&ut, sizeof(ut)); \
182 errno = utrace_serrno; \
186 # define UTRACE(a, b, c)
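/*
 * Added commentary (illustrative): when opt_utrace is enabled, the allocation
 * entry points log a malloc_utrace_t record via utrace(2); e.g. je_realloc(p,
 * s) returning r is traced as UTRACE(p, s, r), and a failed allocation as
 * UTRACE(NULL, size, NULL).
 */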
189 /* Whether any invalid config options were encountered. */
190 static bool had_conf_error = false;
192 /******************************************************************************/
194 * Function prototypes for static functions that are referenced prior to
198 static bool malloc_init_hard_a0(void);
199 static bool malloc_init_hard(void);
201 /******************************************************************************/
203 * Begin miscellaneous support functions.
207 malloc_initialized(void) {
208 return (malloc_init_state == malloc_init_initialized);
211 JEMALLOC_ALWAYS_INLINE bool
212 malloc_init_a0(void) {
213 if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
214 return malloc_init_hard_a0();
219 JEMALLOC_ALWAYS_INLINE bool
221 if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
228 * The a0*() functions are used instead of i{d,}alloc() in situations that
229 * cannot tolerate TLS variable access.
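 * For example (added commentary), they back the bootstrap_*() wrappers below,
 * which FreeBSD's libc calls in situations where TLS itself is still being
 * set up.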
233 a0ialloc(size_t size, bool zero, bool is_internal) {
234 if (unlikely(malloc_init_a0())) {
238 return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
239 is_internal, arena_get(TSDN_NULL, 0, true), true);
243 a0idalloc(void *ptr, bool is_internal) {
244 idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
248 a0malloc(size_t size) {
249 return a0ialloc(size, false, true);
253 a0dalloc(void *ptr) {
254 a0idalloc(ptr, true);
258 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
259 * situations that cannot tolerate TLS variable access (TLS allocation and very
260 * early internal data structure initialization).
264 bootstrap_malloc(size_t size) {
265 if (unlikely(size == 0)) {
269 return a0ialloc(size, false, false);
273 bootstrap_calloc(size_t num, size_t size) {
276 num_size = num * size;
277 if (unlikely(num_size == 0)) {
278 assert(num == 0 || size == 0);
282 return a0ialloc(num_size, true, false);
286 bootstrap_free(void *ptr) {
287 if (unlikely(ptr == NULL)) {
291 a0idalloc(ptr, false);
295 arena_set(unsigned ind, arena_t *arena) {
296 atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
300 narenas_total_set(unsigned narenas) {
301 atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
305 narenas_total_inc(void) {
306 atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
310 narenas_total_get(void) {
311 return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
314 /* Create a new arena and insert it into the arenas array at index ind. */
316 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
319 assert(ind <= narenas_total_get());
320 if (ind >= MALLOCX_ARENA_LIMIT) {
323 if (ind == narenas_total_get()) {
328 * Another thread may have already initialized arenas[ind] if it's an
331 arena = arena_get(tsdn, ind, false);
333 assert(ind < narenas_auto);
337 /* Actually initialize the arena. */
338 arena = arena_new(tsdn, ind, extent_hooks);
344 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
348 if (have_background_thread) {
350 malloc_mutex_lock(tsdn, &background_thread_lock);
351 err = background_thread_create(tsdn_tsd(tsdn), ind);
352 malloc_mutex_unlock(tsdn, &background_thread_lock);
354 malloc_printf("<jemalloc>: error in background thread "
355 "creation for arena %u. Abort.\n", ind);
362 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
365 malloc_mutex_lock(tsdn, &arenas_lock);
366 arena = arena_init_locked(tsdn, ind, extent_hooks);
367 malloc_mutex_unlock(tsdn, &arenas_lock);
369 arena_new_create_background_thread(tsdn, ind);
375 arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
376 arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
377 arena_nthreads_inc(arena, internal);
380 tsd_iarena_set(tsd, arena);
382 tsd_arena_set(tsd, arena);
387 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
388 arena_t *oldarena, *newarena;
390 oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
391 newarena = arena_get(tsd_tsdn(tsd), newind, false);
392 arena_nthreads_dec(oldarena, false);
393 arena_nthreads_inc(newarena, false);
394 tsd_arena_set(tsd, newarena);
398 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
401 arena = arena_get(tsd_tsdn(tsd), ind, false);
402 arena_nthreads_dec(arena, internal);
405 tsd_iarena_set(tsd, NULL);
407 tsd_arena_set(tsd, NULL);
412 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
413 arena_tdata_t *tdata, *arenas_tdata_old;
414 arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
415 unsigned narenas_tdata_old, i;
416 unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
417 unsigned narenas_actual = narenas_total_get();
420 * Dissociate old tdata array (and set up for deallocation upon return)
423 if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
424 arenas_tdata_old = arenas_tdata;
425 narenas_tdata_old = narenas_tdata;
428 tsd_arenas_tdata_set(tsd, arenas_tdata);
429 tsd_narenas_tdata_set(tsd, narenas_tdata);
431 arenas_tdata_old = NULL;
432 narenas_tdata_old = 0;
435 /* Allocate tdata array if it's missing. */
436 if (arenas_tdata == NULL) {
437 bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
438 narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
440 if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
441 *arenas_tdata_bypassp = true;
442 arenas_tdata = (arena_tdata_t *)a0malloc(
443 sizeof(arena_tdata_t) * narenas_tdata);
444 *arenas_tdata_bypassp = false;
446 if (arenas_tdata == NULL) {
450 assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
451 tsd_arenas_tdata_set(tsd, arenas_tdata);
452 tsd_narenas_tdata_set(tsd, narenas_tdata);
456 * Copy to tdata array. It's possible that the actual number of arenas
457 * has increased since narenas_total_get() was called above, but that
458 * causes no correctness issues unless two threads concurrently execute
459 * the arenas.create mallctl, which we trust mallctl synchronization to
463 /* Copy/initialize tickers. */
464 for (i = 0; i < narenas_actual; i++) {
465 if (i < narenas_tdata_old) {
466 ticker_copy(&arenas_tdata[i].decay_ticker,
467 &arenas_tdata_old[i].decay_ticker);
469 ticker_init(&arenas_tdata[i].decay_ticker,
470 DECAY_NTICKS_PER_UPDATE);
473 if (narenas_tdata > narenas_actual) {
474 memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
475 * (narenas_tdata - narenas_actual));
478 /* Read the refreshed tdata array. */
479 tdata = &arenas_tdata[ind];
481 if (arenas_tdata_old != NULL) {
482 a0dalloc(arenas_tdata_old);
487 /* Slow path, called only by arena_choose(). */
489 arena_choose_hard(tsd_t *tsd, bool internal) {
490 arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
492 if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
493 unsigned choose = percpu_arena_choose();
494 ret = arena_get(tsd_tsdn(tsd), choose, true);
496 arena_bind(tsd, arena_ind_get(ret), false);
497 arena_bind(tsd, arena_ind_get(ret), true);
502 if (narenas_auto > 1) {
503 unsigned i, j, choose[2], first_null;
504 bool is_new_arena[2];
507 * Determine binding for both non-internal and internal
510 * choose[0]: For application allocation.
511 * choose[1]: For internal metadata allocation.
514 for (j = 0; j < 2; j++) {
516 is_new_arena[j] = false;
519 first_null = narenas_auto;
520 malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
521 assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
522 for (i = 1; i < narenas_auto; i++) {
523 if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
525 * Choose the first arena that has the lowest
526 * number of threads assigned to it.
528 for (j = 0; j < 2; j++) {
529 if (arena_nthreads_get(arena_get(
530 tsd_tsdn(tsd), i, false), !!j) <
531 arena_nthreads_get(arena_get(
532 tsd_tsdn(tsd), choose[j], false),
537 } else if (first_null == narenas_auto) {
539 * Record the index of the first uninitialized
540 * arena, in case all extant arenas are in use.
542 * NB: It is possible for there to be
543 * discontinuities in terms of initialized
544 * versus uninitialized arenas, due to the
545 * "thread.arena" mallctl.
551 for (j = 0; j < 2; j++) {
552 if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
553 choose[j], false), !!j) == 0 || first_null ==
556 * Use an unloaded arena, or the least loaded
557 * arena if all arenas are already initialized.
559 if (!!j == internal) {
560 ret = arena_get(tsd_tsdn(tsd),
566 /* Initialize a new arena. */
567 choose[j] = first_null;
568 arena = arena_init_locked(tsd_tsdn(tsd),
570 (extent_hooks_t *)&extent_hooks_default);
572 malloc_mutex_unlock(tsd_tsdn(tsd),
576 is_new_arena[j] = true;
577 if (!!j == internal) {
581 arena_bind(tsd, choose[j], !!j);
583 malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
585 for (j = 0; j < 2; j++) {
586 if (is_new_arena[j]) {
587 assert(choose[j] > 0);
588 arena_new_create_background_thread(
589 tsd_tsdn(tsd), choose[j]);
594 ret = arena_get(tsd_tsdn(tsd), 0, false);
595 arena_bind(tsd, 0, false);
596 arena_bind(tsd, 0, true);
603 iarena_cleanup(tsd_t *tsd) {
606 iarena = tsd_iarena_get(tsd);
607 if (iarena != NULL) {
608 arena_unbind(tsd, arena_ind_get(iarena), true);
613 arena_cleanup(tsd_t *tsd) {
616 arena = tsd_arena_get(tsd);
618 arena_unbind(tsd, arena_ind_get(arena), false);
623 arenas_tdata_cleanup(tsd_t *tsd) {
624 arena_tdata_t *arenas_tdata;
626 /* Prevent tsd->arenas_tdata from being (re)created. */
627 *tsd_arenas_tdata_bypassp_get(tsd) = true;
629 arenas_tdata = tsd_arenas_tdata_get(tsd);
630 if (arenas_tdata != NULL) {
631 tsd_arenas_tdata_set(tsd, NULL);
632 a0dalloc(arenas_tdata);
637 stats_print_atexit(void) {
645 * Merge stats from extant threads. This is racy, since
646 * individual threads do not lock when recording tcache stats
647 * events. As a consequence, the final stats may be slightly
648 * out of date by the time they are reported, if other threads
649 * continue to allocate.
651 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
652 arena_t *arena = arena_get(tsdn, i, false);
656 malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
657 ql_foreach(tcache, &arena->tcache_ql, link) {
658 tcache_stats_merge(tsdn, tcache, arena);
660 malloc_mutex_unlock(tsdn,
661 &arena->tcache_ql_mtx);
665 je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
669 * Ensure that we don't hold any locks upon entry to or exit from allocator
670 * code (in a "broad" sense that doesn't count a reentrant allocation as an
673 JEMALLOC_ALWAYS_INLINE void
674 check_entry_exit_locking(tsdn_t *tsdn) {
678 if (tsdn_null(tsdn)) {
681 tsd_t *tsd = tsdn_tsd(tsdn);
683 * It's possible we hold locks at entry/exit if we're in a nested
686 int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
687 if (reentrancy_level != 0) {
690 witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
694 * End miscellaneous support functions.
696 /******************************************************************************/
698 * Begin initialization functions.
702 jemalloc_secure_getenv(const char *name) {
703 #ifdef JEMALLOC_HAVE_SECURE_GETENV
704 return secure_getenv(name);
706 # ifdef JEMALLOC_HAVE_ISSETUGID
707 if (issetugid() != 0) {
722 result = si.dwNumberOfProcessors;
723 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
725 * glibc >= 2.6 has the CPU_COUNT macro.
727 * glibc's sysconf() uses isspace(). glibc allocates for the first time
728 * *before* setting up the isspace tables. Therefore we need a
729 * different method to get the number of CPUs.
734 pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
735 result = CPU_COUNT(&set);
738 result = sysconf(_SC_NPROCESSORS_ONLN);
740 return ((result == -1) ? 1 : (unsigned)result);
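/*
 * Added commentary: on the affinity-mask path above, CPU_COUNT() counts only
 * the CPUs this process is allowed to run on, which avoids glibc's early
 * isspace()/allocation problem in sysconf() and also reflects external
 * affinity restrictions (e.g. taskset/numactl).
 */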
744 init_opt_stats_print_opts(const char *v, size_t vlen) {
745 size_t opts_len = strlen(opt_stats_print_opts);
746 assert(opts_len <= stats_print_tot_num_options);
748 for (size_t i = 0; i < vlen; i++) {
750 #define OPTION(o, v, d, s) case o: break;
756 if (strchr(opt_stats_print_opts, v[i]) != NULL) {
757 /* Ignore repeated option characters. */
761 opt_stats_print_opts[opts_len++] = v[i];
762 opt_stats_print_opts[opts_len] = '\0';
763 assert(opts_len <= stats_print_tot_num_options);
765 assert(opts_len == strlen(opt_stats_print_opts));
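/*
 * Added commentary: malloc_conf_next() scans one "key:value" element from
 * *opts_p.  It points *k_p/*v_p into the original string (nothing is copied),
 * records the key and value lengths, optimistically consumes a trailing comma
 * if present, and returns true when the input is exhausted or malformed,
 * which terminates the option-processing loop in malloc_conf_init().
 */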
769 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
770 char const **v_p, size_t *vlen_p) {
772 const char *opts = *opts_p;
776 for (accept = false; !accept;) {
778 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
779 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
780 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
781 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
783 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
784 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
785 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
786 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
788 case '0': case '1': case '2': case '3': case '4': case '5':
789 case '6': case '7': case '8': case '9':
795 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
800 if (opts != *opts_p) {
801 malloc_write("<jemalloc>: Conf string ends "
806 malloc_write("<jemalloc>: Malformed conf string\n");
811 for (accept = false; !accept;) {
816 * Look ahead one character here, because the next time
817 * this function is called, it will assume that end of
818 * input has been cleanly reached if no input remains,
819 * but we have optimistically already consumed the
820 * comma if one exists.
823 malloc_write("<jemalloc>: Conf string ends "
826 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
830 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
844 malloc_abort_invalid_conf(void) {
845 assert(opt_abort_conf);
846 malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
847 "value (see above).\n");
852 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
854 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
856 /* If abort_conf is set, error out after processing all options. */
857 had_conf_error = true;
861 malloc_slow_flag_init(void) {
863 * Combine the runtime options into malloc_slow for fast path. Called
864 * after processing all the options.
866 malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
867 | (opt_junk_free ? flag_opt_junk_free : 0)
868 | (opt_zero ? flag_opt_zero : 0)
869 | (opt_utrace ? flag_opt_utrace : 0)
870 | (opt_xmalloc ? flag_opt_xmalloc : 0);
872 malloc_slow = (malloc_slow_flags != 0);
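/*
 * Example (added commentary): with MALLOC_CONF="junk:true,zero:true",
 * opt_junk_alloc, opt_junk_free and opt_zero are all set, so malloc_slow_flags
 * becomes (flag_opt_junk_alloc | flag_opt_junk_free | flag_opt_zero) and
 * malloc_slow ends up true, steering allocations onto the slow path.
 */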
876 malloc_conf_init(void) {
878 char buf[PATH_MAX + 1];
879 const char *opts, *k, *v;
882 for (i = 0; i < 4; i++) {
883 /* Get runtime configuration. */
886 opts = config_malloc_conf;
889 if (je_malloc_conf != NULL) {
891 * Use options that were compiled into the
894 opts = je_malloc_conf;
896 /* No configuration specified. */
904 int saved_errno = errno;
905 const char *linkname =
906 # ifdef JEMALLOC_PREFIX
907 "/etc/"JEMALLOC_PREFIX"malloc.conf"
914 * Try to use the contents of the "/etc/malloc.conf"
915 * symbolic link's name.
917 linklen = readlink(linkname, buf, sizeof(buf) - 1);
919 /* No configuration specified. */
922 set_errno(saved_errno);
929 const char *envname =
930 #ifdef JEMALLOC_PREFIX
931 JEMALLOC_CPREFIX"MALLOC_CONF"
937 if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
939 * Do nothing; opts is already initialized to
940 * the value of the MALLOC_CONF environment
944 /* No configuration specified. */
955 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
957 #define CONF_MATCH(n) \
958 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
959 #define CONF_MATCH_VALUE(n) \
960 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
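/*
 * Example (added commentary): for MALLOC_CONF="abort_conf:true,narenas:4",
 * malloc_conf_next() yields the pairs ("abort_conf", "true") and
 * ("narenas", "4").  CONF_MATCH("abort_conf") matches the key by length and
 * content, CONF_MATCH_VALUE("true") matches the value, and the CONF_HANDLE_*
 * macros below parse and range-check numeric values.
 */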
961 #define CONF_HANDLE_BOOL(o, n) \
962 if (CONF_MATCH(n)) { \
963 if (CONF_MATCH_VALUE("true")) { \
965 } else if (CONF_MATCH_VALUE("false")) { \
969 "Invalid conf value", \
974 #define CONF_MIN_no(um, min) false
975 #define CONF_MIN_yes(um, min) ((um) < (min))
976 #define CONF_MAX_no(um, max) false
977 #define CONF_MAX_yes(um, max) ((um) > (max))
978 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
979 if (CONF_MATCH(n)) { \
984 um = malloc_strtoumax(v, &end, 0); \
985 if (get_errno() != 0 || (uintptr_t)end -\
986 (uintptr_t)v != vlen) { \
988 "Invalid conf value", \
991 if (CONF_MIN_##check_min(um, \
995 CONF_MAX_##check_max(um, \
1002 if (CONF_MIN_##check_min(um, \
1004 CONF_MAX_##check_max(um, \
1006 malloc_conf_error( \
1009 k, klen, v, vlen); \
1016 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
1018 CONF_HANDLE_T_U(unsigned, o, n, min, max, \
1019 check_min, check_max, clip)
1020 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
1021 CONF_HANDLE_T_U(size_t, o, n, min, max, \
1022 check_min, check_max, clip)
1023 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
1024 if (CONF_MATCH(n)) { \
1029 l = strtol(v, &end, 0); \
1030 if (get_errno() != 0 || (uintptr_t)end -\
1031 (uintptr_t)v != vlen) { \
1032 malloc_conf_error( \
1033 "Invalid conf value", \
1034 k, klen, v, vlen); \
1035 } else if (l < (ssize_t)(min) || l > \
1037 malloc_conf_error( \
1038 "Out-of-range conf value", \
1039 k, klen, v, vlen); \
1045 #define CONF_HANDLE_CHAR_P(o, n, d) \
1046 if (CONF_MATCH(n)) { \
1047 size_t cpylen = (vlen <= \
1048 sizeof(o)-1) ? vlen : \
1050 strncpy(o, v, cpylen); \
1055 CONF_HANDLE_BOOL(opt_abort, "abort")
1056 CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1057 if (strncmp("metadata_thp", k, klen) == 0) {
1060 for (i = 0; i < metadata_thp_mode_limit; i++) {
1061 if (strncmp(metadata_thp_mode_names[i],
1063 opt_metadata_thp = i;
1069 malloc_conf_error("Invalid conf value",
1074 CONF_HANDLE_BOOL(opt_retain, "retain")
1075 if (strncmp("dss", k, klen) == 0) {
1078 for (i = 0; i < dss_prec_limit; i++) {
1079 if (strncmp(dss_prec_names[i], v, vlen)
1081 if (extent_dss_prec_set(i)) {
1083 "Error setting dss",
1094 malloc_conf_error("Invalid conf value",
1099 CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1100 UINT_MAX, yes, no, false)
1101 CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1102 "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1103 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1105 CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1106 "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1107 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1109 CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1110 if (CONF_MATCH("stats_print_opts")) {
1111 init_opt_stats_print_opts(v, vlen);
1115 if (CONF_MATCH("junk")) {
1116 if (CONF_MATCH_VALUE("true")) {
1118 opt_junk_alloc = opt_junk_free =
1120 } else if (CONF_MATCH_VALUE("false")) {
1122 opt_junk_alloc = opt_junk_free =
1124 } else if (CONF_MATCH_VALUE("alloc")) {
1126 opt_junk_alloc = true;
1127 opt_junk_free = false;
1128 } else if (CONF_MATCH_VALUE("free")) {
1130 opt_junk_alloc = false;
1131 opt_junk_free = true;
1134 "Invalid conf value", k,
1139 CONF_HANDLE_BOOL(opt_zero, "zero")
1141 if (config_utrace) {
1142 CONF_HANDLE_BOOL(opt_utrace, "utrace")
1144 if (config_xmalloc) {
1145 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1147 CONF_HANDLE_BOOL(opt_tcache, "tcache")
1148 CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
1149 "lg_extent_max_active_fit", 0,
1150 (sizeof(size_t) << 3), yes, yes, false)
1151 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1152 -1, (sizeof(size_t) << 3) - 1)
1153 if (strncmp("percpu_arena", k, klen) == 0) {
1155 for (int i = percpu_arena_mode_names_base; i <
1156 percpu_arena_mode_names_limit; i++) {
1157 if (strncmp(percpu_arena_mode_names[i],
1159 if (!have_percpu_arena) {
1161 "No getcpu support",
1164 opt_percpu_arena = i;
1170 malloc_conf_error("Invalid conf value",
1175 CONF_HANDLE_BOOL(opt_background_thread,
1176 "background_thread");
1177 CONF_HANDLE_SIZE_T(opt_max_background_threads,
1178 "max_background_threads", 1,
1179 opt_max_background_threads, yes, yes,
1182 CONF_HANDLE_BOOL(opt_prof, "prof")
1183 CONF_HANDLE_CHAR_P(opt_prof_prefix,
1184 "prof_prefix", "jeprof")
1185 CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1186 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1187 "prof_thread_active_init")
1188 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1189 "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1191 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1192 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1193 "lg_prof_interval", -1,
1194 (sizeof(uint64_t) << 3) - 1)
1195 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1196 CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1197 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1200 if (CONF_MATCH("log")) {
1202 vlen <= sizeof(log_var_names) ?
1203 vlen : sizeof(log_var_names) - 1);
1204 strncpy(log_var_names, v, cpylen);
1205 log_var_names[cpylen] = '\0';
1209 if (CONF_MATCH("thp")) {
1211 for (int i = 0; i < thp_mode_names_limit; i++) {
1212 if (strncmp(thp_mode_names[i], v, vlen)
1214 if (!have_madvise_huge) {
1225 malloc_conf_error("Invalid conf value",
1230 malloc_conf_error("Invalid conf pair", k, klen, v,
1233 #undef CONF_MATCH_VALUE
1234 #undef CONF_HANDLE_BOOL
1239 #undef CONF_HANDLE_T_U
1240 #undef CONF_HANDLE_UNSIGNED
1241 #undef CONF_HANDLE_SIZE_T
1242 #undef CONF_HANDLE_SSIZE_T
1243 #undef CONF_HANDLE_CHAR_P
1245 if (opt_abort_conf && had_conf_error) {
1246 malloc_abort_invalid_conf();
1249 atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
1253 malloc_init_hard_needed(void) {
1254 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1255 malloc_init_recursible)) {
1257 * Another thread initialized the allocator before this one
1258 * acquired init_lock, or this thread is the initializing
1259 * thread, and it is recursively allocating.
1263 #ifdef JEMALLOC_THREADED_INIT
1264 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1265 /* Busy-wait until the initializing thread completes. */
1266 spin_t spinner = SPIN_INITIALIZER;
1268 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1269 spin_adaptive(&spinner);
1270 malloc_mutex_lock(TSDN_NULL, &init_lock);
1271 } while (!malloc_initialized());
1279 malloc_init_hard_a0_locked() {
1280 malloc_initializer = INITIALIZER;
1286 if (opt_stats_print) {
1287 /* Print statistics at exit. */
1288 if (atexit(stats_print_atexit) != 0) {
1289 malloc_write("<jemalloc>: Error in atexit()\n");
1298 if (base_boot(TSDN_NULL)) {
1301 if (extent_boot()) {
1311 if (tcache_boot(TSDN_NULL)) {
1314 if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1315 malloc_mutex_rank_exclusive)) {
1319 * Create enough scaffolding to allow recursive allocation in
1323 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1325 * Initialize one arena here. The rest are lazily created in
1326 * arena_choose_hard().
1328 if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
1332 a0 = arena_get(TSDN_NULL, 0, false);
1333 malloc_init_state = malloc_init_a0_initialized;
1339 malloc_init_hard_a0(void) {
1342 malloc_mutex_lock(TSDN_NULL, &init_lock);
1343 ret = malloc_init_hard_a0_locked();
1344 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1348 /* Initialize data structures which may trigger recursive allocation. */
1350 malloc_init_hard_recursible(void) {
1351 malloc_init_state = malloc_init_recursible;
1353 ncpus = malloc_ncpus();
1355 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1356 && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1357 !defined(__native_client__))
1358 /* LinuxThreads' pthread_atfork() allocates. */
1359 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1360 jemalloc_postfork_child) != 0) {
1361 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1369 if (background_thread_boot0()) {
1377 malloc_narenas_default(void) {
1380 * For SMP systems, create more than one arena per CPU by
1390 static percpu_arena_mode_t
1391 percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1392 assert(!malloc_initialized());
1393 assert(mode <= percpu_arena_disabled);
1395 if (mode != percpu_arena_disabled) {
1396 mode += percpu_arena_mode_enabled_base;
1403 malloc_init_narenas(void) {
1406 if (opt_percpu_arena != percpu_arena_disabled) {
1407 if (!have_percpu_arena || malloc_getcpu() < 0) {
1408 opt_percpu_arena = percpu_arena_disabled;
1409 malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1410 "available. Setting narenas to %u.\n", opt_narenas ?
1411 opt_narenas : malloc_narenas_default());
1416 if (ncpus >= MALLOCX_ARENA_LIMIT) {
1417 malloc_printf("<jemalloc>: narenas w/ percpu "
1418 "arena beyond limit (%d)\n", ncpus);
1424 /* NB: opt_percpu_arena isn't fully initialized yet. */
1425 if (percpu_arena_as_initialized(opt_percpu_arena) ==
1426 per_phycpu_arena && ncpus % 2 != 0) {
1427 malloc_printf("<jemalloc>: invalid "
1428 "configuration -- per physical CPU arena "
1429 "with odd number (%u) of CPUs (no hyper "
1430 "threading?).\n", ncpus);
1434 unsigned n = percpu_arena_ind_limit(
1435 percpu_arena_as_initialized(opt_percpu_arena));
1436 if (opt_narenas < n) {
1438 * If narenas is specified with percpu_arena
1439 * enabled, actual narenas is set as the greater
1440 * of the two. percpu_arena_choose will be free
1441 * to use any of the arenas based on CPU
1442 * id. This is conservative (at a small cost)
1443 * but ensures correctness.
1445 * If for some reason the ncpus determined at
1446 * boot is not the actual number (e.g. because
1447 * of affinity setting from numactl), reserving
1448 * narenas this way provides a workaround for
1455 if (opt_narenas == 0) {
1456 opt_narenas = malloc_narenas_default();
1458 assert(opt_narenas > 0);
1460 narenas_auto = opt_narenas;
1462 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1464 if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1465 narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1466 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1469 narenas_total_set(narenas_auto);
1475 malloc_init_percpu(void) {
1476 opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
1480 malloc_init_hard_finish(void) {
1481 if (malloc_mutex_boot()) {
1485 malloc_init_state = malloc_init_initialized;
1486 malloc_slow_flag_init();
1492 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1493 malloc_mutex_assert_owner(tsdn, &init_lock);
1494 malloc_mutex_unlock(tsdn, &init_lock);
1495 if (reentrancy_set) {
1496 assert(!tsdn_null(tsdn));
1497 tsd_t *tsd = tsdn_tsd(tsdn);
1498 assert(tsd_reentrancy_level_get(tsd) > 0);
1499 post_reentrancy(tsd);
1504 malloc_init_hard(void) {
1507 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1510 malloc_mutex_lock(TSDN_NULL, &init_lock);
1512 #define UNLOCK_RETURN(tsdn, ret, reentrancy) \
1513 malloc_init_hard_cleanup(tsdn, reentrancy); \
1516 if (!malloc_init_hard_needed()) {
1517 UNLOCK_RETURN(TSDN_NULL, false, false)
1520 if (malloc_init_state != malloc_init_a0_initialized &&
1521 malloc_init_hard_a0_locked()) {
1522 UNLOCK_RETURN(TSDN_NULL, true, false)
1525 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1526 /* Recursive allocation relies on functional tsd. */
1527 tsd = malloc_tsd_boot0();
1531 if (malloc_init_hard_recursible()) {
1535 malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1536 /* Set reentrancy level to 1 during init. */
1537 pre_reentrancy(tsd, NULL);
1538 /* Initialize narenas before prof_boot2 (for allocation). */
1539 if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
1540 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1542 if (config_prof && prof_boot2(tsd)) {
1543 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1546 malloc_init_percpu();
1548 if (malloc_init_hard_finish()) {
1549 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1551 post_reentrancy(tsd);
1552 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1554 witness_assert_lockless(witness_tsd_tsdn(
1555 tsd_witness_tsdp_get_unsafe(tsd)));
1557 /* Update TSD after tsd_boot1. */
1559 if (opt_background_thread) {
1560 assert(have_background_thread);
1562 * Need to finish init & unlock first before creating background
1563 * threads (pthread_create depends on malloc). ctl_init (which
1564 * sets isthreaded) needs to be called without holding any lock.
1566 background_thread_ctl_init(tsd_tsdn(tsd));
1568 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1569 bool err = background_thread_create(tsd, 0);
1570 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1575 #undef UNLOCK_RETURN
1580 * End initialization functions.
1582 /******************************************************************************/
1584 * Begin allocation-path internal functions and data structures.
1588 * Settings determined by the documented behavior of the allocation functions.
1590 typedef struct static_opts_s static_opts_t;
1591 struct static_opts_s {
1592 /* Whether or not allocation size may overflow. */
1594 /* Whether or not allocations of size 0 should be treated as size 1. */
1595 bool bump_empty_alloc;
1597 * Whether to assert that allocations are not of size 0 (after any
1600 bool assert_nonempty_alloc;
1603 * Whether or not to modify the 'result' argument to malloc in case of
1606 bool null_out_result_on_error;
1607 /* Whether to set errno when we encounter an error condition. */
1608 bool set_errno_on_error;
1611 * The minimum valid alignment for functions requesting aligned storage.
1613 size_t min_alignment;
1615 /* The error string to use if we oom. */
1616 const char *oom_string;
1617 /* The error string to use if the passed-in alignment is invalid. */
1618 const char *invalid_alignment_string;
1621 * False if we're configured to skip some time-consuming operations.
1623 * This isn't really a malloc "behavior", but it acts as a useful
1624 * summary of several other static (or at least, static after program
1625 * initialization) options.
1630 JEMALLOC_ALWAYS_INLINE void
1631 static_opts_init(static_opts_t *static_opts) {
1632 static_opts->may_overflow = false;
1633 static_opts->bump_empty_alloc = false;
1634 static_opts->assert_nonempty_alloc = false;
1635 static_opts->null_out_result_on_error = false;
1636 static_opts->set_errno_on_error = false;
1637 static_opts->min_alignment = 0;
1638 static_opts->oom_string = "";
1639 static_opts->invalid_alignment_string = "";
1640 static_opts->slow = false;
1644 * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
1645 * should have one constant here per magic value there. Note however that the
1646 * representations need not be related.
1648 #define TCACHE_IND_NONE ((unsigned)-1)
1649 #define TCACHE_IND_AUTOMATIC ((unsigned)-2)
1650 #define ARENA_IND_AUTOMATIC ((unsigned)-1)
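/*
 * Added commentary: these sentinels correspond to the MALLOCX_TCACHE_NONE and
 * MALLOCX_ARENA() encodings in the public header.  imalloc_no_sample() below
 * treats TCACHE_IND_AUTOMATIC as "use this thread's tcache", TCACHE_IND_NONE
 * as "bypass the tcache", and ARENA_IND_AUTOMATIC as "leave the arena NULL so
 * it is chosen later".
 */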
1652 typedef struct dynamic_opts_s dynamic_opts_t;
1653 struct dynamic_opts_s {
1659 unsigned tcache_ind;
1663 JEMALLOC_ALWAYS_INLINE void
1664 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1665 dynamic_opts->result = NULL;
1666 dynamic_opts->num_items = 0;
1667 dynamic_opts->item_size = 0;
1668 dynamic_opts->alignment = 0;
1669 dynamic_opts->zero = false;
1670 dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1671 dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
1674 /* ind is ignored if dopts->alignment > 0. */
1675 JEMALLOC_ALWAYS_INLINE void *
1676 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1677 size_t size, size_t usize, szind_t ind) {
1681 /* Fill in the tcache. */
1682 if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
1683 if (likely(!sopts->slow)) {
1684 /* Getting tcache ptr unconditionally. */
1685 tcache = tsd_tcachep_get(tsd);
1686 assert(tcache == tcache_get(tsd));
1688 tcache = tcache_get(tsd);
1690 } else if (dopts->tcache_ind == TCACHE_IND_NONE) {
1693 tcache = tcaches_get(tsd, dopts->tcache_ind);
1696 /* Fill in the arena. */
1697 if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
1699 * In case of automatic arena management, we defer arena
1700 * computation until as late as we can, hoping to fill the
1701 * allocation out of the tcache.
1705 arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
1708 if (unlikely(dopts->alignment != 0)) {
1709 return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
1710 dopts->zero, tcache, arena);
1713 return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
1714 arena, sopts->slow);
1717 JEMALLOC_ALWAYS_INLINE void *
1718 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1719 size_t usize, szind_t ind) {
1723 * For small allocations, sampling bumps the usize. If so, we allocate
1724 * from the ind_large bucket.
1727 size_t bumped_usize = usize;
1729 if (usize <= SMALL_MAXCLASS) {
1730 assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
1731 sz_sa2u(LARGE_MINCLASS, dopts->alignment))
1733 ind_large = sz_size2index(LARGE_MINCLASS);
1734 bumped_usize = sz_s2u(LARGE_MINCLASS);
1735 ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1736 bumped_usize, ind_large);
1737 if (unlikely(ret == NULL)) {
1740 arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1742 ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1749 * Returns true if the allocation will overflow, and false otherwise. Sets
1750 * *size to the product either way.
1752 JEMALLOC_ALWAYS_INLINE bool
1753 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1756 * This function is just num_items * item_size, except that we may have
1757 * to check for overflow.
1760 if (!may_overflow) {
1761 assert(dopts->num_items == 1);
1762 *size = dopts->item_size;
1766 /* A size_t with its high-half bits all set to 1. */
1767 static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1769 *size = dopts->item_size * dopts->num_items;
1771 if (unlikely(*size == 0)) {
1772 return (dopts->num_items != 0 && dopts->item_size != 0);
1776 * We got a non-zero size, but we don't know if we overflowed to get
1777 * there. To avoid having to do a divide, we'll be clever and note that
1778 * if both A and B can be represented in N/2 bits, then their product
1779 * can be represented in N bits (without the possibility of overflow).
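 * For example (added commentary): on a 64-bit system, high_bits is
 * 0xffffffff00000000, so if num_items and item_size are both below 2^32,
 * their product is at most (2^32 - 1)^2 = 2^64 - 2^33 + 1, which still fits
 * in a size_t and cannot have wrapped.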
1781 if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1784 if (likely(*size / dopts->item_size == dopts->num_items)) {
1790 JEMALLOC_ALWAYS_INLINE int
1791 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1792 /* Where the actual allocated memory will live. */
1793 void *allocation = NULL;
1794 /* Filled in by compute_size_with_overflow below. */
1797 * For unaligned allocations, we need only ind. For aligned
1798 * allocations, or in case of stats or profiling we need usize.
1800 * These are actually dead stores, in that their values are reset before
1801 * any branch on their value is taken. Sometimes though, it's
1802 * convenient to pass them as arguments before this point. To avoid
1803 * undefined behavior then, we initialize them with dummy stores.
1808 /* Reentrancy is only checked on slow path. */
1809 int8_t reentrancy_level;
1811 /* Compute the amount of memory the user wants. */
1812 if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1817 /* Validate the user input. */
1818 if (sopts->bump_empty_alloc) {
1819 if (unlikely(size == 0)) {
1824 if (sopts->assert_nonempty_alloc) {
1828 if (unlikely(dopts->alignment < sopts->min_alignment
1829 || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
1830 goto label_invalid_alignment;
1833 /* This is the beginning of the "core" algorithm. */
1835 if (dopts->alignment == 0) {
1836 ind = sz_size2index(size);
1837 if (unlikely(ind >= NSIZES)) {
1840 if (config_stats || (config_prof && opt_prof)) {
1841 usize = sz_index2size(ind);
1842 assert(usize > 0 && usize <= LARGE_MAXCLASS);
1845 usize = sz_sa2u(size, dopts->alignment);
1846 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1851 check_entry_exit_locking(tsd_tsdn(tsd));
1854 * If we need to handle reentrancy, we can do it out of a
1855 * known-initialized arena (i.e. arena 0).
1857 reentrancy_level = tsd_reentrancy_level_get(tsd);
1858 if (sopts->slow && unlikely(reentrancy_level > 0)) {
1860 * We should never specify particular arenas or tcaches from
1861 * within our internal allocations.
1863 assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
1864 dopts->tcache_ind == TCACHE_IND_NONE);
1865 assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
1866 dopts->tcache_ind = TCACHE_IND_NONE;
1867 /* We know that arena 0 has already been initialized. */
1868 dopts->arena_ind = 0;
1871 /* If profiling is on, get our profiling context. */
1872 if (config_prof && opt_prof) {
1874 * Note that if we're going down this path, usize must have been
1875 * initialized in the previous if statement.
1877 prof_tctx_t *tctx = prof_alloc_prep(
1878 tsd, usize, prof_active_get_unlocked(), true);
1880 alloc_ctx_t alloc_ctx;
1881 if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1882 alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
1883 allocation = imalloc_no_sample(
1884 sopts, dopts, tsd, usize, usize, ind);
1885 } else if ((uintptr_t)tctx > (uintptr_t)1U) {
1887 * Note that ind might still be 0 here. This is fine;
1888 * imalloc_sample ignores ind if dopts->alignment > 0.
1890 allocation = imalloc_sample(
1891 sopts, dopts, tsd, usize, ind);
1892 alloc_ctx.slab = false;
1897 if (unlikely(allocation == NULL)) {
1898 prof_alloc_rollback(tsd, tctx, true);
1901 prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
1904 * If dopts->alignment > 0, then ind is still 0, but usize was
1905 * computed in the previous if statement. Down the positive
1906 * alignment path, imalloc_no_sample ignores ind and size
1907 * (relying only on usize).
1909 allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
1911 if (unlikely(allocation == NULL)) {
1917 * Allocation has been done at this point. We still have some
1918 * post-allocation work to do though.
1920 assert(dopts->alignment == 0
1921 || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
1924 assert(usize == isalloc(tsd_tsdn(tsd), allocation));
1925 *tsd_thread_allocatedp_get(tsd) += usize;
1929 UTRACE(0, size, allocation);
1933 check_entry_exit_locking(tsd_tsdn(tsd));
1934 *dopts->result = allocation;
1938 if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
1939 malloc_write(sopts->oom_string);
1944 UTRACE(NULL, size, NULL);
1947 check_entry_exit_locking(tsd_tsdn(tsd));
1949 if (sopts->set_errno_on_error) {
1953 if (sopts->null_out_result_on_error) {
1954 *dopts->result = NULL;
1960 * This label is only jumped to by one goto; we move it out of line
1961 * anyway to avoid obscuring the non-error paths, and for symmetry with
1964 label_invalid_alignment:
1965 if (config_xmalloc && unlikely(opt_xmalloc)) {
1966 malloc_write(sopts->invalid_alignment_string);
1970 if (sopts->set_errno_on_error) {
1975 UTRACE(NULL, size, NULL);
1978 check_entry_exit_locking(tsd_tsdn(tsd));
1980 if (sopts->null_out_result_on_error) {
1981 *dopts->result = NULL;
1987 /* Returns the errno-style error code of the allocation. */
1988 JEMALLOC_ALWAYS_INLINE int
1989 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
1990 if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
1991 if (config_xmalloc && unlikely(opt_xmalloc)) {
1992 malloc_write(sopts->oom_string);
1995 UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
1997 *dopts->result = NULL;
2002 /* We always need the tsd. Let's grab it right away. */
2003 tsd_t *tsd = tsd_fetch();
2005 if (likely(tsd_fast(tsd))) {
2006 /* Fast and common path. */
2007 tsd_assert_fast(tsd);
2008 sopts->slow = false;
2009 return imalloc_body(sopts, dopts, tsd);
2012 return imalloc_body(sopts, dopts, tsd);
2015 /******************************************************************************/
2017 * Begin malloc(3)-compatible functions.
2020 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2021 void JEMALLOC_NOTHROW *
2022 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2023 je_malloc(size_t size) {
2025 static_opts_t sopts;
2026 dynamic_opts_t dopts;
2028 LOG("core.malloc.entry", "size: %zu", size);
2030 static_opts_init(&sopts);
2031 dynamic_opts_init(&dopts);
2033 sopts.bump_empty_alloc = true;
2034 sopts.null_out_result_on_error = true;
2035 sopts.set_errno_on_error = true;
2036 sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2038 dopts.result = &ret;
2039 dopts.num_items = 1;
2040 dopts.item_size = size;
2042 imalloc(&sopts, &dopts);
2044 LOG("core.malloc.exit", "result: %p", ret);
2049 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2050 JEMALLOC_ATTR(nonnull(1))
2051 je_posix_memalign(void **memptr, size_t alignment, size_t size) {
2053 static_opts_t sopts;
2054 dynamic_opts_t dopts;
2056 LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
2057 "size: %zu", memptr, alignment, size);
2059 static_opts_init(&sopts);
2060 dynamic_opts_init(&dopts);
2062 sopts.bump_empty_alloc = true;
2063 sopts.min_alignment = sizeof(void *);
2065 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2066 sopts.invalid_alignment_string =
2067 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2069 dopts.result = memptr;
2070 dopts.num_items = 1;
2071 dopts.item_size = size;
2072 dopts.alignment = alignment;
2074 ret = imalloc(&sopts, &dopts);
2076 LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
2082 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2083 void JEMALLOC_NOTHROW *
2084 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2085 je_aligned_alloc(size_t alignment, size_t size) {
2088 static_opts_t sopts;
2089 dynamic_opts_t dopts;
2091 LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
2094 static_opts_init(&sopts);
2095 dynamic_opts_init(&dopts);
2097 sopts.bump_empty_alloc = true;
2098 sopts.null_out_result_on_error = true;
2099 sopts.set_errno_on_error = true;
2100 sopts.min_alignment = 1;
2102 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2103 sopts.invalid_alignment_string =
2104 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2106 dopts.result = &ret;
2107 dopts.num_items = 1;
2108 dopts.item_size = size;
2109 dopts.alignment = alignment;
2111 imalloc(&sopts, &dopts);
2113 LOG("core.aligned_alloc.exit", "result: %p", ret);
2118 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2119 void JEMALLOC_NOTHROW *
2120 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2121 je_calloc(size_t num, size_t size) {
2123 static_opts_t sopts;
2124 dynamic_opts_t dopts;
2126 LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
2128 static_opts_init(&sopts);
2129 dynamic_opts_init(&dopts);
2131 sopts.may_overflow = true;
2132 sopts.bump_empty_alloc = true;
2133 sopts.null_out_result_on_error = true;
2134 sopts.set_errno_on_error = true;
2135 sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2137 dopts.result = &ret;
2138 dopts.num_items = num;
2139 dopts.item_size = size;
2142 imalloc(&sopts, &dopts);
2144 LOG("core.calloc.exit", "result: %p", ret);
2150 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2151 prof_tctx_t *tctx) {
2157 if (usize <= SMALL_MAXCLASS) {
2158 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
2162 arena_prof_promote(tsd_tsdn(tsd), p, usize);
2164 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2170 JEMALLOC_ALWAYS_INLINE void *
2171 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2172 alloc_ctx_t *alloc_ctx) {
2175 prof_tctx_t *old_tctx, *tctx;
2177 prof_active = prof_active_get_unlocked();
2178 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2179 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
2180 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2181 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
2183 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2185 if (unlikely(p == NULL)) {
2186 prof_alloc_rollback(tsd, tctx, true);
2189 prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
2195 JEMALLOC_ALWAYS_INLINE void
2196 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2198 tsd_assert_fast(tsd);
2200 check_entry_exit_locking(tsd_tsdn(tsd));
2201 if (tsd_reentrancy_level_get(tsd) != 0) {
2205 assert(ptr != NULL);
2206 assert(malloc_initialized() || IS_INITIALIZER);
2208 alloc_ctx_t alloc_ctx;
2209 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2210 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2211 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2212 assert(alloc_ctx.szind != NSIZES);
2215 if (config_prof && opt_prof) {
2216 usize = sz_index2size(alloc_ctx.szind);
2217 prof_free(tsd, ptr, usize, &alloc_ctx);
2218 } else if (config_stats) {
2219 usize = sz_index2size(alloc_ctx.szind);
2222 *tsd_thread_deallocatedp_get(tsd) += usize;
2225 if (likely(!slow_path)) {
2226 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2229 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2234 JEMALLOC_ALWAYS_INLINE void
2235 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2237 tsd_assert_fast(tsd);
2239 check_entry_exit_locking(tsd_tsdn(tsd));
2240 if (tsd_reentrancy_level_get(tsd) != 0) {
2244 assert(ptr != NULL);
2245 assert(malloc_initialized() || IS_INITIALIZER);
2247 alloc_ctx_t alloc_ctx, *ctx;
2248 if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
2250 * When cache_oblivious is disabled and ptr is not page aligned,
2251 * the allocation was not sampled -- usize can be used to
2252 * determine szind directly.
2254 alloc_ctx.szind = sz_size2index(usize);
2255 alloc_ctx.slab = true;
2258 alloc_ctx_t dbg_ctx;
2259 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2260 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
2261 rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
2263 assert(dbg_ctx.szind == alloc_ctx.szind);
2264 assert(dbg_ctx.slab == alloc_ctx.slab);
2266 } else if (config_prof && opt_prof) {
2267 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2268 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2269 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2270 assert(alloc_ctx.szind == sz_size2index(usize));
2276 if (config_prof && opt_prof) {
2277 prof_free(tsd, ptr, usize, ctx);
2280 *tsd_thread_deallocatedp_get(tsd) += usize;
2283 if (likely(!slow_path)) {
2284 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
2286 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
2290 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2291 void JEMALLOC_NOTHROW *
2292 JEMALLOC_ALLOC_SIZE(2)
2293 je_realloc(void *ptr, size_t size) {
2295 tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
2296 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
2297 size_t old_usize = 0;
2299 LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
2301 if (unlikely(size == 0)) {
2303 /* realloc(ptr, 0) is equivalent to free(ptr). */
2306 tsd_t *tsd = tsd_fetch();
2307 if (tsd_reentrancy_level_get(tsd) == 0) {
2308 tcache = tcache_get(tsd);
2312 ifree(tsd, ptr, tcache, true);
2314 LOG("core.realloc.exit", "result: %p", NULL);
2320 if (likely(ptr != NULL)) {
2321 assert(malloc_initialized() || IS_INITIALIZER);
2322 tsd_t *tsd = tsd_fetch();
2324 check_entry_exit_locking(tsd_tsdn(tsd));
2326 alloc_ctx_t alloc_ctx;
2327 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2328 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2329 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2330 assert(alloc_ctx.szind != NSIZES);
2331 old_usize = sz_index2size(alloc_ctx.szind);
2332 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2333 if (config_prof && opt_prof) {
2334 usize = sz_s2u(size);
2335 ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
2336 NULL : irealloc_prof(tsd, ptr, old_usize, usize,
2340 usize = sz_s2u(size);
2342 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
2344 tsdn = tsd_tsdn(tsd);
2346 /* realloc(NULL, size) is equivalent to malloc(size). */
2347 void *ret = je_malloc(size);
2348 LOG("core.realloc.exit", "result: %p", ret);
2352 if (unlikely(ret == NULL)) {
2353 if (config_xmalloc && unlikely(opt_xmalloc)) {
2354 malloc_write("<jemalloc>: Error in realloc(): "
2360 if (config_stats && likely(ret != NULL)) {
2363 assert(usize == isalloc(tsdn, ret));
2364 tsd = tsdn_tsd(tsdn);
2365 *tsd_thread_allocatedp_get(tsd) += usize;
2366 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2368 UTRACE(ptr, size, ret);
2369 check_entry_exit_locking(tsdn);
2371 LOG("core.realloc.exit", "result: %p", ret);
2375 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2376 je_free(void *ptr) {
2377 LOG("core.free.entry", "ptr: %p", ptr);
2380 if (likely(ptr != NULL)) {
2382 * We avoid setting up tsd fully (e.g. tcache, arena binding)
2383 * based on only free() calls -- other activities trigger the
2384 * minimal to full transition. This is because free() may
2385 * happen during thread shutdown after tls deallocation: if a
2386 * thread never had any malloc activities until then, a
2387 * fully-setup tsd won't be destructed properly.
2389 tsd_t *tsd = tsd_fetch_min();
2390 check_entry_exit_locking(tsd_tsdn(tsd));
2393 if (likely(tsd_fast(tsd))) {
2394 tsd_assert_fast(tsd);
2395 /* Unconditionally get tcache ptr on fast path. */
2396 tcache = tsd_tcachep_get(tsd);
2397 ifree(tsd, ptr, tcache, false);
2399 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2400 tcache = tcache_get(tsd);
2404 ifree(tsd, ptr, tcache, true);
2406 check_entry_exit_locking(tsd_tsdn(tsd));
2408 LOG("core.free.exit", "");
2412 * End malloc(3)-compatible functions.
2414 /******************************************************************************/
2416 * Begin non-standard override functions.
2419 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
2420 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2421 void JEMALLOC_NOTHROW *
2422 JEMALLOC_ATTR(malloc)
2423 je_memalign(size_t alignment, size_t size) {
2425 static_opts_t sopts;
2426 dynamic_opts_t dopts;
2428 LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
2431 static_opts_init(&sopts);
2432 dynamic_opts_init(&dopts);
2434 sopts.bump_empty_alloc = true;
2435 sopts.min_alignment = 1;
2437 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2438 sopts.invalid_alignment_string =
2439 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2440 sopts.null_out_result_on_error = true;
2442 dopts.result = &ret;
2443 dopts.num_items = 1;
2444 dopts.item_size = size;
2445 dopts.alignment = alignment;
2447 imalloc(&sopts, &dopts);
2449 LOG("core.memalign.exit", "result: %p", ret);
2454 #ifdef JEMALLOC_OVERRIDE_VALLOC
2455 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2456 void JEMALLOC_NOTHROW *
2457 JEMALLOC_ATTR(malloc)
2458 je_valloc(size_t size) {
2461 static_opts_t sopts;
2462 dynamic_opts_t dopts;
2464 LOG("core.valloc.entry", "size: %zu\n", size);
2466 static_opts_init(&sopts);
2467 dynamic_opts_init(&dopts);
2469 sopts.bump_empty_alloc = true;
2470 sopts.null_out_result_on_error = true;
2471 sopts.min_alignment = PAGE;
2473 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2474 sopts.invalid_alignment_string =
2475 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2477 dopts.result = &ret;
2478 dopts.num_items = 1;
2479 dopts.item_size = size;
2480 dopts.alignment = PAGE;
2482 imalloc(&sopts, &dopts);
2484 LOG("core.valloc.exit", "result: %p\n", ret);
2489 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2491 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
2492 * to inconsistently reference libc's malloc(3)-compatible functions
2493 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2495 * These definitions interpose hooks in glibc. The functions are actually
2496 * passed an extra argument for the caller return address, which will be
2499 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2500 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2501 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2502 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2503 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2509 * To enable static linking with glibc, the libc specific malloc interface must
2510 * be implemented also, so none of glibc's malloc.o functions are added to the
2513 # define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
2514 /* To force macro expansion of je_ prefix before stringification. */
2515 # define PREALIAS(je_fn) ALIAS(je_fn)
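/*
 * Editorial note (not part of jemalloc): PREALIAS adds a macro level because
 * '#' in ALIAS suppresses expansion of its argument; the extra level expands
 * any je_* macro first and only then stringifies it.  Standalone illustration
 * with hypothetical names:
 */
#if 0 /* illustrative only; not compiled */
#include <stdio.h>

#define NAME		real_name
#define STR_DIRECT(x)	#x		/* stringifies the literal token */
#define STR_EXPANDED(x)	STR_DIRECT(x)	/* expands x, then stringifies */

int
main(void) {
	printf("%s\n", STR_DIRECT(NAME));	/* prints "NAME" */
	printf("%s\n", STR_EXPANDED(NAME));	/* prints "real_name" */
	return 0;
}
#endif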
2516 # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2517 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2519 # ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2520 void __libc_free(void* ptr) PREALIAS(je_free);
2522 # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2523 void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2525 # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2526 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2528 # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2529 void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
2531 # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2532 void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2534 # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2535 int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2542 /*
2543 * End non-standard override functions.
2544 */
2545 /******************************************************************************/
2546 /*
2547 * Begin non-standard functions.
2548 */
2550 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2551 void JEMALLOC_NOTHROW *
2552 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2553 je_mallocx(size_t size, int flags) {
2555 static_opts_t sopts;
2556 dynamic_opts_t dopts;
2558 LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
2560 static_opts_init(&sopts);
2561 dynamic_opts_init(&dopts);
2563 sopts.assert_nonempty_alloc = true;
2564 sopts.null_out_result_on_error = true;
2565 sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2567 dopts.result = &ret;
2568 dopts.num_items = 1;
2569 dopts.item_size = size;
2570 if (unlikely(flags != 0)) {
2571 if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2572 dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2575 dopts.zero = MALLOCX_ZERO_GET(flags);
2577 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2578 if ((flags & MALLOCX_TCACHE_MASK)
2579 == MALLOCX_TCACHE_NONE) {
2580 dopts.tcache_ind = TCACHE_IND_NONE;
2582 dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2585 dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2588 if ((flags & MALLOCX_ARENA_MASK) != 0)
2589 dopts.arena_ind = MALLOCX_ARENA_GET(flags);
2592 imalloc(&sopts, &dopts);
2594 LOG("core.mallocx.exit", "result: %p", ret);
2595 return ret;
2596 }
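/*
 * Editorial note (not part of jemalloc): a hedged sketch of how a caller
 * combines the MALLOCX_* encodings decoded above, assuming the default
 * un-prefixed public API from <jemalloc/jemalloc.h>; the helper name is
 * hypothetical.
 */
#if 0 /* illustrative only; not compiled */
#include <jemalloc/jemalloc.h>

static void *
example_mallocx_use(void) {
	/* 4 KiB, 64-byte aligned, zero-filled, bypassing the thread cache. */
	void *p = mallocx(4096,
	    MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
	return p;	/* NULL on failure. */
}
#endif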
2598 static void *
2599 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2600 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2601 prof_tctx_t *tctx) {
2607 if (usize <= SMALL_MAXCLASS) {
2608 p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2609 alignment, zero, tcache, arena);
2613 arena_prof_promote(tsdn, p, usize);
2615 p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2616 tcache, arena);
2622 JEMALLOC_ALWAYS_INLINE void *
2623 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2624 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2625 arena_t *arena, alloc_ctx_t *alloc_ctx) {
2628 prof_tctx_t *old_tctx, *tctx;
2630 prof_active = prof_active_get_unlocked();
2631 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2632 tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
2633 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2634 p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2635 *usize, alignment, zero, tcache, arena, tctx);
2637 p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2638 zero, tcache, arena);
2640 if (unlikely(p == NULL)) {
2641 prof_alloc_rollback(tsd, tctx, false);
2645 if (p == old_ptr && alignment != 0) {
2646 /*
2647 * The allocation did not move, so it is possible that the size
2648 * class is smaller than would guarantee the requested
2649 * alignment, and that the alignment constraint was
2650 * serendipitously satisfied. Additionally, old_usize may not
2651 * be the same as the current usize because of in-place large
2652 * reallocation. Therefore, query the actual value of usize.
2653 */
2654 *usize = isalloc(tsd_tsdn(tsd), p);
2656 prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
2657 old_usize, old_tctx);
2662 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2663 void JEMALLOC_NOTHROW *
2664 JEMALLOC_ALLOC_SIZE(2)
2665 je_rallocx(void *ptr, size_t size, int flags) {
2670 size_t alignment = MALLOCX_ALIGN_GET(flags);
2671 bool zero = flags & MALLOCX_ZERO;
2675 LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2676 size, flags);
2679 assert(ptr != NULL);
2681 assert(malloc_initialized() || IS_INITIALIZER);
2683 check_entry_exit_locking(tsd_tsdn(tsd));
2685 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2686 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2687 arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2688 if (unlikely(arena == NULL)) {
2695 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2696 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2699 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2702 tcache = tcache_get(tsd);
2705 alloc_ctx_t alloc_ctx;
2706 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2707 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2708 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2709 assert(alloc_ctx.szind != NSIZES);
2710 old_usize = sz_index2size(alloc_ctx.szind);
2711 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2712 if (config_prof && opt_prof) {
2713 usize = (alignment == 0) ?
2714 sz_s2u(size) : sz_sa2u(size, alignment);
2715 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2718 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2719 zero, tcache, arena, &alloc_ctx);
2720 if (unlikely(p == NULL)) {
2724 p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2725 zero, tcache, arena);
2726 if (unlikely(p == NULL)) {
2730 usize = isalloc(tsd_tsdn(tsd), p);
2733 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2736 *tsd_thread_allocatedp_get(tsd) += usize;
2737 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2739 UTRACE(ptr, size, p);
2740 check_entry_exit_locking(tsd_tsdn(tsd));
2742 LOG("core.rallocx.exit", "result: %p", p);
2745 if (config_xmalloc && unlikely(opt_xmalloc)) {
2746 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2749 UTRACE(ptr, size, 0);
2750 check_entry_exit_locking(tsd_tsdn(tsd));
2752 LOG("core.rallocx.exit", "result: %p", NULL);
2753 return NULL;
2754 }
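/*
 * Editorial note (not part of jemalloc): rallocx() accepts the same flag
 * encodings as mallocx(); with MALLOCX_ZERO the newly extended tail of a
 * grown allocation is zero-filled.  Hypothetical helper, default public API
 * assumed.
 */
#if 0 /* illustrative only; not compiled */
#include <jemalloc/jemalloc.h>

static void *
example_grow_zeroed(void *p, size_t new_size) {
	/* On failure rallocx() returns NULL and leaves p untouched. */
	return rallocx(p, new_size, MALLOCX_ZERO);
}
#endif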
2756 JEMALLOC_ALWAYS_INLINE size_t
2757 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2758 size_t extra, size_t alignment, bool zero) {
2761 if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2764 usize = isalloc(tsdn, ptr);
2770 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2771 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2777 usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2778 zero);
2783 JEMALLOC_ALWAYS_INLINE size_t
2784 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2785 size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2786 size_t usize_max, usize;
2788 prof_tctx_t *old_tctx, *tctx;
2790 prof_active = prof_active_get_unlocked();
2791 old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2792 /*
2793 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2794 * Therefore, compute its maximum possible value and use that in
2795 * prof_alloc_prep() to decide whether to capture a backtrace.
2796 * prof_realloc() will use the actual usize to decide whether to sample.
2797 */
2798 if (alignment == 0) {
2799 usize_max = sz_s2u(size+extra);
2800 assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2802 usize_max = sz_sa2u(size+extra, alignment);
2803 if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2804 /*
2805 * usize_max is out of range, and chances are that
2806 * allocation will fail, but use the maximum possible
2807 * value and carry on with prof_alloc_prep(), just in
2808 * case allocation succeeds.
2809 */
2810 usize_max = LARGE_MAXCLASS;
2813 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2815 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2816 usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2817 size, extra, alignment, zero, tctx);
2819 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2820 extra, alignment, zero);
2822 if (usize == old_usize) {
2823 prof_alloc_rollback(tsd, tctx, false);
2826 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2827 old_tctx);
2832 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2833 je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2835 size_t usize, old_usize;
2836 size_t alignment = MALLOCX_ALIGN_GET(flags);
2837 bool zero = flags & MALLOCX_ZERO;
2839 LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
2840 "flags: %d", ptr, size, extra, flags);
2842 assert(ptr != NULL);
2844 assert(SIZE_T_MAX - size >= extra);
2845 assert(malloc_initialized() || IS_INITIALIZER);
2847 check_entry_exit_locking(tsd_tsdn(tsd));
2849 alloc_ctx_t alloc_ctx;
2850 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2851 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2852 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2853 assert(alloc_ctx.szind != NSIZES);
2854 old_usize = sz_index2size(alloc_ctx.szind);
2855 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2856 /*
2857 * The API explicitly absolves itself of protecting against (size +
2858 * extra) numerical overflow, but we may need to clamp extra to avoid
2859 * exceeding LARGE_MAXCLASS.
2860 *
2861 * Ordinarily, size limit checking is handled deeper down, but here we
2862 * have to check as part of (size + extra) clamping, since we need the
2863 * clamped value in the above helper functions.
2864 */
2865 if (unlikely(size > LARGE_MAXCLASS)) {
2866 usize = old_usize;
2867 goto label_not_resized;
2869 if (unlikely(LARGE_MAXCLASS - size < extra)) {
2870 extra = LARGE_MAXCLASS - size;
2873 if (config_prof && opt_prof) {
2874 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2875 alignment, zero, &alloc_ctx);
2877 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2878 extra, alignment, zero);
2880 if (unlikely(usize == old_usize)) {
2881 goto label_not_resized;
2885 *tsd_thread_allocatedp_get(tsd) += usize;
2886 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2887 }
2888 label_not_resized:
2889 UTRACE(ptr, size, ptr);
2890 check_entry_exit_locking(tsd_tsdn(tsd));
2892 LOG("core.xallocx.exit", "result: %zu", usize);
2893 return usize;
2894 }
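/*
 * Editorial note (not part of jemalloc): xallocx() only ever resizes in
 * place, so callers compare the returned usable size against the request to
 * detect the "not resized" case handled above.  Hypothetical helper.
 */
#if 0 /* illustrative only; not compiled */
#include <jemalloc/jemalloc.h>

static int
example_try_grow_in_place(void *p, size_t new_size) {
	size_t usize = xallocx(p, new_size, 0, 0);
	return usize >= new_size;	/* 1 if the in-place resize succeeded. */
}
#endif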
2896 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2898 je_sallocx(const void *ptr, UNUSED int flags) {
2902 LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2904 assert(malloc_initialized() || IS_INITIALIZER);
2905 assert(ptr != NULL);
2907 tsdn = tsdn_fetch();
2908 check_entry_exit_locking(tsdn);
2910 if (config_debug || force_ivsalloc) {
2911 usize = ivsalloc(tsdn, ptr);
2912 assert(force_ivsalloc || usize != 0);
2914 usize = isalloc(tsdn, ptr);
2917 check_entry_exit_locking(tsdn);
2919 LOG("core.sallocx.exit", "result: %zu", usize);
2923 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2924 je_dallocx(void *ptr, int flags) {
2925 LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2927 assert(ptr != NULL);
2928 assert(malloc_initialized() || IS_INITIALIZER);
2930 tsd_t *tsd = tsd_fetch();
2931 bool fast = tsd_fast(tsd);
2932 check_entry_exit_locking(tsd_tsdn(tsd));
2935 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2936 /* Not allowed to be reentrant and specify a custom tcache. */
2937 assert(tsd_reentrancy_level_get(tsd) == 0);
2938 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2941 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2945 tcache = tsd_tcachep_get(tsd);
2946 assert(tcache == tcache_get(tsd));
2948 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2949 tcache = tcache_get(tsd);
2958 tsd_assert_fast(tsd);
2959 ifree(tsd, ptr, tcache, false);
2961 ifree(tsd, ptr, tcache, true);
2963 check_entry_exit_locking(tsd_tsdn(tsd));
2965 LOG("core.dallocx.exit", "");
2968 JEMALLOC_ALWAYS_INLINE size_t
2969 inallocx(tsdn_t *tsdn, size_t size, int flags) {
2970 check_entry_exit_locking(tsdn);
2973 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
2974 usize = sz_s2u(size);
2976 usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2978 check_entry_exit_locking(tsdn);
2982 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2983 je_sdallocx(void *ptr, size_t size, int flags) {
2984 assert(ptr != NULL);
2985 assert(malloc_initialized() || IS_INITIALIZER);
2987 LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2988 size, flags);
2990 tsd_t *tsd = tsd_fetch();
2991 bool fast = tsd_fast(tsd);
2992 size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
2993 assert(usize == isalloc(tsd_tsdn(tsd), ptr));
2994 check_entry_exit_locking(tsd_tsdn(tsd));
2997 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2998 /* Not allowed to be reentrant and specify a custom tcache. */
2999 assert(tsd_reentrancy_level_get(tsd) == 0);
3000 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
3003 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
3007 tcache = tsd_tcachep_get(tsd);
3008 assert(tcache == tcache_get(tsd));
3010 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
3011 tcache = tcache_get(tsd);
3020 tsd_assert_fast(tsd);
3021 isfree(tsd, ptr, usize, tcache, false);
3023 isfree(tsd, ptr, usize, tcache, true);
3025 check_entry_exit_locking(tsd_tsdn(tsd));
3027 LOG("core.sdallocx.exit", "");
3028 }
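/*
 * Editorial note (not part of jemalloc): a sized-deallocation sketch; the
 * size and alignment flags passed to sdallocx() must describe the allocation
 * as it was made, since inallocx() above recomputes usize from them.
 * Hypothetical helper, default public API assumed.
 */
#if 0 /* illustrative only; not compiled */
#include <jemalloc/jemalloc.h>

static void
example_sized_free(void) {
	void *p = mallocx(200, MALLOCX_ALIGN(64));
	if (p != NULL) {
		/* Same size and alignment flag as the original request. */
		sdallocx(p, 200, MALLOCX_ALIGN(64));
	}
}
#endif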
3030 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3032 je_nallocx(size_t size, int flags) {
3038 if (unlikely(malloc_init())) {
3039 LOG("core.nallocx.exit", "result: %zu", ZU(0));
3043 tsdn = tsdn_fetch();
3044 check_entry_exit_locking(tsdn);
3046 usize = inallocx(tsdn, size, flags);
3047 if (unlikely(usize > LARGE_MAXCLASS)) {
3048 LOG("core.nallocx.exit", "result: %zu", ZU(0));
3052 check_entry_exit_locking(tsdn);
3053 LOG("core.nallocx.exit", "result: %zu", usize);
3054 return usize;
3055 }
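/*
 * Editorial note (not part of jemalloc): nallocx() reports the usable size
 * mallocx() would return for a request without allocating, so a caller can
 * round capacities up to the real size class.  Hypothetical helper.
 */
#if 0 /* illustrative only; not compiled */
#include <jemalloc/jemalloc.h>

static size_t
example_round_up_to_size_class(size_t nbytes) {
	size_t usize = nallocx(nbytes, 0);
	return usize != 0 ? usize : nbytes;	/* 0 means the request is too large. */
}
#endif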
3057 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3058 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
3059 size_t newlen) {
3063 LOG("core.mallctl.entry", "name: %s", name);
3065 if (unlikely(malloc_init())) {
3066 LOG("core.mallctl.exit", "result: %d", EAGAIN);
3071 check_entry_exit_locking(tsd_tsdn(tsd));
3072 ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
3073 check_entry_exit_locking(tsd_tsdn(tsd));
3075 LOG("core.mallctl.exit", "result: %d", ret);
3076 return ret;
3077 }
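/*
 * Editorial note (not part of jemalloc): a typical read-only mallctl() call;
 * "stats.allocated" is one of the documented statistics names, and a non-zero
 * return is an errno-style failure.  Hypothetical helper.
 */
#if 0 /* illustrative only; not compiled */
#include <jemalloc/jemalloc.h>

static size_t
example_read_stats_allocated(void) {
	size_t allocated = 0;
	size_t sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0) {
		return 0;
	}
	return allocated;
}
#endif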
3079 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3080 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
3083 LOG("core.mallctlnametomib.entry", "name: %s", name);
3085 if (unlikely(malloc_init())) {
3086 LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
3090 tsd_t *tsd = tsd_fetch();
3091 check_entry_exit_locking(tsd_tsdn(tsd));
3092 ret = ctl_nametomib(tsd, name, mibp, miblenp);
3093 check_entry_exit_locking(tsd_tsdn(tsd));
3095 LOG("core.mallctlnametomib.exit", "result: %d", ret);
3099 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3100 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
3101 void *newp, size_t newlen) {
3105 LOG("core.mallctlbymib.entry", "");
3107 if (unlikely(malloc_init())) {
3108 LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
3113 check_entry_exit_locking(tsd_tsdn(tsd));
3114 ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
3115 check_entry_exit_locking(tsd_tsdn(tsd));
3116 LOG("core.mallctlbymib.exit", "result: %d", ret);
3117 return ret;
3118 }
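/*
 * Editorial note (not part of jemalloc): the nametomib/bymib pair exists so a
 * hot path can translate a name once and then repeat the query by MIB, as the
 * documentation suggests.  Hypothetical helpers; "stats.allocated" has two
 * path components, hence the length of 2.
 */
#if 0 /* illustrative only; not compiled */
#include <jemalloc/jemalloc.h>

static size_t example_mib[2];
static size_t example_miblen = 2;

static int
example_mib_init(void) {
	return mallctlnametomib("stats.allocated", example_mib,
	    &example_miblen);
}

static size_t
example_mib_read(void) {
	size_t allocated = 0;
	size_t sz = sizeof(allocated);
	if (mallctlbymib(example_mib, example_miblen, &allocated, &sz,
	    NULL, 0) != 0) {
		return 0;
	}
	return allocated;
}
#endif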
3120 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3121 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
3122 const char *opts) {
3125 LOG("core.malloc_stats_print.entry", "");
3127 tsdn = tsdn_fetch();
3128 check_entry_exit_locking(tsdn);
3129 stats_print(write_cb, cbopaque, opts);
3130 check_entry_exit_locking(tsdn);
3131 LOG("core.malloc_stats_print.exit", "");
3134 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3135 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
3139 LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
3141 assert(malloc_initialized() || IS_INITIALIZER);
3143 tsdn = tsdn_fetch();
3144 check_entry_exit_locking(tsdn);
3146 if (unlikely(ptr == NULL)) {
3149 if (config_debug || force_ivsalloc) {
3150 ret = ivsalloc(tsdn, ptr);
3151 assert(force_ivsalloc || ret != 0);
3153 ret = isalloc(tsdn, ptr);
3157 check_entry_exit_locking(tsdn);
3158 LOG("core.malloc_usable_size.exit", "result: %zu", ret);
3162 /*
3163 * End non-standard functions.
3164 */
3165 /******************************************************************************/
3166 /*
3167 * Begin compatibility functions.
3168 */
3170 #define ALLOCM_LG_ALIGN(la) (la)
3171 #define ALLOCM_ALIGN(a) (ffsl(a)-1)
3172 #define ALLOCM_ZERO ((int)0x40)
3173 #define ALLOCM_NO_MOVE ((int)0x80)
3175 #define ALLOCM_SUCCESS 0
3176 #define ALLOCM_ERR_OOM 1
3177 #define ALLOCM_ERR_NOT_MOVED 2
3180 je_allocm(void **ptr, size_t *rsize, size_t size, int flags) {
3181 assert(ptr != NULL);
3183 void *p = je_mallocx(size, flags);
3184 if (p == NULL) {
3185 return (ALLOCM_ERR_OOM);
3186 }
3187 if (rsize != NULL) {
3188 *rsize = isalloc(tsdn_fetch(), p);
3189 }
3190 *ptr = p;
3191 return ALLOCM_SUCCESS;
3195 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) {
3196 assert(ptr != NULL);
3197 assert(*ptr != NULL);
3199 assert(SIZE_T_MAX - size >= extra);
3202 bool no_move = flags & ALLOCM_NO_MOVE;
3205 size_t usize = je_xallocx(*ptr, size, extra, flags);
3206 ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
3207 if (rsize != NULL) {
3211 void *p = je_rallocx(*ptr, size+extra, flags);
3214 ret = ALLOCM_SUCCESS;
3216 ret = ALLOCM_ERR_OOM;
3218 if (rsize != NULL) {
3219 *rsize = isalloc(tsdn_fetch(), *ptr);
3226 je_sallocm(const void *ptr, size_t *rsize, int flags) {
3227 assert(rsize != NULL);
3228 *rsize = je_sallocx(ptr, flags);
3229 return ALLOCM_SUCCESS;
3233 je_dallocm(void *ptr, int flags) {
3234 je_dallocx(ptr, flags);
3235 return ALLOCM_SUCCESS;
3239 je_nallocm(size_t *rsize, size_t size, int flags) {
3240 size_t usize = je_nallocx(size, flags);
3242 return ALLOCM_ERR_OOM;
3244 if (rsize != NULL) {
3247 return ALLOCM_SUCCESS;
3250 #undef ALLOCM_LG_ALIGN
3251 #undef ALLOCM_ALIGN
3252 #undef ALLOCM_ZERO
3253 #undef ALLOCM_NO_MOVE
3255 #undef ALLOCM_SUCCESS
3256 #undef ALLOCM_ERR_OOM
3257 #undef ALLOCM_ERR_NOT_MOVED
3259 /*
3260 * End compatibility functions.
3261 */
3262 /******************************************************************************/
3263 /*
3264 * The following functions are used by threading libraries for protection of
3265 * malloc during fork().
3266 */
3268 /*
3269 * If an application creates a thread before doing any allocation in the main
3270 * thread, then calls fork(2) in the main thread followed by memory allocation
3271 * in the child process, a race can occur that results in deadlock within the
3272 * child: the main thread may have forked while the created thread had
3273 * partially initialized the allocator. Ordinarily jemalloc prevents
3274 * fork/malloc races via the following functions it registers during
3275 * initialization using pthread_atfork(), but of course that does no good if
3276 * the allocator isn't fully initialized at fork time. The following library
3277 * constructor is a partial solution to this problem. It may still be possible
3278 * to trigger the deadlock described above, but doing so would involve forking
3279 * via a library constructor that runs before jemalloc's runs.
3280 */
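/*
 * Editorial note (not part of jemalloc): the registration shape referred to
 * above.  jemalloc's actual handlers are the prefork/postfork functions
 * defined below; this only illustrates the pthread_atfork() contract with
 * hypothetical handler names.
 */
#if 0 /* illustrative only; not compiled */
#include <pthread.h>

static void example_prefork(void) { /* acquire allocator locks */ }
static void example_postfork_parent(void) { /* release locks in the parent */ }
static void example_postfork_child(void) { /* reset locks in the child */ }

static void
example_register_fork_handlers(void) {
	pthread_atfork(example_prefork, example_postfork_parent,
	    example_postfork_child);
}
#endif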
3281 #ifndef JEMALLOC_JET
3282 JEMALLOC_ATTR(constructor)
3283 static void
3284 jemalloc_constructor(void) {
3285 malloc_init();
3286 }
3287 #endif
3289 #ifndef JEMALLOC_MUTEX_INIT_CB
3290 void
3291 jemalloc_prefork(void)
3292 #else
3293 JEMALLOC_EXPORT void
3294 _malloc_prefork(void)
3295 #endif
3296 {
3298 unsigned i, j, narenas;
3301 #ifdef JEMALLOC_MUTEX_INIT_CB
3302 if (!malloc_initialized()) {
3306 assert(malloc_initialized());
3310 narenas = narenas_total_get();
3312 witness_prefork(tsd_witness_tsdp_get(tsd));
3313 /* Acquire all mutexes in a safe order. */
3314 ctl_prefork(tsd_tsdn(tsd));
3315 tcache_prefork(tsd_tsdn(tsd));
3316 malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3317 if (have_background_thread) {
3318 background_thread_prefork0(tsd_tsdn(tsd));
3320 prof_prefork0(tsd_tsdn(tsd));
3321 if (have_background_thread) {
3322 background_thread_prefork1(tsd_tsdn(tsd));
3324 /* Break arena prefork into stages to preserve lock order. */
3325 for (i = 0; i < 8; i++) {
3326 for (j = 0; j < narenas; j++) {
3327 if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3331 arena_prefork0(tsd_tsdn(tsd), arena);
3334 arena_prefork1(tsd_tsdn(tsd), arena);
3337 arena_prefork2(tsd_tsdn(tsd), arena);
3340 arena_prefork3(tsd_tsdn(tsd), arena);
3343 arena_prefork4(tsd_tsdn(tsd), arena);
3346 arena_prefork5(tsd_tsdn(tsd), arena);
3349 arena_prefork6(tsd_tsdn(tsd), arena);
3352 arena_prefork7(tsd_tsdn(tsd), arena);
3354 default: not_reached();
3359 prof_prefork1(tsd_tsdn(tsd));
3362 #ifndef JEMALLOC_MUTEX_INIT_CB
3363 void
3364 jemalloc_postfork_parent(void)
3365 #else
3366 JEMALLOC_EXPORT void
3367 _malloc_postfork(void)
3368 #endif
3369 {
3371 unsigned i, narenas;
3373 #ifdef JEMALLOC_MUTEX_INIT_CB
3374 if (!malloc_initialized()) {
3378 assert(malloc_initialized());
3382 witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3383 /* Release all mutexes, now that fork() has completed. */
3384 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3387 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3388 arena_postfork_parent(tsd_tsdn(tsd), arena);
3391 prof_postfork_parent(tsd_tsdn(tsd));
3392 if (have_background_thread) {
3393 background_thread_postfork_parent(tsd_tsdn(tsd));
3395 malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3396 tcache_postfork_parent(tsd_tsdn(tsd));
3397 ctl_postfork_parent(tsd_tsdn(tsd));
3401 jemalloc_postfork_child(void) {
3403 unsigned i, narenas;
3405 assert(malloc_initialized());
3409 witness_postfork_child(tsd_witness_tsdp_get(tsd));
3410 /* Release all mutexes, now that fork() has completed. */
3411 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3414 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3415 arena_postfork_child(tsd_tsdn(tsd), arena);
3418 prof_postfork_child(tsd_tsdn(tsd));
3419 if (have_background_thread) {
3420 background_thread_postfork_child(tsd_tsdn(tsd));
3422 malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3423 tcache_postfork_child(tsd_tsdn(tsd));
3424 ctl_postfork_child(tsd_tsdn(tsd));
3428 _malloc_first_thread(void)
3431 (void)malloc_mutex_first_thread();
3434 /******************************************************************************/