2 #include "jemalloc/internal/jemalloc_internal.h"
4 /******************************************************************************/
7 /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
8 const char *__malloc_options_1_0 = NULL;
9 __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
11 /* Runtime configuration options. */
12 const char *je_malloc_conf JEMALLOC_ATTR(weak);
20 const char *opt_junk =
21 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
28 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
35 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
42 size_t opt_quarantine = ZU(0);
43 bool opt_redzone = false;
44 bool opt_utrace = false;
45 bool opt_xmalloc = false;
46 bool opt_zero = false;
47 unsigned opt_narenas = 0;
49 /* Initialized to true if the process is running inside Valgrind. */
54 /* Protects arenas initialization. */
55 static malloc_mutex_t arenas_lock;
57 * Arenas that are used to service external requests. Not all elements of the
58 * arenas array are necessarily used; arenas are created lazily as needed.
60 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
61 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
62 * takes some action to create them and allocate from them.
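 * Illustrative example (assumed numbers, not from this file): if opt_narenas
 * resolves to 8 (e.g. ncpus << 2 on a 2-CPU machine, see
 * malloc_init_hard_finish()), arenas[0..8) are handed out automatically by
 * arena_choose_hard(), and a slot such as arenas[8] only comes into use after
 * an explicit action like the arenas.extend mallctl.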
65 static unsigned narenas_total; /* Use narenas_total_*(). */
66 static arena_t *a0; /* arenas[0]; read-only after initialization. */
67 static unsigned narenas_auto; /* Read-only after initialization. */
70 malloc_init_uninitialized = 3,
71 malloc_init_a0_initialized = 2,
72 malloc_init_recursible = 1,
73 malloc_init_initialized = 0 /* Common case --> jnz. */
75 static malloc_init_t malloc_init_state = malloc_init_uninitialized;
77 /* Starts true so the first allocation takes the slow path and triggers initialization; false (0) is the common case afterwards. */
78 static bool malloc_slow = true;
80 /* Bits recorded in malloc_slow_flags when the corresponding options are enabled; malloc_slow is true iff any bit is set. */
82 flag_opt_junk_alloc = (1U),
83 flag_opt_junk_free = (1U << 1),
84 flag_opt_quarantine = (1U << 2),
85 flag_opt_zero = (1U << 3),
86 flag_opt_utrace = (1U << 4),
87 flag_in_valgrind = (1U << 5),
88 flag_opt_xmalloc = (1U << 6)
90 static uint8_t malloc_slow_flags;
92 /* Last entry for overflow detection only. */
93 JEMALLOC_ALIGNED(CACHELINE)
94 const size_t index2size_tab[NSIZES+1] = {
95 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
96 ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
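/*
 * Worked example (illustrative; concrete lg_grp/lg_delta/ndelta values come
 * from the generated SIZE_CLASSES table, not from this file): an entry with
 * lg_grp = 6, lg_delta = 4, ndelta = 3 evaluates to
 * (1 << 6) + (3 << 4) = 64 + 48 = 112 bytes, so index2size_tab maps a size
 * class index to its usable size with a single table load at runtime.
 */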
102 JEMALLOC_ALIGNED(CACHELINE)
103 const uint8_t size2index_tab[] = {
105 #warning "Dangerous LG_TINY_MIN"
107 #elif LG_TINY_MIN == 1
108 #warning "Dangerous LG_TINY_MIN"
110 #elif LG_TINY_MIN == 2
111 #warning "Dangerous LG_TINY_MIN"
113 #elif LG_TINY_MIN == 3
115 #elif LG_TINY_MIN == 4
117 #elif LG_TINY_MIN == 5
119 #elif LG_TINY_MIN == 6
121 #elif LG_TINY_MIN == 7
123 #elif LG_TINY_MIN == 8
125 #elif LG_TINY_MIN == 9
127 #elif LG_TINY_MIN == 10
129 #elif LG_TINY_MIN == 11
132 #error "Unsupported LG_TINY_MIN"
135 #define S2B_1(i) S2B_0(i) S2B_0(i)
138 #define S2B_2(i) S2B_1(i) S2B_1(i)
141 #define S2B_3(i) S2B_2(i) S2B_2(i)
144 #define S2B_4(i) S2B_3(i) S2B_3(i)
147 #define S2B_5(i) S2B_4(i) S2B_4(i)
150 #define S2B_6(i) S2B_5(i) S2B_5(i)
153 #define S2B_7(i) S2B_6(i) S2B_6(i)
156 #define S2B_8(i) S2B_7(i) S2B_7(i)
159 #define S2B_9(i) S2B_8(i) S2B_8(i)
162 #define S2B_10(i) S2B_9(i) S2B_9(i)
165 #define S2B_11(i) S2B_10(i) S2B_10(i)
168 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
169 S2B_##lg_delta_lookup(index)
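/*
 * Illustrative expansion (assuming LG_TINY_MIN == 3, in which case the
 * elided base case is "#define S2B_3(i) i,"): a class whose lg_delta_lookup
 * is 5 expands via S2B_5 -> S2B_4 S2B_4 -> ... to 2^(5-3) = 4 copies of
 * "index,", i.e. it owns four consecutive (1 << LG_TINY_MIN)-granularity
 * slots of size2index_tab.
 */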
184 #ifdef JEMALLOC_THREADED_INIT
185 /* Used to let the initializing thread recursively allocate. */
186 # define NO_INITIALIZER ((unsigned long)0)
187 # define INITIALIZER pthread_self()
188 # define IS_INITIALIZER (malloc_initializer == pthread_self())
189 static pthread_t malloc_initializer = NO_INITIALIZER;
191 # define NO_INITIALIZER false
192 # define INITIALIZER true
193 # define IS_INITIALIZER malloc_initializer
194 static bool malloc_initializer = NO_INITIALIZER;
197 /* Used to avoid initialization races. */
199 #if _WIN32_WINNT >= 0x0600
200 static malloc_mutex_t init_lock = SRWLOCK_INIT;
202 static malloc_mutex_t init_lock;
203 static bool init_lock_initialized = false;
205 JEMALLOC_ATTR(constructor)
207 _init_init_lock(void)
210 /* If another constructor in the same binary is using mallctl to
211 * e.g., set up chunk hooks, it may end up running before this one,
212 * and malloc_init_hard will crash trying to lock the uninitialized
213 * lock. So we force an initialization of the lock in
214 * malloc_init_hard as well. We don't bother with atomicity of
215 * accesses to the init_lock_initialized boolean, since it
216 * really only matters early in the process creation, before any
217 * separate thread normally starts doing anything. */
218 if (!init_lock_initialized)
219 malloc_mutex_init(&init_lock);
220 init_lock_initialized = true;
224 # pragma section(".CRT$XCU", read)
225 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
226 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
230 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
234 void *p; /* Input pointer (as in realloc(p, s)). */
235 size_t s; /* Request size. */
236 void *r; /* Result pointer. */
239 #ifdef JEMALLOC_UTRACE
240 # define UTRACE(a, b, c) do { \
241 if (unlikely(opt_utrace)) { \
242 int utrace_serrno = errno; \
243 malloc_utrace_t ut; \
247 utrace(&ut, sizeof(ut)); \
248 errno = utrace_serrno; \
252 # define UTRACE(a, b, c)
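/*
 * Usage note (illustrative, based on the call sites in this file): the
 * arguments mirror realloc(p, s) returning r, so je_malloc() records
 * UTRACE(0, size, ret), je_realloc() records UTRACE(ptr, size, ret), and
 * when opt_utrace is unset the macro compiles away entirely.
 */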
255 /******************************************************************************/
257 * Function prototypes for static functions that are referenced prior to
261 static bool malloc_init_hard_a0(void);
262 static bool malloc_init_hard(void);
264 /******************************************************************************/
266 * Begin miscellaneous support functions.
269 JEMALLOC_ALWAYS_INLINE_C bool
270 malloc_initialized(void)
273 return (malloc_init_state == malloc_init_initialized);
276 JEMALLOC_ALWAYS_INLINE_C void
277 malloc_thread_init(void)
281 * TSD initialization can't be safely done as a side effect of
282 * deallocation, because it is possible for a thread to do nothing but
283 * deallocate its TLS data via free(), in which case writing to TLS
284 * would cause write-after-free memory corruption. The quarantine
285 * facility *only* gets used as a side effect of deallocation, so make
286 * a best effort attempt at initializing its TSD by hooking all
289 if (config_fill && unlikely(opt_quarantine))
290 quarantine_alloc_hook();
293 JEMALLOC_ALWAYS_INLINE_C bool
297 if (unlikely(malloc_init_state == malloc_init_uninitialized))
298 return (malloc_init_hard_a0());
302 JEMALLOC_ALWAYS_INLINE_C bool
306 if (unlikely(!malloc_initialized()) && malloc_init_hard())
308 malloc_thread_init();
314 * The a0*() functions are used instead of i[mcd]alloc() in situations that
315 * cannot tolerate TLS variable access.
319 a0ialloc(size_t size, bool zero, bool is_metadata)
322 if (unlikely(malloc_init_a0()))
325 return (iallocztm(NULL, size, size2index(size), zero, false,
326 is_metadata, arena_get(0, false), true));
330 a0idalloc(void *ptr, bool is_metadata)
333 idalloctm(NULL, ptr, false, is_metadata, true);
337 a0malloc(size_t size)
340 return (a0ialloc(size, false, true));
347 a0idalloc(ptr, true);
351 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
352 * situations that cannot tolerate TLS variable access (TLS allocation and very
353 * early internal data structure initialization).
357 bootstrap_malloc(size_t size)
360 if (unlikely(size == 0))
363 return (a0ialloc(size, false, false));
367 bootstrap_calloc(size_t num, size_t size)
371 num_size = num * size;
372 if (unlikely(num_size == 0)) {
373 assert(num == 0 || size == 0);
377 return (a0ialloc(num_size, true, false));
381 bootstrap_free(void *ptr)
384 if (unlikely(ptr == NULL))
387 a0idalloc(ptr, false);
391 arena_set(unsigned ind, arena_t *arena)
394 atomic_write_p((void **)&arenas[ind], arena);
398 narenas_total_set(unsigned narenas)
401 atomic_write_u(&narenas_total, narenas);
405 narenas_total_inc(void)
408 atomic_add_u(&narenas_total, 1);
412 narenas_total_get(void)
415 return (atomic_read_u(&narenas_total));
418 /* Create a new arena and insert it into the arenas array at index ind. */
420 arena_init_locked(unsigned ind)
424 assert(ind <= narenas_total_get());
425 if (ind > MALLOCX_ARENA_MAX)
427 if (ind == narenas_total_get())
431 * Another thread may have already initialized arenas[ind] if it's an
434 arena = arena_get(ind, false);
436 assert(ind < narenas_auto);
440 /* Actually initialize the arena. */
441 arena = arena_new(ind);
442 arena_set(ind, arena);
447 arena_init(unsigned ind)
451 malloc_mutex_lock(&arenas_lock);
452 arena = arena_init_locked(ind);
453 malloc_mutex_unlock(&arenas_lock);
458 arena_bind(tsd_t *tsd, unsigned ind)
462 arena = arena_get(ind, false);
463 arena_nthreads_inc(arena);
465 if (tsd_nominal(tsd))
466 tsd_arena_set(tsd, arena);
470 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
472 arena_t *oldarena, *newarena;
474 oldarena = arena_get(oldind, false);
475 newarena = arena_get(newind, false);
476 arena_nthreads_dec(oldarena);
477 arena_nthreads_inc(newarena);
478 tsd_arena_set(tsd, newarena);
482 arena_unbind(tsd_t *tsd, unsigned ind)
486 arena = arena_get(ind, false);
487 arena_nthreads_dec(arena);
488 tsd_arena_set(tsd, NULL);
492 arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
494 arena_tdata_t *tdata, *arenas_tdata_old;
495 arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
496 unsigned narenas_tdata_old, i;
497 unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
498 unsigned narenas_actual = narenas_total_get();
501 * Dissociate old tdata array (and set up for deallocation upon return)
504 if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
505 arenas_tdata_old = arenas_tdata;
506 narenas_tdata_old = narenas_tdata;
509 tsd_arenas_tdata_set(tsd, arenas_tdata);
510 tsd_narenas_tdata_set(tsd, narenas_tdata);
512 arenas_tdata_old = NULL;
513 narenas_tdata_old = 0;
516 /* Allocate tdata array if it's missing. */
517 if (arenas_tdata == NULL) {
518 bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
519 narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
521 if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
522 *arenas_tdata_bypassp = true;
523 arenas_tdata = (arena_tdata_t *)a0malloc(
524 sizeof(arena_tdata_t) * narenas_tdata);
525 *arenas_tdata_bypassp = false;
527 if (arenas_tdata == NULL) {
531 assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
532 tsd_arenas_tdata_set(tsd, arenas_tdata);
533 tsd_narenas_tdata_set(tsd, narenas_tdata);
537 * Copy to tdata array. It's possible that the actual number of arenas
538 * has increased since narenas_total_get() was called above, but that
539 * causes no correctness issues unless two threads concurrently execute
540 * the arenas.extend mallctl, which we trust mallctl synchronization to
544 /* Copy/initialize tickers. */
545 for (i = 0; i < narenas_actual; i++) {
546 if (i < narenas_tdata_old) {
547 ticker_copy(&arenas_tdata[i].decay_ticker,
548 &arenas_tdata_old[i].decay_ticker);
550 ticker_init(&arenas_tdata[i].decay_ticker,
551 DECAY_NTICKS_PER_UPDATE);
554 if (narenas_tdata > narenas_actual) {
555 memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
556 * (narenas_tdata - narenas_actual));
559 /* Read the refreshed tdata array. */
560 tdata = &arenas_tdata[ind];
562 if (arenas_tdata_old != NULL)
563 a0dalloc(arenas_tdata_old);
567 /* Slow path, called only by arena_choose(). */
569 arena_choose_hard(tsd_t *tsd)
573 if (narenas_auto > 1) {
574 unsigned i, choose, first_null;
577 first_null = narenas_auto;
578 malloc_mutex_lock(&arenas_lock);
579 assert(arena_get(0, false) != NULL);
580 for (i = 1; i < narenas_auto; i++) {
581 if (arena_get(i, false) != NULL) {
583 * Choose the first arena that has the lowest
584 * number of threads assigned to it.
586 if (arena_nthreads_get(arena_get(i, false)) <
587 arena_nthreads_get(arena_get(choose,
590 } else if (first_null == narenas_auto) {
592 * Record the index of the first uninitialized
593 * arena, in case all extant arenas are in use.
595 * NB: It is possible for there to be
596 * discontinuities in terms of initialized
597 * versus uninitialized arenas, due to the
598 * "thread.arena" mallctl.
604 if (arena_nthreads_get(arena_get(choose, false)) == 0
605 || first_null == narenas_auto) {
607 * Use an unloaded arena, or the least loaded arena if
608 * all arenas are already initialized.
610 ret = arena_get(choose, false);
612 /* Initialize a new arena. */
614 ret = arena_init_locked(choose);
616 malloc_mutex_unlock(&arenas_lock);
620 arena_bind(tsd, choose);
621 malloc_mutex_unlock(&arenas_lock);
623 ret = arena_get(0, false);
631 thread_allocated_cleanup(tsd_t *tsd)
638 thread_deallocated_cleanup(tsd_t *tsd)
645 arena_cleanup(tsd_t *tsd)
649 arena = tsd_arena_get(tsd);
651 arena_unbind(tsd, arena->ind);
655 arenas_tdata_cleanup(tsd_t *tsd)
657 arena_tdata_t *arenas_tdata;
659 /* Prevent tsd->arenas_tdata from being (re)created. */
660 *tsd_arenas_tdata_bypassp_get(tsd) = true;
662 arenas_tdata = tsd_arenas_tdata_get(tsd);
663 if (arenas_tdata != NULL) {
664 tsd_arenas_tdata_set(tsd, NULL);
665 a0dalloc(arenas_tdata);
670 narenas_tdata_cleanup(tsd_t *tsd)
677 arenas_tdata_bypass_cleanup(tsd_t *tsd)
684 stats_print_atexit(void)
687 if (config_tcache && config_stats) {
691 * Merge stats from extant threads. This is racy, since
692 * individual threads do not lock when recording tcache stats
693 * events. As a consequence, the final stats may be slightly
694 * out of date by the time they are reported, if other threads
695 * continue to allocate.
697 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
698 arena_t *arena = arena_get(i, false);
703 * tcache_stats_merge() locks bins, so if any
704 * code is introduced that acquires both arena
705 * and bin locks in the opposite order,
706 * deadlocks may result.
708 malloc_mutex_lock(&arena->lock);
709 ql_foreach(tcache, &arena->tcache_ql, link) {
710 tcache_stats_merge(tcache, arena);
712 malloc_mutex_unlock(&arena->lock);
716 je_malloc_stats_print(NULL, NULL, NULL);
720 * End miscellaneous support functions.
722 /******************************************************************************/
724 * Begin initialization functions.
727 #ifndef JEMALLOC_HAVE_SECURE_GETENV
729 secure_getenv(const char *name)
732 # ifdef JEMALLOC_HAVE_ISSETUGID
733 if (issetugid() != 0)
736 return (getenv(name));
748 result = si.dwNumberOfProcessors;
750 result = sysconf(_SC_NPROCESSORS_ONLN);
752 return ((result == -1) ? 1 : (unsigned)result);
756 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
757 char const **v_p, size_t *vlen_p)
760 const char *opts = *opts_p;
764 for (accept = false; !accept;) {
766 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
767 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
768 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
769 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
771 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
772 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
773 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
774 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
776 case '0': case '1': case '2': case '3': case '4': case '5':
777 case '6': case '7': case '8': case '9':
783 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
788 if (opts != *opts_p) {
789 malloc_write("<jemalloc>: Conf string ends "
794 malloc_write("<jemalloc>: Malformed conf string\n");
799 for (accept = false; !accept;) {
804 * Look ahead one character here, because the next time
805 * this function is called, it will assume that end of
806 * input has been cleanly reached if no input remains,
807 * but we have optimistically already consumed the
808 * comma if one exists.
811 malloc_write("<jemalloc>: Conf string ends "
814 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
818 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
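/*
 * Illustrative input (format inferred from the parser above and the option
 * handlers in malloc_conf_init()): a string such as
 * "narenas:4,lg_chunk:21,junk:true" is consumed one key:value pair per
 * malloc_conf_next() call, with commas separating pairs.
 */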
832 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
836 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
841 malloc_slow_flag_init(void)
844 * Combine the runtime options into malloc_slow for the fast path. Called
845 * after processing all the options.
847 malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
848 | (opt_junk_free ? flag_opt_junk_free : 0)
849 | (opt_quarantine ? flag_opt_quarantine : 0)
850 | (opt_zero ? flag_opt_zero : 0)
851 | (opt_utrace ? flag_opt_utrace : 0)
852 | (opt_xmalloc ? flag_opt_xmalloc : 0);
855 malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
857 malloc_slow = (malloc_slow_flags != 0);
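/*
 * Illustrative example (assumed configuration, not from the source): with
 * MALLOC_CONF="junk:true,zero:true", flag_opt_junk_alloc, flag_opt_junk_free
 * and flag_opt_zero are all set above, so malloc_slow becomes true and
 * je_malloc() takes its instrumented slow path rather than the fast one.
 */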
861 malloc_conf_init(void)
864 char buf[PATH_MAX + 1];
865 const char *opts, *k, *v;
869 * Automatically configure valgrind before processing options. The
870 * valgrind option remains in jemalloc 3.x for compatibility reasons.
872 if (config_valgrind) {
873 in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
874 if (config_fill && unlikely(in_valgrind)) {
876 opt_junk_alloc = false;
877 opt_junk_free = false;
879 opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
882 if (config_tcache && unlikely(in_valgrind))
886 for (i = 0; i < 4; i++) {
887 /* Get runtime configuration. */
890 opts = config_malloc_conf;
893 if (je_malloc_conf != NULL) {
895 * Use options that were compiled into the
898 opts = je_malloc_conf;
900 /* No configuration specified. */
908 int saved_errno = errno;
909 const char *linkname =
910 # ifdef JEMALLOC_PREFIX
911 "/etc/"JEMALLOC_PREFIX"malloc.conf"
918 * Try to use the contents of the "/etc/malloc.conf"
919 * symbolic link's name.
921 linklen = readlink(linkname, buf, sizeof(buf) - 1);
923 /* No configuration specified. */
926 set_errno(saved_errno);
933 const char *envname =
934 #ifdef JEMALLOC_PREFIX
935 JEMALLOC_CPREFIX"MALLOC_CONF"
941 if ((opts = secure_getenv(envname)) != NULL) {
943 * Do nothing; opts is already initialized to
944 * the value of the MALLOC_CONF environment
948 /* No configuration specified. */
959 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
961 #define CONF_MATCH(n) \
962 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
963 #define CONF_MATCH_VALUE(n) \
964 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
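/*
 * Note (illustrative): for a string literal, sizeof(n)-1 is its length
 * excluding the terminating NUL, so CONF_MATCH("junk") matches only a key of
 * exactly four characters that compares equal; a longer key such as "junky"
 * (hypothetical) cannot match.
 */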
965 #define CONF_HANDLE_BOOL(o, n, cont) \
966 if (CONF_MATCH(n)) { \
967 if (CONF_MATCH_VALUE("true")) \
969 else if (CONF_MATCH_VALUE("false")) \
973 "Invalid conf value", \
979 #define CONF_HANDLE_T_U(t, o, n, min, max, clip) \
980 if (CONF_MATCH(n)) { \
985 um = malloc_strtoumax(v, &end, 0); \
986 if (get_errno() != 0 || (uintptr_t)end -\
987 (uintptr_t)v != vlen) { \
989 "Invalid conf value", \
992 if ((min) != 0 && um < (min)) \
994 else if (um > (max)) \
999 if (((min) != 0 && um < (min)) \
1001 malloc_conf_error( \
1004 k, klen, v, vlen); \
1010 #define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \
1011 CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
1012 #define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
1013 CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
1014 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
1015 if (CONF_MATCH(n)) { \
1020 l = strtol(v, &end, 0); \
1021 if (get_errno() != 0 || (uintptr_t)end -\
1022 (uintptr_t)v != vlen) { \
1023 malloc_conf_error( \
1024 "Invalid conf value", \
1025 k, klen, v, vlen); \
1026 } else if (l < (ssize_t)(min) || l > \
1028 malloc_conf_error( \
1029 "Out-of-range conf value", \
1030 k, klen, v, vlen); \
1035 #define CONF_HANDLE_CHAR_P(o, n, d) \
1036 if (CONF_MATCH(n)) { \
1037 size_t cpylen = (vlen <= \
1038 sizeof(o)-1) ? vlen : \
1040 strncpy(o, v, cpylen); \
1045 CONF_HANDLE_BOOL(opt_abort, "abort", true)
1047 * Chunks always require at least one header page,
1048 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
1049 * possibly an additional page in the presence of
1050 * redzones. In order to simplify options processing,
1051 * use a conservative bound that accommodates all these
1054 CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
1055 LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
1056 (sizeof(size_t) << 3) - 1, true)
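/*
 * Worked example (illustrative; assumes LG_PAGE == 12 and
 * LG_SIZE_CLASS_GROUP == 2): with fill support compiled in, the smallest
 * accepted lg_chunk is 12 + 2 + 2 = 16 (64 KiB chunks); without fill the
 * floor drops to 15 (32 KiB).
 */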
1057 if (strncmp("dss", k, klen) == 0) {
1060 for (i = 0; i < dss_prec_limit; i++) {
1061 if (strncmp(dss_prec_names[i], v, vlen)
1063 if (chunk_dss_prec_set(i)) {
1065 "Error setting dss",
1076 malloc_conf_error("Invalid conf value",
1081 CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1083 if (strncmp("purge", k, klen) == 0) {
1086 for (i = 0; i < purge_mode_limit; i++) {
1087 if (strncmp(purge_mode_names[i], v,
1089 opt_purge = (purge_mode_t)i;
1095 malloc_conf_error("Invalid conf value",
1100 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
1101 -1, (sizeof(size_t) << 3) - 1)
1102 CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
1104 CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
1106 if (CONF_MATCH("junk")) {
1107 if (CONF_MATCH_VALUE("true")) {
1109 opt_junk_alloc = opt_junk_free =
1111 } else if (CONF_MATCH_VALUE("false")) {
1113 opt_junk_alloc = opt_junk_free =
1115 } else if (CONF_MATCH_VALUE("alloc")) {
1117 opt_junk_alloc = true;
1118 opt_junk_free = false;
1119 } else if (CONF_MATCH_VALUE("free")) {
1121 opt_junk_alloc = false;
1122 opt_junk_free = true;
1125 "Invalid conf value", k,
1130 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
1131 0, SIZE_T_MAX, false)
1132 CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
1133 CONF_HANDLE_BOOL(opt_zero, "zero", true)
1135 if (config_utrace) {
1136 CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
1138 if (config_xmalloc) {
1139 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
1141 if (config_tcache) {
1142 CONF_HANDLE_BOOL(opt_tcache, "tcache",
1143 !config_valgrind || !in_valgrind)
1144 if (CONF_MATCH("tcache")) {
1145 assert(config_valgrind && in_valgrind);
1149 "tcache cannot be enabled "
1150 "while running inside Valgrind",
1155 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
1156 "lg_tcache_max", -1,
1157 (sizeof(size_t) << 3) - 1)
1160 CONF_HANDLE_BOOL(opt_prof, "prof", true)
1161 CONF_HANDLE_CHAR_P(opt_prof_prefix,
1162 "prof_prefix", "jeprof")
1163 CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
1165 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1166 "prof_thread_active_init", true)
1167 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1168 "lg_prof_sample", 0,
1169 (sizeof(uint64_t) << 3) - 1, true)
1170 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
1172 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1173 "lg_prof_interval", -1,
1174 (sizeof(uint64_t) << 3) - 1)
1175 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
1177 CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
1179 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
1182 malloc_conf_error("Invalid conf pair", k, klen, v,
1185 #undef CONF_HANDLE_BOOL
1186 #undef CONF_HANDLE_SIZE_T
1187 #undef CONF_HANDLE_SSIZE_T
1188 #undef CONF_HANDLE_CHAR_P
1193 /* init_lock must be held. */
1195 malloc_init_hard_needed(void)
1198 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1199 malloc_init_recursible)) {
1201 * Another thread initialized the allocator before this one
1202 * acquired init_lock, or this thread is the initializing
1203 * thread, and it is recursively allocating.
1207 #ifdef JEMALLOC_THREADED_INIT
1208 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1209 /* Busy-wait until the initializing thread completes. */
1211 malloc_mutex_unlock(&init_lock);
1213 malloc_mutex_lock(&init_lock);
1214 } while (!malloc_initialized());
1221 /* init_lock must be held. */
1223 malloc_init_hard_a0_locked(void)
1226 malloc_initializer = INITIALIZER;
1231 if (opt_stats_print) {
1232 /* Print statistics at exit. */
1233 if (atexit(stats_print_atexit) != 0) {
1234 malloc_write("<jemalloc>: Error in atexit()\n");
1249 if (config_tcache && tcache_boot())
1251 if (malloc_mutex_init(&arenas_lock))
1254 * Create enough scaffolding to allow recursive allocation in
1258 narenas_total_set(narenas_auto);
1260 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1262 * Initialize one arena here. The rest are lazily created in
1263 * arena_choose_hard().
1265 if (arena_init(0) == NULL)
1267 malloc_init_state = malloc_init_a0_initialized;
1272 malloc_init_hard_a0(void)
1276 malloc_mutex_lock(&init_lock);
1277 ret = malloc_init_hard_a0_locked();
1278 malloc_mutex_unlock(&init_lock);
1283 * Initialize data structures which may trigger recursive allocation.
1285 * init_lock must be held.
1288 malloc_init_hard_recursible(void)
1292 malloc_init_state = malloc_init_recursible;
1293 malloc_mutex_unlock(&init_lock);
1295 /* LinuxThreads' pthread_setspecific() allocates. */
1296 if (malloc_tsd_boot0()) {
1301 ncpus = malloc_ncpus();
1303 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
1304 && !defined(_WIN32) && !defined(__native_client__))
1305 /* LinuxThreads' pthread_atfork() allocates. */
1306 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1307 jemalloc_postfork_child) != 0) {
1309 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1316 malloc_mutex_lock(&init_lock);
1320 /* init_lock must be held. */
1322 malloc_init_hard_finish(void)
1328 if (opt_narenas == 0) {
1330 * For SMP systems, create more than one arena per CPU by
1334 opt_narenas = ncpus << 2;
1338 narenas_auto = opt_narenas;
1340 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1342 if (narenas_auto > MALLOCX_ARENA_MAX) {
1343 narenas_auto = MALLOCX_ARENA_MAX;
1344 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1347 narenas_total_set(narenas_auto);
1349 /* Allocate and initialize arenas. */
1350 arenas = (arena_t **)base_alloc(sizeof(arena_t *) *
1351 (MALLOCX_ARENA_MAX+1));
1354 /* Copy the pointer to the one arena that was already initialized. */
1357 malloc_init_state = malloc_init_initialized;
1358 malloc_slow_flag_init();
1364 malloc_init_hard(void)
1367 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1370 malloc_mutex_lock(&init_lock);
1371 if (!malloc_init_hard_needed()) {
1372 malloc_mutex_unlock(&init_lock);
1376 if (malloc_init_state != malloc_init_a0_initialized &&
1377 malloc_init_hard_a0_locked()) {
1378 malloc_mutex_unlock(&init_lock);
1382 if (malloc_init_hard_recursible()) {
1383 malloc_mutex_unlock(&init_lock);
1387 if (config_prof && prof_boot2()) {
1388 malloc_mutex_unlock(&init_lock);
1392 if (malloc_init_hard_finish()) {
1393 malloc_mutex_unlock(&init_lock);
1397 malloc_mutex_unlock(&init_lock);
1403 * End initialization functions.
1405 /******************************************************************************/
1407 * Begin malloc(3)-compatible functions.
1411 imalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind,
1412 prof_tctx_t *tctx, bool slow_path)
1418 if (usize <= SMALL_MAXCLASS) {
1419 szind_t ind_large = size2index(LARGE_MINCLASS);
1420 p = imalloc(tsd, LARGE_MINCLASS, ind_large, slow_path);
1423 arena_prof_promoted(p, usize);
1425 p = imalloc(tsd, usize, ind, slow_path);
1430 JEMALLOC_ALWAYS_INLINE_C void *
1431 imalloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool slow_path)
1436 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1437 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1438 p = imalloc_prof_sample(tsd, usize, ind, tctx, slow_path);
1440 p = imalloc(tsd, usize, ind, slow_path);
1441 if (unlikely(p == NULL)) {
1442 prof_alloc_rollback(tsd, tctx, true);
1445 prof_malloc(p, usize, tctx);
1450 JEMALLOC_ALWAYS_INLINE_C void *
1451 imalloc_body(size_t size, tsd_t **tsd, size_t *usize, bool slow_path)
1455 if (slow_path && unlikely(malloc_init()))
1458 ind = size2index(size);
1459 if (unlikely(ind >= NSIZES))
1462 if (config_stats || (config_prof && opt_prof) || (slow_path &&
1463 config_valgrind && unlikely(in_valgrind))) {
1464 *usize = index2size(ind);
1465 assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
1468 if (config_prof && opt_prof)
1469 return (imalloc_prof(*tsd, *usize, ind, slow_path));
1471 return (imalloc(*tsd, size, ind, slow_path));
1474 JEMALLOC_ALWAYS_INLINE_C void
1475 imalloc_post_check(void *ret, tsd_t *tsd, size_t usize, bool slow_path)
1477 if (unlikely(ret == NULL)) {
1478 if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
1479 malloc_write("<jemalloc>: Error in malloc(): "
1485 if (config_stats && likely(ret != NULL)) {
1486 assert(usize == isalloc(ret, config_prof));
1487 *tsd_thread_allocatedp_get(tsd) += usize;
1491 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1492 void JEMALLOC_NOTHROW *
1493 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
1494 je_malloc(size_t size)
1498 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1503 if (likely(!malloc_slow)) {
1505 * imalloc_body() is inlined so that fast and slow paths are
1506 * generated separately with statically known slow_path.
1508 ret = imalloc_body(size, &tsd, &usize, false);
1509 imalloc_post_check(ret, tsd, usize, false);
1511 ret = imalloc_body(size, &tsd, &usize, true);
1512 imalloc_post_check(ret, tsd, usize, true);
1513 UTRACE(0, size, ret);
1514 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
1521 imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
1528 if (usize <= SMALL_MAXCLASS) {
1529 assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
1530 p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
1533 arena_prof_promoted(p, usize);
1535 p = ipalloc(tsd, usize, alignment, false);
1540 JEMALLOC_ALWAYS_INLINE_C void *
1541 imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
1546 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1547 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1548 p = imemalign_prof_sample(tsd, alignment, usize, tctx);
1550 p = ipalloc(tsd, usize, alignment, false);
1551 if (unlikely(p == NULL)) {
1552 prof_alloc_rollback(tsd, tctx, true);
1555 prof_malloc(p, usize, tctx);
1560 JEMALLOC_ATTR(nonnull(1))
1562 imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
1569 assert(min_alignment != 0);
1571 if (unlikely(malloc_init())) {
1579 /* Make sure that alignment is a large enough power of 2. */
1580 if (unlikely(((alignment - 1) & alignment) != 0
1581 || (alignment < min_alignment))) {
1582 if (config_xmalloc && unlikely(opt_xmalloc)) {
1583 malloc_write("<jemalloc>: Error allocating "
1584 "aligned memory: invalid alignment\n");
1592 usize = sa2u(size, alignment);
1593 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
1598 if (config_prof && opt_prof)
1599 result = imemalign_prof(tsd, alignment, usize);
1601 result = ipalloc(tsd, usize, alignment, false);
1602 if (unlikely(result == NULL))
1604 assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
1609 if (config_stats && likely(result != NULL)) {
1610 assert(usize == isalloc(result, config_prof));
1611 *tsd_thread_allocatedp_get(tsd) += usize;
1613 UTRACE(0, size, result);
1616 assert(result == NULL);
1617 if (config_xmalloc && unlikely(opt_xmalloc)) {
1618 malloc_write("<jemalloc>: Error allocating aligned memory: "
1626 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1627 JEMALLOC_ATTR(nonnull(1))
1628 je_posix_memalign(void **memptr, size_t alignment, size_t size)
1630 int ret = imemalign(memptr, alignment, size, sizeof(void *));
1631 JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
1632 config_prof), false);
1636 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1637 void JEMALLOC_NOTHROW *
1638 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
1639 je_aligned_alloc(size_t alignment, size_t size)
1644 if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
1648 JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
1654 icalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, prof_tctx_t *tctx)
1660 if (usize <= SMALL_MAXCLASS) {
1661 szind_t ind_large = size2index(LARGE_MINCLASS);
1662 p = icalloc(tsd, LARGE_MINCLASS, ind_large);
1665 arena_prof_promoted(p, usize);
1667 p = icalloc(tsd, usize, ind);
1672 JEMALLOC_ALWAYS_INLINE_C void *
1673 icalloc_prof(tsd_t *tsd, size_t usize, szind_t ind)
1678 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1679 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1680 p = icalloc_prof_sample(tsd, usize, ind, tctx);
1682 p = icalloc(tsd, usize, ind);
1683 if (unlikely(p == NULL)) {
1684 prof_alloc_rollback(tsd, tctx, true);
1687 prof_malloc(p, usize, tctx);
1692 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1693 void JEMALLOC_NOTHROW *
1694 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
1695 je_calloc(size_t num, size_t size)
1701 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1703 if (unlikely(malloc_init())) {
1710 num_size = num * size;
1711 if (unlikely(num_size == 0)) {
1712 if (num == 0 || size == 0)
1719 * Try to avoid division here. We know that it isn't possible to
1720 * overflow during multiplication if neither operand uses any of the
1721 * most significant half of the bits in a size_t.
1723 } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
1724 2))) && (num_size / size != num))) {
1725 /* size_t overflow. */
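/*
 * Worked example of the shortcut above (illustrative, assuming a 64-bit
 * size_t): SIZE_T_MAX << 32 masks the upper 32 bits, so if (num | size) has
 * none of them set, both operands are < 2^32 and num * size < 2^64 cannot
 * wrap; the division-based check runs only when that cheap test fails.
 */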
1730 ind = size2index(num_size);
1731 if (unlikely(ind >= NSIZES)) {
1735 if (config_prof && opt_prof) {
1736 usize = index2size(ind);
1737 ret = icalloc_prof(tsd, usize, ind);
1739 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1740 usize = index2size(ind);
1741 ret = icalloc(tsd, num_size, ind);
1745 if (unlikely(ret == NULL)) {
1746 if (config_xmalloc && unlikely(opt_xmalloc)) {
1747 malloc_write("<jemalloc>: Error in calloc(): out of "
1753 if (config_stats && likely(ret != NULL)) {
1754 assert(usize == isalloc(ret, config_prof));
1755 *tsd_thread_allocatedp_get(tsd) += usize;
1757 UTRACE(0, num_size, ret);
1758 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1763 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
1770 if (usize <= SMALL_MAXCLASS) {
1771 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
1774 arena_prof_promoted(p, usize);
1776 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1781 JEMALLOC_ALWAYS_INLINE_C void *
1782 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
1786 prof_tctx_t *old_tctx, *tctx;
1788 prof_active = prof_active_get_unlocked();
1789 old_tctx = prof_tctx_get(old_ptr);
1790 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
1791 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1792 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
1794 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1795 if (unlikely(p == NULL)) {
1796 prof_alloc_rollback(tsd, tctx, true);
1799 prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
1805 JEMALLOC_INLINE_C void
1806 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
1809 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1811 assert(ptr != NULL);
1812 assert(malloc_initialized() || IS_INITIALIZER);
1814 if (config_prof && opt_prof) {
1815 usize = isalloc(ptr, config_prof);
1816 prof_free(tsd, ptr, usize);
1817 } else if (config_stats || config_valgrind)
1818 usize = isalloc(ptr, config_prof);
1820 *tsd_thread_deallocatedp_get(tsd) += usize;
1822 if (likely(!slow_path))
1823 iqalloc(tsd, ptr, tcache, false);
1825 if (config_valgrind && unlikely(in_valgrind))
1827 iqalloc(tsd, ptr, tcache, true);
1828 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1832 JEMALLOC_INLINE_C void
1833 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
1835 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1837 assert(ptr != NULL);
1838 assert(malloc_initialized() || IS_INITIALIZER);
1840 if (config_prof && opt_prof)
1841 prof_free(tsd, ptr, usize);
1843 *tsd_thread_deallocatedp_get(tsd) += usize;
1844 if (config_valgrind && unlikely(in_valgrind))
1846 isqalloc(tsd, ptr, usize, tcache);
1847 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1850 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1851 void JEMALLOC_NOTHROW *
1852 JEMALLOC_ALLOC_SIZE(2)
1853 je_realloc(void *ptr, size_t size)
1856 tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
1857 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1858 size_t old_usize = 0;
1859 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1861 if (unlikely(size == 0)) {
1863 /* realloc(ptr, 0) is equivalent to free(ptr). */
1866 ifree(tsd, ptr, tcache_get(tsd, false), true);
1872 if (likely(ptr != NULL)) {
1873 assert(malloc_initialized() || IS_INITIALIZER);
1874 malloc_thread_init();
1877 old_usize = isalloc(ptr, config_prof);
1878 if (config_valgrind && unlikely(in_valgrind))
1879 old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
1881 if (config_prof && opt_prof) {
1883 ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
1884 NULL : irealloc_prof(tsd, ptr, old_usize, usize);
1886 if (config_stats || (config_valgrind &&
1887 unlikely(in_valgrind)))
1889 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
1892 /* realloc(NULL, size) is equivalent to malloc(size). */
1893 if (likely(!malloc_slow))
1894 ret = imalloc_body(size, &tsd, &usize, false);
1896 ret = imalloc_body(size, &tsd, &usize, true);
1899 if (unlikely(ret == NULL)) {
1900 if (config_xmalloc && unlikely(opt_xmalloc)) {
1901 malloc_write("<jemalloc>: Error in realloc(): "
1907 if (config_stats && likely(ret != NULL)) {
1908 assert(usize == isalloc(ret, config_prof));
1909 *tsd_thread_allocatedp_get(tsd) += usize;
1910 *tsd_thread_deallocatedp_get(tsd) += old_usize;
1912 UTRACE(ptr, size, ret);
1913 JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
1914 old_rzsize, true, false);
1918 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
1923 if (likely(ptr != NULL)) {
1924 tsd_t *tsd = tsd_fetch();
1925 if (likely(!malloc_slow))
1926 ifree(tsd, ptr, tcache_get(tsd, false), false);
1928 ifree(tsd, ptr, tcache_get(tsd, false), true);
1933 * End malloc(3)-compatible functions.
1935 /******************************************************************************/
1937 * Begin non-standard override functions.
1940 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
1941 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1942 void JEMALLOC_NOTHROW *
1943 JEMALLOC_ATTR(malloc)
1944 je_memalign(size_t alignment, size_t size)
1946 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1947 if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
1949 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1954 #ifdef JEMALLOC_OVERRIDE_VALLOC
1955 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1956 void JEMALLOC_NOTHROW *
1957 JEMALLOC_ATTR(malloc)
1958 je_valloc(size_t size)
1960 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1961 if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
1963 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1969 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1970 * #define je_malloc malloc
1972 #define malloc_is_malloc 1
1973 #define is_malloc_(a) malloc_is_ ## a
1974 #define is_malloc(a) is_malloc_(a)
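/*
 * Expansion sketch (illustrative): when jemalloc_defs.h contains
 * "#define je_malloc malloc", is_malloc(je_malloc) rescans to
 * is_malloc_(malloc) -> malloc_is_malloc -> 1, enabling the glibc hook block
 * below; otherwise it yields the undefined token malloc_is_je_malloc, which
 * evaluates to 0 in the #if.
 */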
1976 #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
1978 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1979 * to inconsistently reference libc's malloc(3)-compatible functions
1980 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1982 * These definitions interpose hooks in glibc. The functions are actually
1983 * passed an extra argument for the caller return address, which will be
1986 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
1987 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
1988 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
1989 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
1990 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
1996 * End non-standard override functions.
1998 /******************************************************************************/
2000 * Begin non-standard functions.
2003 JEMALLOC_ALWAYS_INLINE_C bool
2004 imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
2005 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
2008 if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
2012 *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2013 *usize = sa2u(size, *alignment);
2015 if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2017 *zero = MALLOCX_ZERO_GET(flags);
2018 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2019 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2022 *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2024 *tcache = tcache_get(tsd, true);
2025 if ((flags & MALLOCX_ARENA_MASK) != 0) {
2026 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2027 *arena = arena_get(arena_ind, true);
2028 if (unlikely(*arena == NULL))
2035 JEMALLOC_ALWAYS_INLINE_C bool
2036 imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
2037 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
2040 if (likely(flags == 0)) {
2042 if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2046 *tcache = tcache_get(tsd, true);
2050 return (imallocx_flags_decode_hard(tsd, size, flags, usize,
2051 alignment, zero, tcache, arena));
2055 JEMALLOC_ALWAYS_INLINE_C void *
2056 imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
2057 tcache_t *tcache, arena_t *arena)
2061 if (unlikely(alignment != 0))
2062 return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
2063 ind = size2index(usize);
2064 assert(ind < NSIZES);
2066 return (icalloct(tsd, usize, ind, tcache, arena));
2067 return (imalloct(tsd, usize, ind, tcache, arena));
2071 imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
2072 tcache_t *tcache, arena_t *arena)
2076 if (usize <= SMALL_MAXCLASS) {
2077 assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
2078 sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
2079 p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
2083 arena_prof_promoted(p, usize);
2085 p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
2090 JEMALLOC_ALWAYS_INLINE_C void *
2091 imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2100 if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
2101 &zero, &tcache, &arena)))
2103 tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
2104 if (likely((uintptr_t)tctx == (uintptr_t)1U))
2105 p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2106 else if ((uintptr_t)tctx > (uintptr_t)1U) {
2107 p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
2111 if (unlikely(p == NULL)) {
2112 prof_alloc_rollback(tsd, tctx, true);
2115 prof_malloc(p, *usize, tctx);
2117 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2121 JEMALLOC_ALWAYS_INLINE_C void *
2122 imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2130 if (likely(flags == 0)) {
2131 szind_t ind = size2index(size);
2132 if (unlikely(ind >= NSIZES))
2134 if (config_stats || (config_valgrind &&
2135 unlikely(in_valgrind))) {
2136 *usize = index2size(ind);
2137 assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
2139 return (imalloc(tsd, size, ind, true));
2142 if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
2143 &alignment, &zero, &tcache, &arena)))
2145 p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2146 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2150 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2151 void JEMALLOC_NOTHROW *
2152 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2153 je_mallocx(size_t size, int flags)
2161 if (unlikely(malloc_init()))
2165 if (config_prof && opt_prof)
2166 p = imallocx_prof(tsd, size, flags, &usize);
2168 p = imallocx_no_prof(tsd, size, flags, &usize);
2169 if (unlikely(p == NULL))
2173 assert(usize == isalloc(p, config_prof));
2174 *tsd_thread_allocatedp_get(tsd) += usize;
2177 JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
2180 if (config_xmalloc && unlikely(opt_xmalloc)) {
2181 malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
2189 irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
2190 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2197 if (usize <= SMALL_MAXCLASS) {
2198 p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
2199 zero, tcache, arena);
2202 arena_prof_promoted(p, usize);
2204 p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
2211 JEMALLOC_ALWAYS_INLINE_C void *
2212 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2213 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2218 prof_tctx_t *old_tctx, *tctx;
2220 prof_active = prof_active_get_unlocked();
2221 old_tctx = prof_tctx_get(old_ptr);
2222 tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
2223 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2224 p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
2225 alignment, zero, tcache, arena, tctx);
2227 p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
2230 if (unlikely(p == NULL)) {
2231 prof_alloc_rollback(tsd, tctx, true);
2235 if (p == old_ptr && alignment != 0) {
2237 * The allocation did not move, so it is possible that the size
2238 * class is smaller than would guarantee the requested
2239 * alignment, and that the alignment constraint was
2240 * serendipitously satisfied. Additionally, old_usize may not
2241 * be the same as the current usize because of in-place large
2242 * reallocation. Therefore, query the actual value of usize.
2244 *usize = isalloc(p, config_prof);
2246 prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
2247 old_usize, old_tctx);
2252 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2253 void JEMALLOC_NOTHROW *
2254 JEMALLOC_ALLOC_SIZE(2)
2255 je_rallocx(void *ptr, size_t size, int flags)
2261 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2262 size_t alignment = MALLOCX_ALIGN_GET(flags);
2263 bool zero = flags & MALLOCX_ZERO;
2267 assert(ptr != NULL);
2269 assert(malloc_initialized() || IS_INITIALIZER);
2270 malloc_thread_init();
2273 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2274 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2275 arena = arena_get(arena_ind, true);
2276 if (unlikely(arena == NULL))
2281 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2282 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2285 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2287 tcache = tcache_get(tsd, true);
2289 old_usize = isalloc(ptr, config_prof);
2290 if (config_valgrind && unlikely(in_valgrind))
2291 old_rzsize = u2rz(old_usize);
2293 if (config_prof && opt_prof) {
2294 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
2295 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
2297 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2298 zero, tcache, arena);
2299 if (unlikely(p == NULL))
2302 p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
2304 if (unlikely(p == NULL))
2306 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2307 usize = isalloc(p, config_prof);
2309 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2312 *tsd_thread_allocatedp_get(tsd) += usize;
2313 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2315 UTRACE(ptr, size, p);
2316 JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
2317 old_rzsize, false, zero);
2320 if (config_xmalloc && unlikely(opt_xmalloc)) {
2321 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2324 UTRACE(ptr, size, 0);
2328 JEMALLOC_ALWAYS_INLINE_C size_t
2329 ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2330 size_t extra, size_t alignment, bool zero)
2334 if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero))
2336 usize = isalloc(ptr, config_prof);
2342 ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2343 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
2349 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment,
2355 JEMALLOC_ALWAYS_INLINE_C size_t
2356 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2357 size_t extra, size_t alignment, bool zero)
2359 size_t usize_max, usize;
2361 prof_tctx_t *old_tctx, *tctx;
2363 prof_active = prof_active_get_unlocked();
2364 old_tctx = prof_tctx_get(ptr);
2366 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2367 * Therefore, compute its maximum possible value and use that in
2368 * prof_alloc_prep() to decide whether to capture a backtrace.
2369 * prof_realloc() will use the actual usize to decide whether to sample.
2371 if (alignment == 0) {
2372 usize_max = s2u(size+extra);
2373 assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
2375 usize_max = sa2u(size+extra, alignment);
2376 if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
2378 * usize_max is out of range, and chances are that
2379 * allocation will fail, but use the maximum possible
2380 * value and carry on with prof_alloc_prep(), just in
2381 * case allocation succeeds.
2383 usize_max = HUGE_MAXCLASS;
2386 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2388 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2389 usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra,
2390 alignment, zero, tctx);
2392 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
2395 if (usize == old_usize) {
2396 prof_alloc_rollback(tsd, tctx, false);
2399 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2405 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2406 je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2409 size_t usize, old_usize;
2410 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2411 size_t alignment = MALLOCX_ALIGN_GET(flags);
2412 bool zero = flags & MALLOCX_ZERO;
2414 assert(ptr != NULL);
2416 assert(SIZE_T_MAX - size >= extra);
2417 assert(malloc_initialized() || IS_INITIALIZER);
2418 malloc_thread_init();
2421 old_usize = isalloc(ptr, config_prof);
2424 * The API explicitly absolves itself of protecting against (size +
2425 * extra) numerical overflow, but we may need to clamp extra to avoid
2426 * exceeding HUGE_MAXCLASS.
2428 * Ordinarily, size limit checking is handled deeper down, but here we
2429 * have to check as part of (size + extra) clamping, since we need the
2430 * clamped value in the above helper functions.
2432 if (unlikely(size > HUGE_MAXCLASS)) {
2434 goto label_not_resized;
2436 if (unlikely(HUGE_MAXCLASS - size < extra))
2437 extra = HUGE_MAXCLASS - size;
2439 if (config_valgrind && unlikely(in_valgrind))
2440 old_rzsize = u2rz(old_usize);
2442 if (config_prof && opt_prof) {
2443 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2446 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
2449 if (unlikely(usize == old_usize))
2450 goto label_not_resized;
2453 *tsd_thread_allocatedp_get(tsd) += usize;
2454 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2456 JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
2457 old_rzsize, false, zero);
2459 UTRACE(ptr, size, ptr);
2463 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2465 je_sallocx(const void *ptr, int flags)
2469 assert(malloc_initialized() || IS_INITIALIZER);
2470 malloc_thread_init();
2472 if (config_ivsalloc)
2473 usize = ivsalloc(ptr, config_prof);
2475 usize = isalloc(ptr, config_prof);
2480 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2481 je_dallocx(void *ptr, int flags)
2486 assert(ptr != NULL);
2487 assert(malloc_initialized() || IS_INITIALIZER);
2490 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2491 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2494 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2496 tcache = tcache_get(tsd, false);
2499 ifree(tsd_fetch(), ptr, tcache, true);
2502 JEMALLOC_ALWAYS_INLINE_C size_t
2503 inallocx(size_t size, int flags)
2507 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
2510 usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2514 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2515 je_sdallocx(void *ptr, size_t size, int flags)
2521 assert(ptr != NULL);
2522 assert(malloc_initialized() || IS_INITIALIZER);
2523 usize = inallocx(size, flags);
2524 assert(usize == isalloc(ptr, config_prof));
2527 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2528 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2531 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2533 tcache = tcache_get(tsd, false);
2536 isfree(tsd, ptr, usize, tcache);
2539 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2541 je_nallocx(size_t size, int flags)
2547 if (unlikely(malloc_init()))
2550 usize = inallocx(size, flags);
2551 if (unlikely(usize > HUGE_MAXCLASS))
2557 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2558 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2562 if (unlikely(malloc_init()))
2565 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
2568 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2569 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
2572 if (unlikely(malloc_init()))
2575 return (ctl_nametomib(name, mibp, miblenp));
2578 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2579 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2580 void *newp, size_t newlen)
2583 if (unlikely(malloc_init()))
2586 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
2589 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2590 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2594 stats_print(write_cb, cbopaque, opts);
2597 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2598 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2602 assert(malloc_initialized() || IS_INITIALIZER);
2603 malloc_thread_init();
2605 if (config_ivsalloc)
2606 ret = ivsalloc(ptr, config_prof);
2608 ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
2614 * End non-standard functions.
2616 /******************************************************************************/
2618 * Begin compatibility functions.
2621 #define ALLOCM_LG_ALIGN(la) (la)
2622 #define ALLOCM_ALIGN(a) (ffsl(a)-1)
2623 #define ALLOCM_ZERO ((int)0x40)
2624 #define ALLOCM_NO_MOVE ((int)0x80)
2626 #define ALLOCM_SUCCESS 0
2627 #define ALLOCM_ERR_OOM 1
2628 #define ALLOCM_ERR_NOT_MOVED 2
2631 je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
2635 assert(ptr != NULL);
2637 p = je_mallocx(size, flags);
2639 return (ALLOCM_ERR_OOM);
2641 *rsize = isalloc(p, config_prof);
2643 return (ALLOCM_SUCCESS);
2647 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
2650 bool no_move = flags & ALLOCM_NO_MOVE;
2652 assert(ptr != NULL);
2653 assert(*ptr != NULL);
2655 assert(SIZE_T_MAX - size >= extra);
2658 size_t usize = je_xallocx(*ptr, size, extra, flags);
2659 ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
2663 void *p = je_rallocx(*ptr, size+extra, flags);
2666 ret = ALLOCM_SUCCESS;
2668 ret = ALLOCM_ERR_OOM;
2670 *rsize = isalloc(*ptr, config_prof);
2676 je_sallocm(const void *ptr, size_t *rsize, int flags)
2679 assert(rsize != NULL);
2680 *rsize = je_sallocx(ptr, flags);
2681 return (ALLOCM_SUCCESS);
2685 je_dallocm(void *ptr, int flags)
2688 je_dallocx(ptr, flags);
2689 return (ALLOCM_SUCCESS);
2693 je_nallocm(size_t *rsize, size_t size, int flags)
2697 usize = je_nallocx(size, flags);
2699 return (ALLOCM_ERR_OOM);
2702 return (ALLOCM_SUCCESS);
2705 #undef ALLOCM_LG_ALIGN
2708 #undef ALLOCM_NO_MOVE
2710 #undef ALLOCM_SUCCESS
2711 #undef ALLOCM_ERR_OOM
2712 #undef ALLOCM_ERR_NOT_MOVED
2715 * End compatibility functions.
2717 /******************************************************************************/
2719 * The following functions are used by threading libraries for protection of
2720 * malloc during fork().
2724 * If an application creates a thread before doing any allocation in the main
2725 * thread, then calls fork(2) in the main thread followed by memory allocation
2726 * in the child process, a race can occur that results in deadlock within the
2727 * child: the main thread may have forked while the created thread had
2728 * partially initialized the allocator. Ordinarily jemalloc prevents
2729 * fork/malloc races via the following functions it registers during
2730 * initialization using pthread_atfork(), but of course that does no good if
2731 * the allocator isn't fully initialized at fork time. The following library
2732 * constructor is a partial solution to this problem. It may still be possible
2733 * to trigger the deadlock described above, but doing so would involve forking
2734 * via a library constructor that runs before jemalloc's runs.
2736 JEMALLOC_ATTR(constructor)
2738 jemalloc_constructor(void)
2744 #ifndef JEMALLOC_MUTEX_INIT_CB
2746 jemalloc_prefork(void)
2748 JEMALLOC_EXPORT void
2749 _malloc_prefork(void)
2752 unsigned i, narenas;
2754 #ifdef JEMALLOC_MUTEX_INIT_CB
2755 if (!malloc_initialized())
2758 assert(malloc_initialized());
2760 /* Acquire all mutexes in a safe order. */
2763 malloc_mutex_prefork(&arenas_lock);
2764 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2767 if ((arena = arena_get(i, false)) != NULL)
2768 arena_prefork(arena);
2774 #ifndef JEMALLOC_MUTEX_INIT_CB
2776 jemalloc_postfork_parent(void)
2778 JEMALLOC_EXPORT void
2779 _malloc_postfork(void)
2782 unsigned i, narenas;
2784 #ifdef JEMALLOC_MUTEX_INIT_CB
2785 if (!malloc_initialized())
2788 assert(malloc_initialized());
2790 /* Release all mutexes, now that fork() has completed. */
2791 base_postfork_parent();
2792 chunk_postfork_parent();
2793 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2796 if ((arena = arena_get(i, false)) != NULL)
2797 arena_postfork_parent(arena);
2799 malloc_mutex_postfork_parent(&arenas_lock);
2800 prof_postfork_parent();
2801 ctl_postfork_parent();
2805 jemalloc_postfork_child(void)
2807 unsigned i, narenas;
2809 assert(malloc_initialized());
2811 /* Release all mutexes, now that fork() has completed. */
2812 base_postfork_child();
2813 chunk_postfork_child();
2814 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2817 if ((arena = arena_get(i, false)) != NULL)
2818 arena_postfork_child(arena);
2820 malloc_mutex_postfork_child(&arenas_lock);
2821 prof_postfork_child();
2822 ctl_postfork_child();
2826 _malloc_first_thread(void)
2829 (void)malloc_mutex_first_thread();
2832 /******************************************************************************/