2 #include "jemalloc/internal/jemalloc_internal.h"
4 /******************************************************************************/
7 /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
8 const char *__malloc_options_1_0 = NULL;
9 __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
11 /* Runtime configuration options. */
12 const char *je_malloc_conf JEMALLOC_ATTR(weak);
20 const char *opt_junk =
21 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
28 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
35 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
42 size_t opt_quarantine = ZU(0);
43 bool opt_redzone = false;
44 bool opt_utrace = false;
45 bool opt_xmalloc = false;
46 bool opt_zero = false;
47 unsigned opt_narenas = 0;
49 /* Initialized to true if the process is running inside Valgrind. */
54 /* Protects arenas initialization. */
55 static malloc_mutex_t arenas_lock;
57 * Arenas that are used to service external requests. Not all elements of the
58 * arenas array are necessarily used; arenas are created lazily as needed.
60 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
61 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
62 * takes some action to create them and allocate from them.
65 static unsigned narenas_total; /* Use narenas_total_*(). */
66 static arena_t *a0; /* arenas[0]; read-only after initialization. */
67 unsigned narenas_auto; /* Read-only after initialization. */
70 malloc_init_uninitialized = 3,
71 malloc_init_a0_initialized = 2,
72 malloc_init_recursible = 1,
73 malloc_init_initialized = 0 /* Common case --> jnz. */
75 static malloc_init_t malloc_init_state = malloc_init_uninitialized;
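/*
 * Editor's note (illustrative, not upstream text): malloc_init_initialized
 * is deliberately 0 so that the fast-path guard used throughout this file,
 * roughly
 *
 *	if (unlikely(malloc_init_state != malloc_init_initialized))
 *		... take the malloc_init_hard() slow path ...
 *
 * compiles to a compare against zero plus a single conditional branch, the
 * "jnz" mentioned in the enum comment above.
 */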
77 /* False should be the common case. Set to true to trigger initialization. */
78 static bool malloc_slow = true;
80 /* When malloc_slow is true, the corresponding option bits below record why, for sanity checking. */
82 flag_opt_junk_alloc = (1U),
83 flag_opt_junk_free = (1U << 1),
84 flag_opt_quarantine = (1U << 2),
85 flag_opt_zero = (1U << 3),
86 flag_opt_utrace = (1U << 4),
87 flag_in_valgrind = (1U << 5),
88 flag_opt_xmalloc = (1U << 6)
90 static uint8_t malloc_slow_flags;
92 /* Last entry for overflow detection only. */
93 JEMALLOC_ALIGNED(CACHELINE)
94 const size_t index2size_tab[NSIZES+1] = {
95 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
96 ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
102 JEMALLOC_ALIGNED(CACHELINE)
103 const uint8_t size2index_tab[] = {
105 #warning "Dangerous LG_TINY_MIN"
107 #elif LG_TINY_MIN == 1
108 #warning "Dangerous LG_TINY_MIN"
110 #elif LG_TINY_MIN == 2
111 #warning "Dangerous LG_TINY_MIN"
113 #elif LG_TINY_MIN == 3
115 #elif LG_TINY_MIN == 4
117 #elif LG_TINY_MIN == 5
119 #elif LG_TINY_MIN == 6
121 #elif LG_TINY_MIN == 7
123 #elif LG_TINY_MIN == 8
125 #elif LG_TINY_MIN == 9
127 #elif LG_TINY_MIN == 10
129 #elif LG_TINY_MIN == 11
132 #error "Unsupported LG_TINY_MIN"
135 #define S2B_1(i) S2B_0(i) S2B_0(i)
138 #define S2B_2(i) S2B_1(i) S2B_1(i)
141 #define S2B_3(i) S2B_2(i) S2B_2(i)
144 #define S2B_4(i) S2B_3(i) S2B_3(i)
147 #define S2B_5(i) S2B_4(i) S2B_4(i)
150 #define S2B_6(i) S2B_5(i) S2B_5(i)
153 #define S2B_7(i) S2B_6(i) S2B_6(i)
156 #define S2B_8(i) S2B_7(i) S2B_7(i)
159 #define S2B_9(i) S2B_8(i) S2B_8(i)
162 #define S2B_10(i) S2B_9(i) S2B_9(i)
165 #define S2B_11(i) S2B_10(i) S2B_10(i)
168 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
169 S2B_##lg_delta_lookup(index)
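/*
 * Editor's sketch (not upstream text): each S2B_n(i) macro doubles the
 * previous one, so S2B_n(i) emits 2^(n - LG_TINY_MIN) copies of the size
 * class index i, and a lookup-eligible class whose lg_delta_lookup is n
 * fills that many consecutive slots of size2index_tab, one slot per
 * 2^LG_TINY_MIN bytes.  The lookup in the companion header is essentially
 *
 *	szind_t ind = size2index_tab[(size - 1) >> LG_TINY_MIN];
 *
 * Assuming LG_TINY_MIN == 3 and the usual 16-byte quantum, a request for
 * 24 bytes reads slot (24 - 1) >> 3 == 2, which the expansion fills with
 * the index of the 32-byte class, the smallest class that can hold it.
 */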
184 #ifdef JEMALLOC_THREADED_INIT
185 /* Used to let the initializing thread recursively allocate. */
186 # define NO_INITIALIZER ((unsigned long)0)
187 # define INITIALIZER pthread_self()
188 # define IS_INITIALIZER (malloc_initializer == pthread_self())
189 static pthread_t malloc_initializer = NO_INITIALIZER;
191 # define NO_INITIALIZER false
192 # define INITIALIZER true
193 # define IS_INITIALIZER malloc_initializer
194 static bool malloc_initializer = NO_INITIALIZER;
197 /* Used to avoid initialization races. */
199 #if _WIN32_WINNT >= 0x0600
200 static malloc_mutex_t init_lock = SRWLOCK_INIT;
202 static malloc_mutex_t init_lock;
203 static bool init_lock_initialized = false;
205 JEMALLOC_ATTR(constructor)
207 _init_init_lock(void)
210 /* If another constructor in the same binary is using mallctl to
211 * e.g. set up chunk hooks, it may end up running before this one,
212 * and malloc_init_hard will crash trying to lock the uninitialized
213 * lock. So we force an initialization of the lock in
214 * malloc_init_hard as well. We make no attempt at atomicity of
215 * accesses to the init_lock_initialized boolean, since it
216 * really only matters early in the process creation, before any
217 * separate thread normally starts doing anything. */
218 if (!init_lock_initialized)
219 malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
220 init_lock_initialized = true;
224 # pragma section(".CRT$XCU", read)
225 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
226 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
230 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
234 void *p; /* Input pointer (as in realloc(p, s)). */
235 size_t s; /* Request size. */
236 void *r; /* Result pointer. */
239 #ifdef JEMALLOC_UTRACE
240 # define UTRACE(a, b, c) do { \
241 if (unlikely(opt_utrace)) { \
242 int utrace_serrno = errno; \
243 malloc_utrace_t ut; \
247 utrace(&ut, sizeof(ut)); \
248 errno = utrace_serrno; \
252 # define UTRACE(a, b, c)
255 /******************************************************************************/
257 * Function prototypes for static functions that are referenced prior to
261 static bool malloc_init_hard_a0(void);
262 static bool malloc_init_hard(void);
264 /******************************************************************************/
266 * Begin miscellaneous support functions.
269 JEMALLOC_ALWAYS_INLINE_C bool
270 malloc_initialized(void)
273 return (malloc_init_state == malloc_init_initialized);
276 JEMALLOC_ALWAYS_INLINE_C void
277 malloc_thread_init(void)
281 * TSD initialization can't be safely done as a side effect of
282 * deallocation, because it is possible for a thread to do nothing but
283 * deallocate its TLS data via free(), in which case writing to TLS
284 * would cause write-after-free memory corruption. The quarantine
285 * facility *only* gets used as a side effect of deallocation, so make
286 * a best effort attempt at initializing its TSD by hooking all
289 if (config_fill && unlikely(opt_quarantine))
290 quarantine_alloc_hook();
293 JEMALLOC_ALWAYS_INLINE_C bool
297 if (unlikely(malloc_init_state == malloc_init_uninitialized))
298 return (malloc_init_hard_a0());
302 JEMALLOC_ALWAYS_INLINE_C bool
306 if (unlikely(!malloc_initialized()) && malloc_init_hard())
308 malloc_thread_init();
314 * The a0*() functions are used instead of i{d,}alloc() in situations that
315 * cannot tolerate TLS variable access.
319 a0ialloc(size_t size, bool zero, bool is_metadata)
322 if (unlikely(malloc_init_a0()))
325 return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
326 is_metadata, arena_get(TSDN_NULL, 0, true), true));
330 a0idalloc(void *ptr, bool is_metadata)
333 idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
337 a0malloc(size_t size)
340 return (a0ialloc(size, false, true));
347 a0idalloc(ptr, true);
351 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
352 * situations that cannot tolerate TLS variable access (TLS allocation and very
353 * early internal data structure initialization).
357 bootstrap_malloc(size_t size)
360 if (unlikely(size == 0))
363 return (a0ialloc(size, false, false));
367 bootstrap_calloc(size_t num, size_t size)
371 num_size = num * size;
372 if (unlikely(num_size == 0)) {
373 assert(num == 0 || size == 0);
377 return (a0ialloc(num_size, true, false));
381 bootstrap_free(void *ptr)
384 if (unlikely(ptr == NULL))
387 a0idalloc(ptr, false);
391 arena_set(unsigned ind, arena_t *arena)
394 atomic_write_p((void **)&arenas[ind], arena);
398 narenas_total_set(unsigned narenas)
401 atomic_write_u(&narenas_total, narenas);
405 narenas_total_inc(void)
408 atomic_add_u(&narenas_total, 1);
412 narenas_total_get(void)
415 return (atomic_read_u(&narenas_total));
418 /* Create a new arena and insert it into the arenas array at index ind. */
420 arena_init_locked(tsdn_t *tsdn, unsigned ind)
424 assert(ind <= narenas_total_get());
425 if (ind > MALLOCX_ARENA_MAX)
427 if (ind == narenas_total_get())
431 * Another thread may have already initialized arenas[ind] if it's an
434 arena = arena_get(tsdn, ind, false);
436 assert(ind < narenas_auto);
440 /* Actually initialize the arena. */
441 arena = arena_new(tsdn, ind);
442 arena_set(ind, arena);
447 arena_init(tsdn_t *tsdn, unsigned ind)
451 malloc_mutex_lock(tsdn, &arenas_lock);
452 arena = arena_init_locked(tsdn, ind);
453 malloc_mutex_unlock(tsdn, &arenas_lock);
458 arena_bind(tsd_t *tsd, unsigned ind, bool internal)
462 arena = arena_get(tsd_tsdn(tsd), ind, false);
463 arena_nthreads_inc(arena, internal);
465 if (tsd_nominal(tsd)) {
467 tsd_iarena_set(tsd, arena);
469 tsd_arena_set(tsd, arena);
474 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
476 arena_t *oldarena, *newarena;
478 oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
479 newarena = arena_get(tsd_tsdn(tsd), newind, false);
480 arena_nthreads_dec(oldarena, false);
481 arena_nthreads_inc(newarena, false);
482 tsd_arena_set(tsd, newarena);
486 arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
490 arena = arena_get(tsd_tsdn(tsd), ind, false);
491 arena_nthreads_dec(arena, internal);
493 tsd_iarena_set(tsd, NULL);
495 tsd_arena_set(tsd, NULL);
499 arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
501 arena_tdata_t *tdata, *arenas_tdata_old;
502 arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
503 unsigned narenas_tdata_old, i;
504 unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
505 unsigned narenas_actual = narenas_total_get();
508 * Dissociate old tdata array (and set up for deallocation upon return)
511 if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
512 arenas_tdata_old = arenas_tdata;
513 narenas_tdata_old = narenas_tdata;
516 tsd_arenas_tdata_set(tsd, arenas_tdata);
517 tsd_narenas_tdata_set(tsd, narenas_tdata);
519 arenas_tdata_old = NULL;
520 narenas_tdata_old = 0;
523 /* Allocate tdata array if it's missing. */
524 if (arenas_tdata == NULL) {
525 bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
526 narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
528 if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
529 *arenas_tdata_bypassp = true;
530 arenas_tdata = (arena_tdata_t *)a0malloc(
531 sizeof(arena_tdata_t) * narenas_tdata);
532 *arenas_tdata_bypassp = false;
534 if (arenas_tdata == NULL) {
538 assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
539 tsd_arenas_tdata_set(tsd, arenas_tdata);
540 tsd_narenas_tdata_set(tsd, narenas_tdata);
544 * Copy to tdata array. It's possible that the actual number of arenas
545 * has increased since narenas_total_get() was called above, but that
546 * causes no correctness issues unless two threads concurrently execute
547 * the arenas.extend mallctl, which we trust mallctl synchronization to
551 /* Copy/initialize tickers. */
552 for (i = 0; i < narenas_actual; i++) {
553 if (i < narenas_tdata_old) {
554 ticker_copy(&arenas_tdata[i].decay_ticker,
555 &arenas_tdata_old[i].decay_ticker);
557 ticker_init(&arenas_tdata[i].decay_ticker,
558 DECAY_NTICKS_PER_UPDATE);
561 if (narenas_tdata > narenas_actual) {
562 memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
563 * (narenas_tdata - narenas_actual));
566 /* Read the refreshed tdata array. */
567 tdata = &arenas_tdata[ind];
569 if (arenas_tdata_old != NULL)
570 a0dalloc(arenas_tdata_old);
574 /* Slow path, called only by arena_choose(). */
576 arena_choose_hard(tsd_t *tsd, bool internal)
578 arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
580 if (narenas_auto > 1) {
581 unsigned i, j, choose[2], first_null;
584 * Determine binding for both non-internal and internal
587 * choose[0]: For application allocation.
588 * choose[1]: For internal metadata allocation.
591 for (j = 0; j < 2; j++)
594 first_null = narenas_auto;
595 malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
596 assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
597 for (i = 1; i < narenas_auto; i++) {
598 if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
600 * Choose the first arena that has the lowest
601 * number of threads assigned to it.
603 for (j = 0; j < 2; j++) {
604 if (arena_nthreads_get(arena_get(
605 tsd_tsdn(tsd), i, false), !!j) <
606 arena_nthreads_get(arena_get(
607 tsd_tsdn(tsd), choose[j], false),
611 } else if (first_null == narenas_auto) {
613 * Record the index of the first uninitialized
614 * arena, in case all extant arenas are in use.
616 * NB: It is possible for there to be
617 * discontinuities in terms of initialized
618 * versus uninitialized arenas, due to the
619 * "thread.arena" mallctl.
625 for (j = 0; j < 2; j++) {
626 if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
627 choose[j], false), !!j) == 0 || first_null ==
630 * Use an unloaded arena, or the least loaded
631 * arena if all arenas are already initialized.
633 if (!!j == internal) {
634 ret = arena_get(tsd_tsdn(tsd),
640 /* Initialize a new arena. */
641 choose[j] = first_null;
642 arena = arena_init_locked(tsd_tsdn(tsd),
645 malloc_mutex_unlock(tsd_tsdn(tsd),
652 arena_bind(tsd, choose[j], !!j);
654 malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
656 ret = arena_get(tsd_tsdn(tsd), 0, false);
657 arena_bind(tsd, 0, false);
658 arena_bind(tsd, 0, true);
665 thread_allocated_cleanup(tsd_t *tsd)
672 thread_deallocated_cleanup(tsd_t *tsd)
679 iarena_cleanup(tsd_t *tsd)
683 iarena = tsd_iarena_get(tsd);
685 arena_unbind(tsd, iarena->ind, true);
689 arena_cleanup(tsd_t *tsd)
693 arena = tsd_arena_get(tsd);
695 arena_unbind(tsd, arena->ind, false);
699 arenas_tdata_cleanup(tsd_t *tsd)
701 arena_tdata_t *arenas_tdata;
703 /* Prevent tsd->arenas_tdata from being (re)created. */
704 *tsd_arenas_tdata_bypassp_get(tsd) = true;
706 arenas_tdata = tsd_arenas_tdata_get(tsd);
707 if (arenas_tdata != NULL) {
708 tsd_arenas_tdata_set(tsd, NULL);
709 a0dalloc(arenas_tdata);
714 narenas_tdata_cleanup(tsd_t *tsd)
721 arenas_tdata_bypass_cleanup(tsd_t *tsd)
728 stats_print_atexit(void)
731 if (config_tcache && config_stats) {
738 * Merge stats from extant threads. This is racy, since
739 * individual threads do not lock when recording tcache stats
740 * events. As a consequence, the final stats may be slightly
741 * out of date by the time they are reported, if other threads
742 * continue to allocate.
744 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
745 arena_t *arena = arena_get(tsdn, i, false);
750 * tcache_stats_merge() locks bins, so if any
751 * code is introduced that acquires both arena
752 * and bin locks in the opposite order,
753 * deadlocks may result.
755 malloc_mutex_lock(tsdn, &arena->lock);
756 ql_foreach(tcache, &arena->tcache_ql, link) {
757 tcache_stats_merge(tsdn, tcache, arena);
759 malloc_mutex_unlock(tsdn, &arena->lock);
763 je_malloc_stats_print(NULL, NULL, NULL);
767 * End miscellaneous support functions.
769 /******************************************************************************/
771 * Begin initialization functions.
774 #ifndef JEMALLOC_HAVE_SECURE_GETENV
776 secure_getenv(const char *name)
779 # ifdef JEMALLOC_HAVE_ISSETUGID
780 if (issetugid() != 0)
783 return (getenv(name));
795 result = si.dwNumberOfProcessors;
797 result = sysconf(_SC_NPROCESSORS_ONLN);
799 return ((result == -1) ? 1 : (unsigned)result);
803 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
804 char const **v_p, size_t *vlen_p)
807 const char *opts = *opts_p;
811 for (accept = false; !accept;) {
813 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
814 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
815 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
816 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
818 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
819 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
820 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
821 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
823 case '0': case '1': case '2': case '3': case '4': case '5':
824 case '6': case '7': case '8': case '9':
830 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
835 if (opts != *opts_p) {
836 malloc_write("<jemalloc>: Conf string ends "
841 malloc_write("<jemalloc>: Malformed conf string\n");
846 for (accept = false; !accept;) {
851 * Look ahead one character here, because the next time
852 * this function is called, it will assume that end of
853 * input has been cleanly reached if no input remains,
854 * but we have optimistically already consumed the
855 * comma if one exists.
858 malloc_write("<jemalloc>: Conf string ends "
861 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
865 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
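/*
 * Editor's sketch (illustrative, not upstream code): malloc_conf_next()
 * walks a conf string of comma-separated key:value pairs.  For example, a
 * string such as
 *
 *	"narenas:4,lg_chunk:21,junk:true"
 *
 * yields three iterations with (k, klen)/(v, vlen) framing "narenas"/"4",
 * "lg_chunk"/"21" and "junk"/"true"; the same format is accepted from
 * je_malloc_conf, the /etc/malloc.conf symlink target and the MALLOC_CONF
 * environment variable, as malloc_conf_init() below shows.
 */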
879 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
883 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
888 malloc_slow_flag_init(void)
891 * Combine the runtime options into malloc_slow for the fast path. Called
892 * after processing all the options.
894 malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
895 | (opt_junk_free ? flag_opt_junk_free : 0)
896 | (opt_quarantine ? flag_opt_quarantine : 0)
897 | (opt_zero ? flag_opt_zero : 0)
898 | (opt_utrace ? flag_opt_utrace : 0)
899 | (opt_xmalloc ? flag_opt_xmalloc : 0);
902 malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
904 malloc_slow = (malloc_slow_flags != 0);
908 malloc_conf_init(void)
911 char buf[PATH_MAX + 1];
912 const char *opts, *k, *v;
916 * Automatically configure valgrind before processing options. The
917 * valgrind option remains in jemalloc 3.x for compatibility reasons.
919 if (config_valgrind) {
920 in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
921 if (config_fill && unlikely(in_valgrind)) {
923 opt_junk_alloc = false;
924 opt_junk_free = false;
926 opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
929 if (config_tcache && unlikely(in_valgrind))
933 for (i = 0; i < 4; i++) {
934 /* Get runtime configuration. */
937 opts = config_malloc_conf;
940 if (je_malloc_conf != NULL) {
942 * Use options that were compiled into the
945 opts = je_malloc_conf;
947 /* No configuration specified. */
955 int saved_errno = errno;
956 const char *linkname =
957 # ifdef JEMALLOC_PREFIX
958 "/etc/"JEMALLOC_PREFIX"malloc.conf"
965 * Try to use the contents of the "/etc/malloc.conf"
966 * symbolic link's name.
968 linklen = readlink(linkname, buf, sizeof(buf) - 1);
970 /* No configuration specified. */
973 set_errno(saved_errno);
980 const char *envname =
981 #ifdef JEMALLOC_PREFIX
982 JEMALLOC_CPREFIX"MALLOC_CONF"
988 if ((opts = secure_getenv(envname)) != NULL) {
990 * Do nothing; opts is already initialized to
991 * the value of the MALLOC_CONF environment
995 /* No configuration specified. */
1006 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
1008 #define CONF_MATCH(n) \
1009 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
1010 #define CONF_MATCH_VALUE(n) \
1011 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
1012 #define CONF_HANDLE_BOOL(o, n, cont) \
1013 if (CONF_MATCH(n)) { \
1014 if (CONF_MATCH_VALUE("true")) \
1016 else if (CONF_MATCH_VALUE("false")) \
1019 malloc_conf_error( \
1020 "Invalid conf value", \
1021 k, klen, v, vlen); \
1026 #define CONF_HANDLE_T_U(t, o, n, min, max, clip) \
1027 if (CONF_MATCH(n)) { \
1032 um = malloc_strtoumax(v, &end, 0); \
1033 if (get_errno() != 0 || (uintptr_t)end -\
1034 (uintptr_t)v != vlen) { \
1035 malloc_conf_error( \
1036 "Invalid conf value", \
1037 k, klen, v, vlen); \
1038 } else if (clip) { \
1039 if ((min) != 0 && um < (min)) \
1041 else if (um > (max)) \
1046 if (((min) != 0 && um < (min)) \
1048 malloc_conf_error( \
1051 k, klen, v, vlen); \
1057 #define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \
1058 CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
1059 #define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
1060 CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
1061 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
1062 if (CONF_MATCH(n)) { \
1067 l = strtol(v, &end, 0); \
1068 if (get_errno() != 0 || (uintptr_t)end -\
1069 (uintptr_t)v != vlen) { \
1070 malloc_conf_error( \
1071 "Invalid conf value", \
1072 k, klen, v, vlen); \
1073 } else if (l < (ssize_t)(min) || l > \
1075 malloc_conf_error( \
1076 "Out-of-range conf value", \
1077 k, klen, v, vlen); \
1082 #define CONF_HANDLE_CHAR_P(o, n, d) \
1083 if (CONF_MATCH(n)) { \
1084 size_t cpylen = (vlen <= \
1085 sizeof(o)-1) ? vlen : \
1087 strncpy(o, v, cpylen); \
1092 CONF_HANDLE_BOOL(opt_abort, "abort", true)
1094 * Chunks always require at least one header page,
1095 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
1096 * possibly an additional page in the presence of
1097 * redzones. In order to simplify options processing,
1098 * use a conservative bound that accommodates all these
1101 CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
1102 LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
1103 (sizeof(size_t) << 3) - 1, true)
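/*
 * Editor's note (worked example, assuming typical values rather than
 * quoting upstream): with LG_PAGE == 12 (4 KiB pages) and
 * LG_SIZE_CLASS_GROUP == 2, the conservative lower bound above is
 * 12 + 2 + 2 == 16 when config_fill is enabled, i.e. chunks of at least
 * 64 KiB; the upper bound (sizeof(size_t) << 3) - 1 is 63 on an LP64
 * system, since a chunk cannot span the whole address space.
 */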
1104 if (strncmp("dss", k, klen) == 0) {
1107 for (i = 0; i < dss_prec_limit; i++) {
1108 if (strncmp(dss_prec_names[i], v, vlen)
1110 if (chunk_dss_prec_set(NULL,
1113 "Error setting dss",
1124 malloc_conf_error("Invalid conf value",
1129 CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1131 if (strncmp("purge", k, klen) == 0) {
1134 for (i = 0; i < purge_mode_limit; i++) {
1135 if (strncmp(purge_mode_names[i], v,
1137 opt_purge = (purge_mode_t)i;
1143 malloc_conf_error("Invalid conf value",
1148 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
1149 -1, (sizeof(size_t) << 3) - 1)
1150 CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
1152 CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
1154 if (CONF_MATCH("junk")) {
1155 if (CONF_MATCH_VALUE("true")) {
1157 opt_junk_alloc = opt_junk_free =
1159 } else if (CONF_MATCH_VALUE("false")) {
1161 opt_junk_alloc = opt_junk_free =
1163 } else if (CONF_MATCH_VALUE("alloc")) {
1165 opt_junk_alloc = true;
1166 opt_junk_free = false;
1167 } else if (CONF_MATCH_VALUE("free")) {
1169 opt_junk_alloc = false;
1170 opt_junk_free = true;
1173 "Invalid conf value", k,
1178 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
1179 0, SIZE_T_MAX, false)
1180 CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
1181 CONF_HANDLE_BOOL(opt_zero, "zero", true)
1183 if (config_utrace) {
1184 CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
1186 if (config_xmalloc) {
1187 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
1189 if (config_tcache) {
1190 CONF_HANDLE_BOOL(opt_tcache, "tcache",
1191 !config_valgrind || !in_valgrind)
1192 if (CONF_MATCH("tcache")) {
1193 assert(config_valgrind && in_valgrind);
1197 "tcache cannot be enabled "
1198 "while running inside Valgrind",
1203 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
1204 "lg_tcache_max", -1,
1205 (sizeof(size_t) << 3) - 1)
1208 CONF_HANDLE_BOOL(opt_prof, "prof", true)
1209 CONF_HANDLE_CHAR_P(opt_prof_prefix,
1210 "prof_prefix", "jeprof")
1211 CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
1213 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1214 "prof_thread_active_init", true)
1215 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1216 "lg_prof_sample", 0,
1217 (sizeof(uint64_t) << 3) - 1, true)
1218 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
1220 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1221 "lg_prof_interval", -1,
1222 (sizeof(uint64_t) << 3) - 1)
1223 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
1225 CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
1227 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
1230 malloc_conf_error("Invalid conf pair", k, klen, v,
1233 #undef CONF_HANDLE_BOOL
1234 #undef CONF_HANDLE_SIZE_T
1235 #undef CONF_HANDLE_SSIZE_T
1236 #undef CONF_HANDLE_CHAR_P
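/*
 * Editor's sketch (usage illustration only, not part of this file): for an
 * unprefixed build, the option syntax parsed above can be supplied at run
 * time through the environment, e.g.
 *
 *	MALLOC_CONF="stats_print:true,narenas:2" ./app
 *
 * or baked in by defining the je_malloc_conf string; prefixed builds use
 * the JEMALLOC_CPREFIX'd variable name instead, as the lookup above shows.
 */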
1242 malloc_init_hard_needed(void)
1245 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1246 malloc_init_recursible)) {
1248 * Another thread initialized the allocator before this one
1249 * acquired init_lock, or this thread is the initializing
1250 * thread, and it is recursively allocating.
1254 #ifdef JEMALLOC_THREADED_INIT
1255 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1256 /* Busy-wait until the initializing thread completes. */
1258 malloc_mutex_unlock(NULL, &init_lock);
1260 malloc_mutex_lock(NULL, &init_lock);
1261 } while (!malloc_initialized());
1269 malloc_init_hard_a0_locked()
1272 malloc_initializer = INITIALIZER;
1277 if (opt_stats_print) {
1278 /* Print statistics at exit. */
1279 if (atexit(stats_print_atexit) != 0) {
1280 malloc_write("<jemalloc>: Error in atexit()\n");
1296 if (config_tcache && tcache_boot(TSDN_NULL))
1298 if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
1301 * Create enough scaffolding to allow recursive allocation in
1305 narenas_total_set(narenas_auto);
1307 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1309 * Initialize one arena here. The rest are lazily created in
1310 * arena_choose_hard().
1312 if (arena_init(TSDN_NULL, 0) == NULL)
1315 malloc_init_state = malloc_init_a0_initialized;
1321 malloc_init_hard_a0(void)
1325 malloc_mutex_lock(TSDN_NULL, &init_lock);
1326 ret = malloc_init_hard_a0_locked();
1327 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1331 /* Initialize data structures which may trigger recursive allocation. */
1333 malloc_init_hard_recursible(void)
1336 malloc_init_state = malloc_init_recursible;
1338 ncpus = malloc_ncpus();
1340 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
1341 && !defined(_WIN32) && !defined(__native_client__))
1342 /* LinuxThreads' pthread_atfork() allocates. */
1343 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1344 jemalloc_postfork_child) != 0) {
1345 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1356 malloc_init_hard_finish(tsdn_t *tsdn)
1359 if (malloc_mutex_boot())
1362 if (opt_narenas == 0) {
1364 * For SMP systems, create more than one arena per CPU by
1368 opt_narenas = ncpus << 2;
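/*
 * Editor's note (worked example, not upstream text): with 8 CPUs reported
 * by malloc_ncpus(), the default becomes 8 << 2 == 32 arenas, which the
 * code below then clamps to MALLOCX_ARENA_MAX if necessary.
 */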
1372 narenas_auto = opt_narenas;
1374 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1376 if (narenas_auto > MALLOCX_ARENA_MAX) {
1377 narenas_auto = MALLOCX_ARENA_MAX;
1378 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1381 narenas_total_set(narenas_auto);
1383 /* Allocate and initialize arenas. */
1384 arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
1385 (MALLOCX_ARENA_MAX+1));
1388 /* Copy the pointer to the one arena that was already initialized. */
1391 malloc_init_state = malloc_init_initialized;
1392 malloc_slow_flag_init();
1398 malloc_init_hard(void)
1402 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1405 malloc_mutex_lock(TSDN_NULL, &init_lock);
1406 if (!malloc_init_hard_needed()) {
1407 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1411 if (malloc_init_state != malloc_init_a0_initialized &&
1412 malloc_init_hard_a0_locked()) {
1413 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1417 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1418 /* Recursive allocation relies on functional tsd. */
1419 tsd = malloc_tsd_boot0();
1422 if (malloc_init_hard_recursible())
1424 malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1426 if (config_prof && prof_boot2(tsd_tsdn(tsd))) {
1427 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1431 if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
1432 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1436 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1442 * End initialization functions.
1444 /******************************************************************************/
1446 * Begin malloc(3)-compatible functions.
1450 ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
1451 prof_tctx_t *tctx, bool slow_path)
1457 if (usize <= SMALL_MAXCLASS) {
1458 szind_t ind_large = size2index(LARGE_MINCLASS);
1459 p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
1462 arena_prof_promoted(tsd_tsdn(tsd), p, usize);
1464 p = ialloc(tsd, usize, ind, zero, slow_path);
1469 JEMALLOC_ALWAYS_INLINE_C void *
1470 ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
1475 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1476 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1477 p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
1479 p = ialloc(tsd, usize, ind, zero, slow_path);
1480 if (unlikely(p == NULL)) {
1481 prof_alloc_rollback(tsd, tctx, true);
1484 prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
1490 * ialloc_body() is inlined so that fast and slow paths are generated separately
1491 * with statically known slow_path.
1493 * This function guarantees that *tsdn is non-NULL on success.
1495 JEMALLOC_ALWAYS_INLINE_C void *
1496 ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
1502 if (slow_path && unlikely(malloc_init())) {
1508 *tsdn = tsd_tsdn(tsd);
1509 witness_assert_lockless(tsd_tsdn(tsd));
1511 ind = size2index(size);
1512 if (unlikely(ind >= NSIZES))
1515 if (config_stats || (config_prof && opt_prof) || (slow_path &&
1516 config_valgrind && unlikely(in_valgrind))) {
1517 *usize = index2size(ind);
1518 assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
1521 if (config_prof && opt_prof)
1522 return (ialloc_prof(tsd, *usize, ind, zero, slow_path));
1524 return (ialloc(tsd, size, ind, zero, slow_path));
1527 JEMALLOC_ALWAYS_INLINE_C void
1528 ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
1529 bool update_errno, bool slow_path)
1532 assert(!tsdn_null(tsdn) || ret == NULL);
1534 if (unlikely(ret == NULL)) {
1535 if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
1536 malloc_printf("<jemalloc>: Error in %s(): out of "
1543 if (config_stats && likely(ret != NULL)) {
1544 assert(usize == isalloc(tsdn, ret, config_prof));
1545 *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
1547 witness_assert_lockless(tsdn);
1550 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1551 void JEMALLOC_NOTHROW *
1552 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
1553 je_malloc(size_t size)
1557 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1562 if (likely(!malloc_slow)) {
1563 ret = ialloc_body(size, false, &tsdn, &usize, false);
1564 ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
1566 ret = ialloc_body(size, false, &tsdn, &usize, true);
1567 ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
1568 UTRACE(0, size, ret);
1569 JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
1576 imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
1583 if (usize <= SMALL_MAXCLASS) {
1584 assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
1585 p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
1588 arena_prof_promoted(tsd_tsdn(tsd), p, usize);
1590 p = ipalloc(tsd, usize, alignment, false);
1595 JEMALLOC_ALWAYS_INLINE_C void *
1596 imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
1601 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1602 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1603 p = imemalign_prof_sample(tsd, alignment, usize, tctx);
1605 p = ipalloc(tsd, usize, alignment, false);
1606 if (unlikely(p == NULL)) {
1607 prof_alloc_rollback(tsd, tctx, true);
1610 prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
1615 JEMALLOC_ATTR(nonnull(1))
1617 imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
1624 assert(min_alignment != 0);
1626 if (unlikely(malloc_init())) {
1632 witness_assert_lockless(tsd_tsdn(tsd));
1636 /* Make sure that alignment is a large enough power of 2. */
1637 if (unlikely(((alignment - 1) & alignment) != 0
1638 || (alignment < min_alignment))) {
1639 if (config_xmalloc && unlikely(opt_xmalloc)) {
1640 malloc_write("<jemalloc>: Error allocating "
1641 "aligned memory: invalid alignment\n");
1649 usize = sa2u(size, alignment);
1650 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
1655 if (config_prof && opt_prof)
1656 result = imemalign_prof(tsd, alignment, usize);
1658 result = ipalloc(tsd, usize, alignment, false);
1659 if (unlikely(result == NULL))
1661 assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
1666 if (config_stats && likely(result != NULL)) {
1667 assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
1668 *tsd_thread_allocatedp_get(tsd) += usize;
1670 UTRACE(0, size, result);
1671 JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
1673 witness_assert_lockless(tsd_tsdn(tsd));
1676 assert(result == NULL);
1677 if (config_xmalloc && unlikely(opt_xmalloc)) {
1678 malloc_write("<jemalloc>: Error allocating aligned memory: "
1683 witness_assert_lockless(tsd_tsdn(tsd));
1687 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1688 JEMALLOC_ATTR(nonnull(1))
1689 je_posix_memalign(void **memptr, size_t alignment, size_t size)
1693 ret = imemalign(memptr, alignment, size, sizeof(void *));
1698 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1699 void JEMALLOC_NOTHROW *
1700 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
1701 je_aligned_alloc(size_t alignment, size_t size)
1706 if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
1714 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1715 void JEMALLOC_NOTHROW *
1716 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
1717 je_calloc(size_t num, size_t size)
1722 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1724 num_size = num * size;
1725 if (unlikely(num_size == 0)) {
1726 if (num == 0 || size == 0)
1729 num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
1731 * Try to avoid division here. We know that it isn't possible to
1732 * overflow during multiplication if neither operand uses any of the
1733 * most significant half of the bits in a size_t.
1735 } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
1736 2))) && (num_size / size != num)))
1737 num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
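/*
 * Editor's note (worked example, not upstream text): the mask above is
 * SIZE_T_MAX << (sizeof(size_t) << 2), i.e. the high half of size_t.  On
 * an LP64 system that is SIZE_T_MAX << 32: if neither num nor size has a
 * bit set in the upper 32 bits, both are < 2^32, so num * size < 2^64 and
 * the multiplication cannot wrap; only when this cheap mask test fires is
 * the division-based check (num_size / size != num) consulted.
 */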
1739 if (likely(!malloc_slow)) {
1740 ret = ialloc_body(num_size, true, &tsdn, &usize, false);
1741 ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
1743 ret = ialloc_body(num_size, true, &tsdn, &usize, true);
1744 ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
1745 UTRACE(0, num_size, ret);
1746 JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
1753 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
1760 if (usize <= SMALL_MAXCLASS) {
1761 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
1764 arena_prof_promoted(tsd_tsdn(tsd), p, usize);
1766 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1771 JEMALLOC_ALWAYS_INLINE_C void *
1772 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
1776 prof_tctx_t *old_tctx, *tctx;
1778 prof_active = prof_active_get_unlocked();
1779 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
1780 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
1781 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1782 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
1784 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1785 if (unlikely(p == NULL)) {
1786 prof_alloc_rollback(tsd, tctx, true);
1789 prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
1795 JEMALLOC_INLINE_C void
1796 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
1799 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1801 witness_assert_lockless(tsd_tsdn(tsd));
1803 assert(ptr != NULL);
1804 assert(malloc_initialized() || IS_INITIALIZER);
1806 if (config_prof && opt_prof) {
1807 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
1808 prof_free(tsd, ptr, usize);
1809 } else if (config_stats || config_valgrind)
1810 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
1812 *tsd_thread_deallocatedp_get(tsd) += usize;
1814 if (likely(!slow_path))
1815 iqalloc(tsd, ptr, tcache, false);
1817 if (config_valgrind && unlikely(in_valgrind))
1818 rzsize = p2rz(tsd_tsdn(tsd), ptr);
1819 iqalloc(tsd, ptr, tcache, true);
1820 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1824 JEMALLOC_INLINE_C void
1825 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
1827 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1829 witness_assert_lockless(tsd_tsdn(tsd));
1831 assert(ptr != NULL);
1832 assert(malloc_initialized() || IS_INITIALIZER);
1834 if (config_prof && opt_prof)
1835 prof_free(tsd, ptr, usize);
1837 *tsd_thread_deallocatedp_get(tsd) += usize;
1838 if (config_valgrind && unlikely(in_valgrind))
1839 rzsize = p2rz(tsd_tsdn(tsd), ptr);
1840 isqalloc(tsd, ptr, usize, tcache, slow_path);
1841 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1844 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1845 void JEMALLOC_NOTHROW *
1846 JEMALLOC_ALLOC_SIZE(2)
1847 je_realloc(void *ptr, size_t size)
1850 tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
1851 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1852 size_t old_usize = 0;
1853 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1855 if (unlikely(size == 0)) {
1859 /* realloc(ptr, 0) is equivalent to free(ptr). */
1862 ifree(tsd, ptr, tcache_get(tsd, false), true);
1868 if (likely(ptr != NULL)) {
1871 assert(malloc_initialized() || IS_INITIALIZER);
1872 malloc_thread_init();
1875 witness_assert_lockless(tsd_tsdn(tsd));
1877 old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
1878 if (config_valgrind && unlikely(in_valgrind)) {
1879 old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
1883 if (config_prof && opt_prof) {
1885 ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
1886 NULL : irealloc_prof(tsd, ptr, old_usize, usize);
1888 if (config_stats || (config_valgrind &&
1889 unlikely(in_valgrind)))
1891 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
1893 tsdn = tsd_tsdn(tsd);
1895 /* realloc(NULL, size) is equivalent to malloc(size). */
1896 if (likely(!malloc_slow))
1897 ret = ialloc_body(size, false, &tsdn, &usize, false);
1899 ret = ialloc_body(size, false, &tsdn, &usize, true);
1900 assert(!tsdn_null(tsdn) || ret == NULL);
1903 if (unlikely(ret == NULL)) {
1904 if (config_xmalloc && unlikely(opt_xmalloc)) {
1905 malloc_write("<jemalloc>: Error in realloc(): "
1911 if (config_stats && likely(ret != NULL)) {
1914 assert(usize == isalloc(tsdn, ret, config_prof));
1915 tsd = tsdn_tsd(tsdn);
1916 *tsd_thread_allocatedp_get(tsd) += usize;
1917 *tsd_thread_deallocatedp_get(tsd) += old_usize;
1919 UTRACE(ptr, size, ret);
1920 JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
1921 old_rzsize, true, false);
1922 witness_assert_lockless(tsdn);
1926 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
1931 if (likely(ptr != NULL)) {
1932 tsd_t *tsd = tsd_fetch();
1933 witness_assert_lockless(tsd_tsdn(tsd));
1934 if (likely(!malloc_slow))
1935 ifree(tsd, ptr, tcache_get(tsd, false), false);
1937 ifree(tsd, ptr, tcache_get(tsd, false), true);
1938 witness_assert_lockless(tsd_tsdn(tsd));
1943 * End malloc(3)-compatible functions.
1945 /******************************************************************************/
1947 * Begin non-standard override functions.
1950 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
1951 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1952 void JEMALLOC_NOTHROW *
1953 JEMALLOC_ATTR(malloc)
1954 je_memalign(size_t alignment, size_t size)
1956 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1957 if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
1963 #ifdef JEMALLOC_OVERRIDE_VALLOC
1964 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1965 void JEMALLOC_NOTHROW *
1966 JEMALLOC_ATTR(malloc)
1967 je_valloc(size_t size)
1969 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1970 if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
1977 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1978 * #define je_malloc malloc
1980 #define malloc_is_malloc 1
1981 #define is_malloc_(a) malloc_is_ ## a
1982 #define is_malloc(a) is_malloc_(a)
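/*
 * Editor's note (worked expansion, not upstream text): when jemalloc_defs.h
 * contains "#define je_malloc malloc", the argument of is_malloc() is
 * expanded before the token paste in is_malloc_(), so
 *
 *	is_malloc(je_malloc) -> is_malloc_(malloc) -> malloc_is_malloc -> 1
 *
 * and the #if below is taken; without that define the paste yields the
 * undefined identifier malloc_is_je_malloc, which evaluates to 0 in the
 * preprocessor conditional.
 */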
1984 #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
1986 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1987 * to inconsistently reference libc's malloc(3)-compatible functions
1988 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1990 * These definitions interpose hooks in glibc. The functions are actually
1991 * passed an extra argument for the caller return address, which will be
1994 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
1995 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
1996 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
1997 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
1998 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2004 * End non-standard override functions.
2006 /******************************************************************************/
2008 * Begin non-standard functions.
2011 JEMALLOC_ALWAYS_INLINE_C bool
2012 imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
2013 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
2016 if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
2020 *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2021 *usize = sa2u(size, *alignment);
2023 if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2025 *zero = MALLOCX_ZERO_GET(flags);
2026 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2027 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2030 *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2032 *tcache = tcache_get(tsd, true);
2033 if ((flags & MALLOCX_ARENA_MASK) != 0) {
2034 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2035 *arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2036 if (unlikely(*arena == NULL))
2043 JEMALLOC_ALWAYS_INLINE_C void *
2044 imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
2045 tcache_t *tcache, arena_t *arena, bool slow_path)
2049 if (unlikely(alignment != 0))
2050 return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
2051 ind = size2index(usize);
2052 assert(ind < NSIZES);
2053 return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
2058 imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
2059 tcache_t *tcache, arena_t *arena, bool slow_path)
2063 if (usize <= SMALL_MAXCLASS) {
2064 assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
2065 sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
2066 p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
2067 tcache, arena, slow_path);
2070 arena_prof_promoted(tsdn, p, usize);
2072 p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
2079 JEMALLOC_ALWAYS_INLINE_C void *
2080 imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
2089 if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
2090 &zero, &tcache, &arena)))
2092 tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
2093 if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
2094 p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
2095 tcache, arena, slow_path);
2096 } else if ((uintptr_t)tctx > (uintptr_t)1U) {
2097 p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
2098 tcache, arena, slow_path);
2101 if (unlikely(p == NULL)) {
2102 prof_alloc_rollback(tsd, tctx, true);
2105 prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);
2107 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2111 JEMALLOC_ALWAYS_INLINE_C void *
2112 imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
2121 if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
2122 &zero, &tcache, &arena)))
2124 p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
2126 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2130 /* This function guarantees that *tsdn is non-NULL on success. */
2131 JEMALLOC_ALWAYS_INLINE_C void *
2132 imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
2137 if (slow_path && unlikely(malloc_init())) {
2143 *tsdn = tsd_tsdn(tsd);
2144 witness_assert_lockless(tsd_tsdn(tsd));
2146 if (likely(flags == 0)) {
2147 szind_t ind = size2index(size);
2148 if (unlikely(ind >= NSIZES))
2150 if (config_stats || (config_prof && opt_prof) || (slow_path &&
2151 config_valgrind && unlikely(in_valgrind))) {
2152 *usize = index2size(ind);
2153 assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
2156 if (config_prof && opt_prof) {
2157 return (ialloc_prof(tsd, *usize, ind, false,
2161 return (ialloc(tsd, size, ind, false, slow_path));
2164 if (config_prof && opt_prof)
2165 return (imallocx_prof(tsd, size, flags, usize, slow_path));
2167 return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
2170 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2171 void JEMALLOC_NOTHROW *
2172 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2173 je_mallocx(size_t size, int flags)
2181 if (likely(!malloc_slow)) {
2182 p = imallocx_body(size, flags, &tsdn, &usize, false);
2183 ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
2185 p = imallocx_body(size, flags, &tsdn, &usize, true);
2186 ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
2188 JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
2189 MALLOCX_ZERO_GET(flags));
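/*
 * Editor's sketch (usage illustration only, not part of this file): a
 * caller asking for a zeroed, 64-byte-aligned allocation via the
 * non-standard API exercised by je_mallocx() above would write roughly:
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *	if (p != NULL)
 *		dallocx(p, 0);
 *
 * MALLOCX_ALIGN(), MALLOCX_ZERO, mallocx() and dallocx() are the public
 * spellings of the flags and entry points handled here.
 */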
2196 irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
2197 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2204 if (usize <= SMALL_MAXCLASS) {
2205 p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
2206 zero, tcache, arena);
2209 arena_prof_promoted(tsd_tsdn(tsd), p, usize);
2211 p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
2218 JEMALLOC_ALWAYS_INLINE_C void *
2219 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2220 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2225 prof_tctx_t *old_tctx, *tctx;
2227 prof_active = prof_active_get_unlocked();
2228 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
2229 tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
2230 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2231 p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
2232 alignment, zero, tcache, arena, tctx);
2234 p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
2237 if (unlikely(p == NULL)) {
2238 prof_alloc_rollback(tsd, tctx, false);
2242 if (p == old_ptr && alignment != 0) {
2244 * The allocation did not move, so it is possible that the size
2245 * class is smaller than would guarantee the requested
2246 * alignment, and that the alignment constraint was
2247 * serendipitously satisfied. Additionally, old_usize may not
2248 * be the same as the current usize because of in-place large
2249 * reallocation. Therefore, query the actual value of usize.
2251 *usize = isalloc(tsd_tsdn(tsd), p, config_prof);
2253 prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
2254 old_usize, old_tctx);
2259 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2260 void JEMALLOC_NOTHROW *
2261 JEMALLOC_ALLOC_SIZE(2)
2262 je_rallocx(void *ptr, size_t size, int flags)
2268 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2269 size_t alignment = MALLOCX_ALIGN_GET(flags);
2270 bool zero = flags & MALLOCX_ZERO;
2274 assert(ptr != NULL);
2276 assert(malloc_initialized() || IS_INITIALIZER);
2277 malloc_thread_init();
2279 witness_assert_lockless(tsd_tsdn(tsd));
2281 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2282 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2283 arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2284 if (unlikely(arena == NULL))
2289 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2290 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2293 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2295 tcache = tcache_get(tsd, true);
2297 old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
2298 if (config_valgrind && unlikely(in_valgrind))
2299 old_rzsize = u2rz(old_usize);
2301 if (config_prof && opt_prof) {
2302 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
2303 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
2305 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2306 zero, tcache, arena);
2307 if (unlikely(p == NULL))
2310 p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
2312 if (unlikely(p == NULL))
2314 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2315 usize = isalloc(tsd_tsdn(tsd), p, config_prof);
2317 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2320 *tsd_thread_allocatedp_get(tsd) += usize;
2321 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2323 UTRACE(ptr, size, p);
2324 JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
2325 old_usize, old_rzsize, false, zero);
2326 witness_assert_lockless(tsd_tsdn(tsd));
2329 if (config_xmalloc && unlikely(opt_xmalloc)) {
2330 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2333 UTRACE(ptr, size, 0);
2334 witness_assert_lockless(tsd_tsdn(tsd));
2338 JEMALLOC_ALWAYS_INLINE_C size_t
2339 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2340 size_t extra, size_t alignment, bool zero)
2344 if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
2346 usize = isalloc(tsdn, ptr, config_prof);
2352 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2353 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
2359 usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2365 JEMALLOC_ALWAYS_INLINE_C size_t
2366 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2367 size_t extra, size_t alignment, bool zero)
2369 size_t usize_max, usize;
2371 prof_tctx_t *old_tctx, *tctx;
2373 prof_active = prof_active_get_unlocked();
2374 old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
2376 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2377 * Therefore, compute its maximum possible value and use that in
2378 * prof_alloc_prep() to decide whether to capture a backtrace.
2379 * prof_realloc() will use the actual usize to decide whether to sample.
2381 if (alignment == 0) {
2382 usize_max = s2u(size+extra);
2383 assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
2385 usize_max = sa2u(size+extra, alignment);
2386 if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
2388 * usize_max is out of range, and chances are that
2389 * allocation will fail, but use the maximum possible
2390 * value and carry on with prof_alloc_prep(), just in
2391 * case allocation succeeds.
2393 usize_max = HUGE_MAXCLASS;
2396 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2398 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2399 usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2400 size, extra, alignment, zero, tctx);
2402 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2403 extra, alignment, zero);
2405 if (usize == old_usize) {
2406 prof_alloc_rollback(tsd, tctx, false);
2409 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2415 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2416 je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2419 size_t usize, old_usize;
2420 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2421 size_t alignment = MALLOCX_ALIGN_GET(flags);
2422 bool zero = flags & MALLOCX_ZERO;
2424 assert(ptr != NULL);
2426 assert(SIZE_T_MAX - size >= extra);
2427 assert(malloc_initialized() || IS_INITIALIZER);
2428 malloc_thread_init();
2430 witness_assert_lockless(tsd_tsdn(tsd));
2432 old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
2435 * The API explicitly absolves itself of protecting against (size +
2436 * extra) numerical overflow, but we may need to clamp extra to avoid
2437 * exceeding HUGE_MAXCLASS.
2439 * Ordinarily, size limit checking is handled deeper down, but here we
2440 * have to check as part of (size + extra) clamping, since we need the
2441 * clamped value in the above helper functions.
2443 if (unlikely(size > HUGE_MAXCLASS)) {
2445 goto label_not_resized;
2447 if (unlikely(HUGE_MAXCLASS - size < extra))
2448 extra = HUGE_MAXCLASS - size;
2450 if (config_valgrind && unlikely(in_valgrind))
2451 old_rzsize = u2rz(old_usize);
2453 if (config_prof && opt_prof) {
2454 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2457 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2458 extra, alignment, zero);
2460 if (unlikely(usize == old_usize))
2461 goto label_not_resized;
2464 *tsd_thread_allocatedp_get(tsd) += usize;
2465 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2467 JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
2468 old_usize, old_rzsize, false, zero);
2470 UTRACE(ptr, size, ptr);
2471 witness_assert_lockless(tsd_tsdn(tsd));
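/*
 * Editor's sketch (usage illustration only, not part of this file):
 * xallocx() tries to resize in place and reports the resulting usable
 * size, so a caller that prefers not to move data might write:
 *
 *	size_t newsz = xallocx(p, want, 0, 0);
 *	if (newsz < want)
 *		... fall back to rallocx(), p was left unchanged ...
 *
 * which mirrors how je_xallocx() above returns the old usable size when
 * the request cannot be satisfied without moving the allocation.
 */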
2475 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2477 je_sallocx(const void *ptr, int flags)
2482 assert(malloc_initialized() || IS_INITIALIZER);
2483 malloc_thread_init();
2485 tsdn = tsdn_fetch();
2486 witness_assert_lockless(tsdn);
2488 if (config_ivsalloc)
2489 usize = ivsalloc(tsdn, ptr, config_prof);
2491 usize = isalloc(tsdn, ptr, config_prof);
2493 witness_assert_lockless(tsdn);
2497 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2498 je_dallocx(void *ptr, int flags)
2503 assert(ptr != NULL);
2504 assert(malloc_initialized() || IS_INITIALIZER);
2507 witness_assert_lockless(tsd_tsdn(tsd));
2508 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2509 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2512 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2514 tcache = tcache_get(tsd, false);
2517 if (likely(!malloc_slow))
2518 ifree(tsd, ptr, tcache, false);
2520 ifree(tsd, ptr, tcache, true);
2521 witness_assert_lockless(tsd_tsdn(tsd));
2524 JEMALLOC_ALWAYS_INLINE_C size_t
2525 inallocx(tsdn_t *tsdn, size_t size, int flags)
2529 witness_assert_lockless(tsdn);
2531 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
2534 usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2535 witness_assert_lockless(tsdn);
2539 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2540 je_sdallocx(void *ptr, size_t size, int flags)
2546 assert(ptr != NULL);
2547 assert(malloc_initialized() || IS_INITIALIZER);
2549 usize = inallocx(tsd_tsdn(tsd), size, flags);
2550 assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));
2552 witness_assert_lockless(tsd_tsdn(tsd));
2553 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2554 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2557 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2559 tcache = tcache_get(tsd, false);
2562 if (likely(!malloc_slow))
2563 isfree(tsd, ptr, usize, tcache, false);
2565 isfree(tsd, ptr, usize, tcache, true);
2566 witness_assert_lockless(tsd_tsdn(tsd));
2569 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2571 je_nallocx(size_t size, int flags)
2578 if (unlikely(malloc_init()))
2581 tsdn = tsdn_fetch();
2582 witness_assert_lockless(tsdn);
2584 usize = inallocx(tsdn, size, flags);
2585 if (unlikely(usize > HUGE_MAXCLASS))
2588 witness_assert_lockless(tsdn);
2592 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2593 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2599 if (unlikely(malloc_init()))
2603 witness_assert_lockless(tsd_tsdn(tsd));
2604 ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
2605 witness_assert_lockless(tsd_tsdn(tsd));
2609 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2610 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
2615 if (unlikely(malloc_init()))
2618 tsdn = tsdn_fetch();
2619 witness_assert_lockless(tsdn);
2620 ret = ctl_nametomib(tsdn, name, mibp, miblenp);
2621 witness_assert_lockless(tsdn);
2625 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2626 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2627 void *newp, size_t newlen)
2632 if (unlikely(malloc_init()))
2636 witness_assert_lockless(tsd_tsdn(tsd));
2637 ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
2638 witness_assert_lockless(tsd_tsdn(tsd));
2642 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2643 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2648 tsdn = tsdn_fetch();
2649 witness_assert_lockless(tsdn);
2650 stats_print(write_cb, cbopaque, opts);
2651 witness_assert_lockless(tsdn);
2654 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2655 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2660 assert(malloc_initialized() || IS_INITIALIZER);
2661 malloc_thread_init();
2663 tsdn = tsdn_fetch();
2664 witness_assert_lockless(tsdn);
2666 if (config_ivsalloc)
2667 ret = ivsalloc(tsdn, ptr, config_prof);
2669 ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);
2671 witness_assert_lockless(tsdn);
2676 * End non-standard functions.
2678 /******************************************************************************/
2680 * Begin compatibility functions.
2683 #define ALLOCM_LG_ALIGN(la) (la)
2684 #define ALLOCM_ALIGN(a) (ffsl(a)-1)
2685 #define ALLOCM_ZERO ((int)0x40)
2686 #define ALLOCM_NO_MOVE ((int)0x80)
2688 #define ALLOCM_SUCCESS 0
2689 #define ALLOCM_ERR_OOM 1
2690 #define ALLOCM_ERR_NOT_MOVED 2
2693 je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
2697 assert(ptr != NULL);
2699 p = je_mallocx(size, flags);
2701 return (ALLOCM_ERR_OOM);
2703 *rsize = isalloc(tsdn_fetch(), p, config_prof);
2705 return (ALLOCM_SUCCESS);
2709 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
2712 bool no_move = flags & ALLOCM_NO_MOVE;
2714 assert(ptr != NULL);
2715 assert(*ptr != NULL);
2717 assert(SIZE_T_MAX - size >= extra);
2720 size_t usize = je_xallocx(*ptr, size, extra, flags);
2721 ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
2725 void *p = je_rallocx(*ptr, size+extra, flags);
2728 ret = ALLOCM_SUCCESS;
2730 ret = ALLOCM_ERR_OOM;
2732 *rsize = isalloc(tsdn_fetch(), *ptr, config_prof);
2738 je_sallocm(const void *ptr, size_t *rsize, int flags)
2741 assert(rsize != NULL);
2742 *rsize = je_sallocx(ptr, flags);
2743 return (ALLOCM_SUCCESS);
2747 je_dallocm(void *ptr, int flags)
2750 je_dallocx(ptr, flags);
2751 return (ALLOCM_SUCCESS);
2755 je_nallocm(size_t *rsize, size_t size, int flags)
2759 usize = je_nallocx(size, flags);
2761 return (ALLOCM_ERR_OOM);
2764 return (ALLOCM_SUCCESS);
2767 #undef ALLOCM_LG_ALIGN
2770 #undef ALLOCM_NO_MOVE
2772 #undef ALLOCM_SUCCESS
2773 #undef ALLOCM_ERR_OOM
2774 #undef ALLOCM_ERR_NOT_MOVED
2777 * End compatibility functions.
2779 /******************************************************************************/
2781 * The following functions are used by threading libraries for protection of
2782 * malloc during fork().
2786 * If an application creates a thread before doing any allocation in the main
2787 * thread, then calls fork(2) in the main thread followed by memory allocation
2788 * in the child process, a race can occur that results in deadlock within the
2789 * child: the main thread may have forked while the created thread had
2790 * partially initialized the allocator. Ordinarily jemalloc prevents
2791 * fork/malloc races via the following functions it registers during
2792 * initialization using pthread_atfork(), but of course that does no good if
2793 * the allocator isn't fully initialized at fork time. The following library
2794 * constructor is a partial solution to this problem. It may still be possible
2795 * to trigger the deadlock described above, but doing so would involve forking
2796 * via a library constructor that runs before jemalloc's runs.
2798 #ifndef JEMALLOC_JET
2799 JEMALLOC_ATTR(constructor)
2801 jemalloc_constructor(void)
2808 #ifndef JEMALLOC_MUTEX_INIT_CB
2810 jemalloc_prefork(void)
2812 JEMALLOC_EXPORT void
2813 _malloc_prefork(void)
2817 unsigned i, j, narenas;
2820 #ifdef JEMALLOC_MUTEX_INIT_CB
2821 if (!malloc_initialized())
2824 assert(malloc_initialized());
2828 narenas = narenas_total_get();
2830 witness_prefork(tsd);
2831 /* Acquire all mutexes in a safe order. */
2832 ctl_prefork(tsd_tsdn(tsd));
2833 malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
2834 prof_prefork0(tsd_tsdn(tsd));
2835 for (i = 0; i < 3; i++) {
2836 for (j = 0; j < narenas; j++) {
2837 if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
2841 arena_prefork0(tsd_tsdn(tsd), arena);
2844 arena_prefork1(tsd_tsdn(tsd), arena);
2847 arena_prefork2(tsd_tsdn(tsd), arena);
2849 default: not_reached();
2854 base_prefork(tsd_tsdn(tsd));
2855 chunk_prefork(tsd_tsdn(tsd));
2856 for (i = 0; i < narenas; i++) {
2857 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
2858 arena_prefork3(tsd_tsdn(tsd), arena);
2860 prof_prefork1(tsd_tsdn(tsd));
2863 #ifndef JEMALLOC_MUTEX_INIT_CB
2865 jemalloc_postfork_parent(void)
2867 JEMALLOC_EXPORT void
2868 _malloc_postfork(void)
2872 unsigned i, narenas;
2874 #ifdef JEMALLOC_MUTEX_INIT_CB
2875 if (!malloc_initialized())
2878 assert(malloc_initialized());
2882 witness_postfork_parent(tsd);
2883 /* Release all mutexes, now that fork() has completed. */
2884 chunk_postfork_parent(tsd_tsdn(tsd));
2885 base_postfork_parent(tsd_tsdn(tsd));
2886 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2889 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
2890 arena_postfork_parent(tsd_tsdn(tsd), arena);
2892 prof_postfork_parent(tsd_tsdn(tsd));
2893 malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
2894 ctl_postfork_parent(tsd_tsdn(tsd));
2898 jemalloc_postfork_child(void)
2901 unsigned i, narenas;
2903 assert(malloc_initialized());
2907 witness_postfork_child(tsd);
2908 /* Release all mutexes, now that fork() has completed. */
2909 chunk_postfork_child(tsd_tsdn(tsd));
2910 base_postfork_child(tsd_tsdn(tsd));
2911 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2914 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
2915 arena_postfork_child(tsd_tsdn(tsd), arena);
2917 prof_postfork_child(tsd_tsdn(tsd));
2918 malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
2919 ctl_postfork_child(tsd_tsdn(tsd));
2923 _malloc_first_thread(void)
2926 (void)malloc_mutex_first_thread();
2929 /******************************************************************************/