#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char *__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char *je_malloc_conf;

#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

malloc_mutex_t arenas_lock;

unsigned narenas_total;
unsigned narenas_auto;
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock;

JEMALLOC_ATTR(constructor)
	malloc_mutex_init(&init_lock);

# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;

static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
	void *p; /* Input pointer (as in realloc(p, s)). */
	size_t s; /* Request size. */
	void *r; /* Result pointer. */

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
		int utrace_serrno = errno; \
		utrace(&ut, sizeof(ut)); \
		errno = utrace_serrno; \
# define UTRACE(a, b, c)

/******************************************************************************/
 * Function prototypes for static functions that are referenced prior to
static bool malloc_init_hard(void);

/******************************************************************************/
 * Begin miscellaneous support functions.

/* Create a new arena and insert it into the arenas array at index ind. */
arenas_extend(unsigned ind)

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
	/* Only reached if there is an OOM error. */

	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path. Instead, punt
	 * by using arenas[0]. In practice, this is an extremely unlikely
	malloc_write("<jemalloc>: Error initializing arena\n");

/* Slow path, called only by choose_arena(). */
choose_arena_hard(void)

	if (narenas_auto > 1) {
		unsigned i, choose, first_null;

		first_null = narenas_auto;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arenas[i] != NULL) {
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
			} else if (first_null == narenas_auto) {
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.

		if (arenas[choose]->nthreads == 0
		    || first_null == narenas_auto) {
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			ret = arenas[choose];
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		malloc_mutex_unlock(&arenas_lock);
		malloc_mutex_lock(&arenas_lock);
		malloc_mutex_unlock(&arenas_lock);
	arenas_tsd_set(&ret);
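	/*
	 * Cache the chosen arena in this thread's TSD slot so that the
	 * choose_arena() fast path can return it directly on later
	 * allocations without rescanning the arenas array.
	 */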
stats_print_atexit(void)

	if (config_tcache && config_stats) {
		 * Merge stats from extant threads. This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events. As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arenas[i];
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				malloc_mutex_unlock(&arena->lock);
	je_malloc_stats_print(NULL, NULL, NULL);

 * End miscellaneous support functions.

/******************************************************************************/

 * Begin initialization functions.

	result = si.dwNumberOfProcessors;
	result = sysconf(_SC_NPROCESSORS_ONLN);
	return ((result == -1) ? 1 : (unsigned)result);
arenas_cleanup(void *arg)

	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	malloc_mutex_unlock(&arenas_lock);

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)

	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption. The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	if (config_fill && opt_quarantine)
		quarantine_alloc_hook();

JEMALLOC_ALWAYS_INLINE_C bool

	if (malloc_initialized == false && malloc_init_hard())
	malloc_thread_init();
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)

	const char *opts = *opts_p;

	for (accept = false; accept == false;) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
			malloc_write("<jemalloc>: Malformed conf string\n");

	for (accept = false; accept == false;) {
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			malloc_write("<jemalloc>: Conf string ends "
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,

malloc_conf_init(void)

	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;

	 * Automatically configure valgrind before processing options. The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	if (config_valgrind) {
		opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && opt_valgrind) {
			assert(opt_zero == false);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
		if (config_tcache && opt_valgrind)

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
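		/*
		 * Three sources are consulted, in order: the compiled-in
		 * je_malloc_conf string, the name of the /etc/malloc.conf
		 * symbolic link, and the MALLOC_CONF environment variable.
		 * An option set by a later source overrides the same option
		 * set by an earlier one.
		 */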
			if (je_malloc_conf != NULL) {
				 * Use options that were compiled into the
				opts = je_malloc_conf;
				/* No configuration specified. */
			int saved_errno = errno;
			const char *linkname =
# ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
				/* No configuration specified. */
				set_errno(saved_errno);
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
			if (issetugid() == 0 && (opts = getenv(envname)) !=
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				/* No configuration specified. */

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
#define CONF_HANDLE_BOOL(o, n) \
			if (sizeof(n)-1 == klen && strncmp(n, k, \
				if (strncmp("true", v, vlen) == 0 && \
				    vlen == sizeof("true")-1) \
				else if (strncmp("false", v, vlen) == \
				    0 && vlen == sizeof("false")-1) \
					    "Invalid conf value", \
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
			if (sizeof(n)-1 == klen && strncmp(n, k, \
				um = malloc_strtoumax(v, &end, 0); \
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) { \
					    "Invalid conf value", \
					if (min != 0 && um < min) \
					if ((min != 0 && um < min) || \
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
			if (sizeof(n)-1 == klen && strncmp(n, k, \
				l = strtol(v, &end, 0); \
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) { \
					    "Invalid conf value", \
				} else if (l < (ssize_t)min || l > \
					    "Out-of-range conf value", \
#define CONF_HANDLE_CHAR_P(o, n, d) \
			if (sizeof(n)-1 == klen && strncmp(n, k, \
				size_t cpylen = (vlen <= \
				    sizeof(o)-1) ? vlen : \
				strncpy(o, v, cpylen); \

			CONF_HANDLE_BOOL(opt_abort, "abort")
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones. In order to
			 * simplify options processing, fix the limit based on
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
			if (strncmp("dss", k, klen) == 0) {
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
						if (chunk_dss_prec_set(i)) {
				if (match == false) {
					malloc_conf_error("Invalid conf value",
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			if (config_valgrind) {
				CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    (sizeof(size_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			malloc_conf_error("Invalid conf pair", k, klen, v,
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
malloc_init_hard(void)

	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		malloc_mutex_unlock(&init_lock);
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
			malloc_mutex_unlock(&init_lock);
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
	malloc_initializer = INITIALIZER;

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
		malloc_mutex_unlock(&init_lock);
		malloc_mutex_unlock(&init_lock);
		malloc_mutex_unlock(&init_lock);
	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		malloc_mutex_unlock(&init_lock);
	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
	 * Create enough scaffolding to allow recursive allocation in
	narenas_total = narenas_auto = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
	 * Initialize one arena here. The rest are lazily created in
	 * choose_arena_hard().
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
	malloc_mutex_unlock(&init_lock);
	/**********************************************************************/
	/* Recursive allocation may follow. */
	ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
	/* LinuxThreads's pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
	/* Done recursively allocating. */
	/**********************************************************************/
	malloc_mutex_lock(&init_lock);
		malloc_mutex_unlock(&init_lock);
	if (opt_narenas == 0) {
		 * For SMP systems, create more than one arena per CPU by
		opt_narenas = ncpus << 2;
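		/*
		 * For example, a 16-CPU system defaults to 64 arenas, which
		 * spreads threads across arenas to keep per-arena lock
		 * contention low, at the cost of some additional metadata.
		 */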
	narenas_auto = opt_narenas;
	 * Make sure that the arenas array can be allocated. In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	if (narenas_auto > chunksize / sizeof(arena_t *)) {
		narenas_auto = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
	narenas_total = narenas_auto;
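	/*
	 * As an illustration, with the default 4 MiB chunks and 8-byte
	 * pointers the cap computed above is 524288 arenas, far more than the
	 * rest of the allocator can usefully manage.
	 */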
	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
	 * Zero the array. In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);

 * End initialization functions.

/******************************************************************************/

 * Begin malloc(3)-compatible functions.

imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)

	if (prof_promote && usize <= SMALL_MAXCLASS) {
		p = imalloc(SMALL_MAXCLASS+1);
		arena_prof_promoted(p, usize);
JEMALLOC_ALWAYS_INLINE_C void *
imalloc_prof(size_t usize, prof_thr_cnt_t *cnt)

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = imalloc_prof_sample(usize, cnt);
	prof_malloc(p, usize, cnt);

 * MALLOC_BODY() is a macro rather than a function because its contents are in
 * the fast path, but inlining would cause reliability issues when determining
 * how many frames to discard from heap profiling backtraces.
#define MALLOC_BODY(ret, size, usize) do { \
		if (config_prof && opt_prof) { \
			prof_thr_cnt_t *cnt; \
			 * Call PROF_ALLOC_PREP() here rather than in \
			 * imalloc_prof() so that imalloc_prof() can be \
			 * inlined without introducing uncertainty \
			 * about the number of backtrace frames to \
			 * ignore. imalloc_prof() is in the fast path \
			 * when heap profiling is enabled, so inlining \
			 * is critical to performance. (For \
			 * consistency all callers of PROF_ALLOC_PREP() \
			 * are structured similarly, even though e.g. \
			 * realloc() isn't called enough for inlining \
			PROF_ALLOC_PREP(1, usize, cnt); \
			ret = imalloc_prof(usize, cnt); \
			if (config_stats || (config_valgrind && \
			ret = imalloc(size); \

je_malloc(size_t size)

	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	MALLOC_BODY(ret, size, usize);
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)

	if (prof_promote && usize <= SMALL_MAXCLASS) {
		assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
		p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
		arena_prof_promoted(p, usize);
		p = ipalloc(usize, alignment, false);

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = imemalign_prof_sample(alignment, usize, cnt);
		p = ipalloc(usize, alignment, false);
	prof_malloc(p, usize, cnt);

JEMALLOC_ATTR(nonnull(1))
 * Avoid any uncertainty as to how many backtrace frames to ignore in
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)

	assert(min_alignment != 0);

	if (malloc_init()) {
		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
		usize = sa2u(size, alignment);
		if (config_prof && opt_prof) {
			prof_thr_cnt_t *cnt;

			PROF_ALLOC_PREP(2, usize, cnt);
			result = imemalign_prof(alignment, usize, cnt);
			result = ipalloc(usize, alignment, false);
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	UTRACE(0, size, result);
	assert(result == NULL);
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error allocating aligned memory: "

je_posix_memalign(void **memptr, size_t alignment, size_t size)

	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
je_aligned_alloc(size_t alignment, size_t size)

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),

icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)

	if (prof_promote && usize <= SMALL_MAXCLASS) {
		p = icalloc(SMALL_MAXCLASS+1);
		arena_prof_promoted(p, usize);

JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = icalloc_prof_sample(usize, cnt);
	prof_malloc(p, usize, cnt);

je_calloc(size_t num, size_t size)

	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (malloc_init()) {
	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
	 * Try to avoid division here. We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
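		/*
		 * On a 64-bit system, for example, the multiplication cannot
		 * overflow when both num and size are below 2^32, so the
		 * division only runs when at least one operand has a bit set
		 * in its upper half.
		 */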
	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		ret = icalloc_prof(usize, cnt);
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);

irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)

	if (prof_promote && usize <= SMALL_MAXCLASS) {
		p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
		arena_prof_promoted(p, usize);
		p = iralloc(oldptr, usize, 0, 0, false);

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)

	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(oldptr);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = irealloc_prof_sample(oldptr, usize, cnt);
		p = iralloc(oldptr, usize, 0, 0, false);
	prof_realloc(p, usize, cnt, old_usize, old_ctx);

JEMALLOC_INLINE_C void

	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
je_realloc(void *ptr, size_t size)

	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

			/* realloc(ptr, 0) is equivalent to free(ptr). */

		assert(malloc_initialized || IS_INITIALIZER);
		malloc_thread_init();

		if ((config_prof && opt_prof) || config_stats ||
		    (config_valgrind && opt_valgrind))
			old_usize = isalloc(ptr, config_prof);
		if (config_valgrind && opt_valgrind)
			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);

		if (config_prof && opt_prof) {
			prof_thr_cnt_t *cnt;

			PROF_ALLOC_PREP(1, usize, cnt);
			ret = irealloc_prof(ptr, old_usize, usize, cnt);
			if (config_stats || (config_valgrind && opt_valgrind))
			ret = iralloc(ptr, size, 0, 0, false);
		/* realloc(NULL, size) is equivalent to malloc(size). */
		MALLOC_BODY(ret, size, usize);
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in realloc(): "
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize,

 * End malloc(3)-compatible functions.

/******************************************************************************/

 * Begin non-standard override functions.
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
je_memalign(size_t alignment, size_t size)

	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);

#ifdef JEMALLOC_OVERRIDE_VALLOC
je_valloc(size_t size)

	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);

 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)
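/*
 * With the above, is_malloc(je_malloc) expands to malloc_is_je_malloc unless
 * jemalloc_defs.h contains "#define je_malloc malloc", in which case it
 * expands to malloc_is_malloc (defined as 1 above) and the glibc hook
 * definitions below are compiled in.
 */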
#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 * These definitions interpose hooks in glibc. The functions are actually
 * passed an extra argument for the caller return address, which will be
JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =

 * End non-standard override functions.

/******************************************************************************/

 * Begin non-standard functions.
JEMALLOC_ALWAYS_INLINE_C void *
imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
		return (ipalloct(usize, alignment, zero, try_tcache, arena));
		return (icalloct(usize, try_tcache, arena));
		return (imalloct(usize, try_tcache, arena));

imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena, prof_thr_cnt_t *cnt)

	if (prof_promote && usize <= SMALL_MAXCLASS) {
		size_t usize_promoted = (alignment == 0) ?
		    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
		assert(usize_promoted != 0);
		p = imallocx(usize_promoted, alignment, zero, try_tcache,
		arena_prof_promoted(p, usize);
		p = imallocx(usize, alignment, zero, try_tcache, arena);

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena, prof_thr_cnt_t *cnt)

	if ((uintptr_t)cnt != (uintptr_t)1U) {
		p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
		p = imallocx(usize, alignment, zero, try_tcache, arena);
	prof_malloc(p, usize, cnt);

je_mallocx(size_t size, int flags)

	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
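	/*
	 * The flags word encodes three things: the low bits
	 * (MALLOCX_LG_ALIGN_MASK) hold the base-2 log of the requested
	 * alignment, MALLOCX_ZERO requests zeroed memory, and the bits from 8
	 * up hold one plus the arena index, so a zero there means "no
	 * explicit arena" and arena_ind decodes to UINT_MAX. For example,
	 * MALLOCX_LG_ALIGN(4) | MALLOCX_ZERO requests 16-byte-aligned, zeroed
	 * memory from an automatically chosen arena.
	 */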
	if (arena_ind != UINT_MAX) {
		arena = arenas[arena_ind];

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		PROF_ALLOC_PREP(1, usize, cnt);
		p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
		p = imallocx(usize, alignment, zero, try_tcache, arena);

		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);

	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");

irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
    prof_thr_cnt_t *cnt)

	if (prof_promote && usize <= SMALL_MAXCLASS) {
		p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
		    size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
		arena_prof_promoted(p, usize);
		p = iralloct(oldptr, size, 0, alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);

JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
    size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena, prof_thr_cnt_t *cnt)

	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(oldptr);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
		p = iralloct(oldptr, size, 0, alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
	if (p == oldptr && alignment != 0) {
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied. Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation. Therefore, query the actual value of usize.
		*usize = isalloc(p, config_prof);
	prof_realloc(p, *usize, cnt, old_usize, old_ctx);
je_rallocx(void *ptr, size_t size, int flags)

	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache_alloc, try_tcache_dalloc;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk;
		try_tcache_alloc = false;
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		try_tcache_dalloc = (chunk == ptr || chunk->arena !=
		arena = arenas[arena_ind];
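		/*
		 * When the caller pins the allocation to an explicit arena,
		 * the allocation side bypasses the tcache; the deallocation
		 * side may still use it, but only if the old pointer is huge
		 * (chunk == ptr) or belongs to a different arena.
		 */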
		try_tcache_alloc = true;
		try_tcache_dalloc = true;

	if ((config_prof && opt_prof) || config_stats ||
	    (config_valgrind && opt_valgrind))
		old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && opt_valgrind)
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		PROF_ALLOC_PREP(1, usize, cnt);
		p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
		p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
		    try_tcache_dalloc, arena);
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = isalloc(p, config_prof);

		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero);

	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
	UTRACE(ptr, size, 0);

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, bool zero, arena_t *arena)

	if (ixalloc(ptr, size, extra, alignment, zero))
	usize = isalloc(ptr, config_prof);

ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
    prof_thr_cnt_t *cnt)

	/* Use minimum usize to determine whether promotion may happen. */
	if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size,
	    alignment)) <= SMALL_MAXCLASS) {
		if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
		    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
		usize = isalloc(ptr, config_prof);
		if (max_usize < PAGE)
			arena_prof_promoted(ptr, usize);
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
    prof_thr_cnt_t *cnt)

	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(ptr);
	if ((uintptr_t)cnt != (uintptr_t)1U) {
		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
		    alignment, max_usize, zero, arena, cnt);
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
	if (usize == old_usize)
	prof_realloc(ptr, usize, cnt, old_usize, old_ctx);

je_xallocx(void *ptr, size_t size, size_t extra, int flags)

	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;

	assert(ptr != NULL);
	assert(SIZE_T_MAX - size >= extra);
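	/*
	 * xallocx() resizes in place to at least size bytes and
	 * opportunistically up to size+extra bytes; the assertion above
	 * guards against that sum overflowing size_t.
	 */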
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX)
		arena = arenas[arena_ind];

	old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && opt_valgrind)
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		 * usize isn't knowable before ixalloc() returns when extra is
		 * non-zero. Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace. prof_realloc() will use the actual usize to
		 * decide whether to sample.
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
		    max_usize, zero, arena, cnt);
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
	if (usize == old_usize)
		goto label_not_resized;

		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
	UTRACE(ptr, size, ptr);

je_sallocx(const void *ptr, int flags)

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		usize = ivsalloc(ptr, config_prof);
		assert(ptr != NULL);
		usize = isalloc(ptr, config_prof);

je_dallocx(void *ptr, int flags)

	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		try_tcache = (chunk == ptr || chunk->arena !=

	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
	iqalloct(ptr, try_tcache);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);

je_nallocx(size_t size, int flags)

	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
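	/*
	 * This is the usable size that mallocx() would return for the same
	 * size and flags; nothing is allocated here.
	 */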
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));

je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)

	return (ctl_nametomib(name, mibp, miblenp));

je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));

je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,

	stats_print(write_cb, cbopaque, opts);

je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

 * End non-standard functions.

/******************************************************************************/

 * Begin experimental functions.
#ifdef JEMALLOC_EXPERIMENTAL

je_allocm(void **ptr, size_t *rsize, size_t size, int flags)

	assert(ptr != NULL);

	p = je_mallocx(size, flags);
		return (ALLOCM_ERR_OOM);
		*rsize = isalloc(p, config_prof);
	return (ALLOCM_SUCCESS);

je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)

	bool no_move = flags & ALLOCM_NO_MOVE;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(SIZE_T_MAX - size >= extra);

		size_t usize = je_xallocx(*ptr, size, extra, flags);
		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
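		/*
		 * ALLOCM_NO_MOVE maps onto xallocx(): either the resize
		 * succeeds in place, or the allocation is left untouched and
		 * ALLOCM_ERR_NOT_MOVED is reported.
		 */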
		void *p = je_rallocx(*ptr, size+extra, flags);
			ret = ALLOCM_SUCCESS;
			ret = ALLOCM_ERR_OOM;
		*rsize = isalloc(*ptr, config_prof);

je_sallocm(const void *ptr, size_t *rsize, int flags)

	assert(rsize != NULL);
	*rsize = je_sallocx(ptr, flags);
	return (ALLOCM_SUCCESS);

je_dallocm(void *ptr, int flags)

	je_dallocx(ptr, flags);
	return (ALLOCM_SUCCESS);

je_nallocm(size_t *rsize, size_t size, int flags)

	usize = je_nallocx(size, flags);
		return (ALLOCM_ERR_OOM);
	return (ALLOCM_SUCCESS);

 * End experimental functions.

/******************************************************************************/

 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator. Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time. The following library
 * constructor is a partial solution to this problem. It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
JEMALLOC_ATTR(constructor)
jemalloc_constructor(void)
#ifndef JEMALLOC_MUTEX_INIT_CB
jemalloc_prefork(void)
JEMALLOC_EXPORT void
_malloc_prefork(void)

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
	assert(malloc_initialized);

	/* Acquire all mutexes in a safe order. */
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);

#ifndef JEMALLOC_MUTEX_INIT_CB
jemalloc_postfork_parent(void)
JEMALLOC_EXPORT void
_malloc_postfork(void)

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_parent();
	base_postfork_parent();
	chunk_postfork_parent();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	malloc_mutex_postfork_parent(&arenas_lock);
	prof_postfork_parent();
	ctl_postfork_parent();

jemalloc_postfork_child(void)

	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_child();
	base_postfork_child();
	chunk_postfork_child();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	malloc_mutex_postfork_child(&arenas_lock);
	prof_postfork_child();
	ctl_postfork_child();
/******************************************************************************/

 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.

a0alloc(size_t size, bool zero)

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
		return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));

a0malloc(size_t size)

	return (a0alloc(size, false));

a0calloc(size_t num, size_t size)

	return (a0alloc(num * size, true));

	arena_chunk_t *chunk;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena_dalloc(chunk->arena, chunk, ptr, false);
		huge_dalloc(ptr, true);

/******************************************************************************/