#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include "libc_private.h"
#ifdef _WIN32
# include <windows.h>
# define ENOENT ERROR_PATH_NOT_FOUND
# define EINVAL ERROR_BAD_ARGUMENTS
# define EAGAIN ERROR_OUTOFMEMORY
# define EPERM ERROR_WRITE_FAULT
# define EFAULT ERROR_INVALID_ADDRESS
# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
# undef ERANGE
# define ERANGE ERROR_INVALID_DATA
#else
# include <sys/param.h>
# include <sys/mman.h>
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
#  define SYS_write __NR_write
# endif
# include <pthread.h>
# include <errno.h>
#endif
#include <sys/types.h>

#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#ifdef _MSC_VER
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
# pragma warning(disable: 4996)
#endif
#include "un-namespace.h"
#include "libc_private.h"

#define JEMALLOC_NO_DEMANGLE
#include "../jemalloc.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"
#ifdef JEMALLOC_CC_SILENCE
#define UNUSED JEMALLOC_ATTR(unused)
#else
#define UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock = true;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
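/*
 * Illustrative sketch (a hypothetical "example" module, not verbatim from any
 * one header): each internal header cooperates with this scheme by guarding
 * each kind of definition on the corresponding pass variable:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct example_s example_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct example_s { unsigned nitems; };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   extern example_t example_global;
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   JEMALLOC_INLINE unsigned
 *   example_nitems(void) { return (example_global.nitems); }
 *   #endif
 *
 * Including every header once per pass thus emits types, structs, externs,
 * and inlines in dependency-safe order.
 */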
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)

#define ZU(z) ((size_t)z)
#define QU(q) ((uint64_t)q)

#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
# define JEMALLOC_ALWAYS_INLINE
# define JEMALLOC_INLINE
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
#  define JEMALLOC_ALWAYS_INLINE \
	static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
# else
#  define JEMALLOC_ALWAYS_INLINE static inline
# endif
# define JEMALLOC_INLINE static inline
# ifdef _MSC_VER
#  define inline _inline
# endif
#endif

/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
# if (defined(__i386__) || defined(_M_IX86))
#  define LG_QUANTUM 4
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#  define LG_QUANTUM 4
# define LG_QUANTUM 3
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# define LG_QUANTUM 3
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
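/*
 * Illustrative examples (assuming LG_QUANTUM == 4, so QUANTUM == 16):
 *
 *   QUANTUM_CEILING(1)  == 16
 *   QUANTUM_CEILING(16) == 16
 *   QUANTUM_CEILING(17) == 32
 *
 * The mask arithmetic relies on QUANTUM being a power of two.
 */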
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
	(((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
	(((a) + PTR_MASK) & ~PTR_MASK)
/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
	(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
	(((s) + (alignment - 1)) & (-(alignment)))
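/*
 * Illustrative examples (alignment must be a power of two); for
 * a == 0x12345 and alignment == 64:
 *
 *   ALIGNMENT_ADDR2BASE(a, 64)     == 0x12340
 *   ALIGNMENT_ADDR2OFFSET(a, 64)   == 0x5
 *   ALIGNMENT_CEILING(0x12345, 64) == 0x12380
 */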
/* Declare a variable length array */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
#  include <malloc.h>
#  define alloca _alloca
# else
#  ifdef JEMALLOC_HAS_ALLOCA_H
#   include <alloca.h>
#  else
#   include <stdlib.h>
#  endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * count)
#else
# define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
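/*
 * Usage sketch: VARIABLE_ARRAY(uint64_t, buf, nelems) declares "buf" with
 * nelems elements, as a C99 VLA where available and via alloca() otherwise.
 * Either way the storage has automatic (stack) lifetime: it must not be
 * freed, returned, or retained past the enclosing function.
 */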
#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to Valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
	if (config_valgrind && opt_valgrind && cond) \
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero) do { \
	if (config_valgrind && opt_valgrind) { \
		size_t rzsize = p2rz(ptr); \
 \
		if (ptr == old_ptr) { \
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
			    usize, rzsize); \
			if (zero && old_usize < usize) { \
				VALGRIND_MAKE_MEM_DEFINED( \
				    (void *)((uintptr_t)ptr + \
				    old_usize), usize - old_usize); \
			} \
		} else { \
			if (old_ptr != NULL) { \
				VALGRIND_FREELIKE_BLOCK(old_ptr, \
				    old_rzsize); \
			} \
			if (ptr != NULL) { \
				size_t copy_size = (old_usize < usize) \
				    ? old_usize : usize; \
				size_t tail_size = usize - copy_size; \
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
				    rzsize, false); \
				if (copy_size > 0) { \
					VALGRIND_MAKE_MEM_DEFINED(ptr, \
					    copy_size); \
				} \
				if (zero && tail_size > 0) { \
					VALGRIND_MAKE_MEM_DEFINED( \
					    (void *)((uintptr_t)ptr + \
					    copy_size), tail_size); \
				} \
			} \
		} \
	} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
	if (config_valgrind && opt_valgrind) \
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
    do {} while (0)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
    do {} while (0)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
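/*
 * Caller sketch (hypothetical; abbreviated from the pattern the comment above
 * describes): every hop of a malloc..realloc chain must report usize, e.g.:
 *
 *   p = imalloc(usize);
 *   JEMALLOC_VALGRIND_MALLOC(p != NULL, p, usize, false);
 *   ...
 *   q = iralloc(p, size, 0, 0, false, false);
 *   JEMALLOC_VALGRIND_REALLOC(q, new_usize, p, old_usize, old_rzsize, false);
 *
 * Mixing request sizes and usable sizes across hops would corrupt Valgrind's
 * block bookkeeping.
 */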
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t allocated;
	uint64_t deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})
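/*
 * A sketch of why the wrapper is needed: the preprocessor treats the bare
 * comma in {0, 0} as an argument separator, so
 *
 *   malloc_tsd_funcs(..., {0, 0}, ...)
 *
 * would split the initializer into the two bogus arguments "{0" and "0}".
 * JEMALLOC_CONCAT({0, 0}) consumes both pieces and re-emits them as a single
 * token sequence.
 */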

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool opt_abort;
extern bool opt_junk;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_valgrind;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;

/* Number of CPUs. */
extern unsigned ncpus;

/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t **arenas;
extern unsigned narenas_total;
extern unsigned narenas_auto; /* Read-only after initialization. */
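/*
 * Example with hypothetical values: if narenas_auto == 4 and
 * narenas_total == 6, threads are automatically multiplexed over
 * arenas[0..3], while arenas[4] and arenas[5] are used only by threads that
 * explicitly request them (e.g. after creating them via the "arenas.extend"
 * mallctl).
 */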

arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
unsigned narenas_total_get(void);
arena_t *choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
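/*
 * Worked example (a sketch; exact small size classes are configuration-
 * dependent): with 4 KiB pages and a 5000-byte request that exceeds
 * SMALL_MAXCLASS, s2u(5000) == PAGE_CEILING(5000) == 8192.  Requests beyond
 * arena_maxclass are instead rounded up with CHUNK_CEILING().
 */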

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |                32
	 *    160 | 10100000 |                32
	 *    192 | 11000000 |                64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation request.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
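/*
 * Worked example (a sketch, assuming 4 KiB pages): sa2u(5000, 8192) falls
 * into the alignment > PAGE branch: alignment stays 8192, usize becomes
 * PAGE_CEILING(5000) == 8192, and run_size == 8192 + 8192 - 4096 == 12288;
 * since that fits within arena_maxclass, the result is
 * PAGE_CEILING(8192) == 8192.
 */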

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
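/*
 * Sketch of the double-include mechanism (guard names as used above; the
 * exact split lives in arena.h): arena.h brackets its inline definitions as
 *
 *   #ifdef JEMALLOC_ARENA_INLINE_A
 *   ... inlines that tcache.h depends on ...
 *   #endif
 *   #ifdef JEMALLOC_ARENA_INLINE_B
 *   ... inlines that themselves call into tcache.h ...
 *   #endif
 *
 * so the first inclusion provides tcache.h's prerequisites and the second,
 * after tcache.h, provides the rest.
 */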

#ifndef JEMALLOC_ENABLE_INLINE
void *imallocx(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
void *icallocx(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idallocx(void *ptr, bool try_tcache);
void idalloc(void *ptr);
void iqallocx(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imallocx(size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, false, try_tcache));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

	return (imallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icallocx(size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, true, try_tcache));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

	return (icallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

	return (ipallocx(usize, alignment, zero, true, NULL));
}

/*
 * Typical usage:
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idallocx(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

	idallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqallocx(void *ptr, bool try_tcache)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idallocx(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

	iqallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void *
irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
			    arena);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqallocx(ptr, try_tcache_dalloc);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(arena, ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_alloc,
			    try_tcache_dalloc));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_dalloc));
		}
	}
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{

	return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
	    NULL));
}
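/*
 * Usage sketch (hypothetical caller): try to grow an object in place by
 * "extra" bytes, then fall back to an ordinary reallocation:
 *
 *   if (iralloc(ptr, size, extra, 0, false, true) == NULL)
 *       ptr = iralloc(ptr, size, 0, 0, false, false);
 *
 * With no_move == true the call returns NULL rather than relocating the
 * object; alignment == 0 accepts whatever alignment ptr already has.
 */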

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */