#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
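
/*
 * Slow path for aligned allocation: map alignment - PAGE extra bytes so
 * that the mapping is guaranteed to contain a suitably aligned region of
 * the requested size, then trim the leading and trailing excess.  The
 * loop retries because pages_trim() can fail on platforms (e.g. Windows)
 * where trimming is implemented by unmapping the whole region and
 * re-mapping the aligned subregion, which can race with other threads.
 */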
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;
	size_t alloc_size;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	do {
		void *pages;
		size_t leadsize;

		pages = pages_map(NULL, alloc_size);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	if (!*commit)
		*commit = pages_decommit(ret, size);
	return (ret);
}
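
/*
 * Allocate an aligned chunk.  new_addr, if non-NULL, is honored
 * strictly: pages_map() returns either NULL or a mapping at exactly
 * new_addr, so an unaligned result below implies new_addr was NULL.
 */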
void *
chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings. The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess. However, that always results in one or two calls to
	 * pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = pages_map(new_addr, size);
	if (ret == NULL || ret == new_addr)
		return (ret);
	assert(new_addr == NULL);
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		/* Unaligned mapping; retry via the over-allocate-and-trim path. */
		pages_unmap(ret, size);
		return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
	}

	assert(ret != NULL);
	*zero = true;
	if (!*commit)
		*commit = pages_decommit(ret, size);
	return (ret);
}
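
/*
 * Return true if the chunk was retained rather than unmapped.  When
 * munmap() support is configured out (the default on Linux, where
 * repeated unmapping tends to fragment the kernel's VM map), the caller
 * is expected to cache the chunk for later reuse instead of leaking it.
 */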
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (!config_munmap);
}
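
/*
 * Illustrative usage sketch (a hypothetical caller, not part of this
 * file; assumes a chunk-sized, chunk-aligned request, with zero/commit
 * acting as in/out flags):
 *
 *	bool zero = false;
 *	bool commit = true;
 *	void *chunk = chunk_alloc_mmap(NULL, chunksize, chunksize, &zero,
 *	    &commit);
 *	if (chunk != NULL) {
 *		bool retained;
 *		... use the chunk ...
 *		retained = chunk_dalloc_mmap(chunk, chunksize);
 *		... if retained, cache the chunk for later reuse ...
 *	}
 */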