1 #define JEMALLOC_PAGES_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
4 #include "jemalloc/internal/pages.h"
6 #include "jemalloc/internal/jemalloc_internal_includes.h"
8 #include "jemalloc/internal/assert.h"
9 #include "jemalloc/internal/malloc_io.h"
11 #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
12 #include <sys/sysctl.h>
15 /******************************************************************************/
/* Data. */

18 /* Actual operating system page size, detected during bootstrap, <= PAGE. */
19 static size_t os_page;

/* mmap() protections for committed (usable) vs. decommitted (inaccessible)
 * pages. */
22 # define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
23 # define PAGES_PROT_DECOMMIT (PROT_NONE)
/* Base flags for every mmap() call; initialized during bootstrap. */
24 static int mmap_flags;
/* Whether the OS overcommits memory; detected during bootstrap. */
26 static bool os_overcommits;
28 /******************************************************************************/
/*
30 * Function prototypes for static functions that are referenced prior to
 * their definitions in this file (os_pages_map() below calls
 * os_pages_unmap()).
 */
34 static void os_pages_unmap(void *addr, size_t size);
36 /******************************************************************************/
/*
 * Map size bytes at addr (a placement hint; may be NULL), initially
 * committed or decommitted per *commit.  Returns the mapping on success,
 * NULL on failure; when a non-NULL addr hint cannot be honored, the
 * misplaced mapping is released and NULL is returned.
 * NOTE(review): this view elides several lines (the _WIN32/#else split,
 * the NULL assignment, and the final return) — comments cover only the
 * visible code.
 */
39 os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
/* Callers must pass os_page-aligned addr and size. */
40 assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
41 assert(ALIGNMENT_CEILING(size, os_page) == size);
/*
 * Windows branch:
51 * If VirtualAlloc can't allocate at the given address when one is
52 * given, it fails and returns NULL.
 */
54 ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
/*
 * POSIX branch:
58 * We don't use MAP_FIXED here, because it can cause the *replacement*
59 * of existing mappings, and we only want to create new mappings.
 */
/* Decommitted mappings get PROT_NONE so stray touches fault. */
62 int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
64 ret = mmap(addr, size, prot, mmap_flags, -1, 0);
68 if (ret == MAP_FAILED) {
70 } else if (addr != NULL && ret != addr) {
/*
72 * We succeeded in mapping memory, but not in the right place.
 * Release it so the caller can fall back to the slow path.
 */
74 os_pages_unmap(ret, size);
/* Postcondition: a non-NULL result honors the addr hint (if given). */
78 assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
/*
 * Given an over-sized mapping [addr, addr+alloc_size), keep only the
 * size-byte region that starts leadsize bytes in, unmapping the leading
 * and trailing excess.  Returns the trimmed region (or NULL if the
 * unmap-and-remap fallback loses a race).
 */
84 os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
86 void *ret = (void *)((uintptr_t)addr + leadsize);
88 assert(alloc_size >= leadsize + size);
/*
 * NOTE(review): the platform #ifdef is elided from this view.  The next
 * lines look like the Windows branch: VirtualFree cannot split a
 * reservation, so release everything and re-map just the target region.
 */
90 os_pages_unmap(addr, alloc_size);
91 void *new_addr = os_pages_map(ret, size, PAGE, commit);
92 if (new_addr == ret) {
/* Re-mapped in place; success (return elided from this view). */
95 if (new_addr != NULL) {
/* Another thread claimed the hole first; give the mapping back. */
96 os_pages_unmap(new_addr, size);
/* POSIX branch: munmap() can split a mapping, so trim both ends. */
100 size_t trailsize = alloc_size - leadsize - size;
103 os_pages_unmap(addr, leadsize);
105 if (trailsize != 0) {
106 os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
/*
 * Release the mapping [addr, addr+size).  On failure, format the OS error
 * and print a diagnostic (the surrounding control flow is elided from this
 * view).
 */
113 os_pages_unmap(void *addr, size_t size) {
114 assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
115 assert(ALIGNMENT_CEILING(size, os_page) == size);
/* Windows: size 0 + MEM_RELEASE frees the whole original reservation. */
118 if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
/* POSIX: */
120 if (munmap(addr, size) == -1)
123 char buf[BUFERROR_BUF];
/* Capture errno/GetLastError() as text for the diagnostic. */
125 buferror(get_errno(), buf, sizeof(buf));
126 malloc_printf("<jemalloc>: Error in "
/*
 * Reliable-but-slow aligned mapping: over-allocate by (alignment - os_page)
 * bytes, then trim down to an alignment-aligned, size-byte region.
 * Retries (loop head elided from this view) while trimming loses races.
 */
140 pages_map_slow(size_t size, size_t alignment, bool *commit) {
141 size_t alloc_size = size + alignment - os_page;
142 /* Beware size_t wrap-around. */
143 if (alloc_size < size) {
/* Overflow: fail (return elided from this view). */
149 void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
/* Distance from the raw mapping to the first alignment-aligned address. */
153 size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
155 ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
156 } while (ret == NULL);
159 assert(PAGE_ADDR2BASE(ret) == ret);
/*
 * Public entry point: map size bytes aligned to alignment (>= PAGE),
 * optionally at hint addr, committed per *commit.  Fast path maps exactly
 * size bytes and hopes the result is already aligned; otherwise falls back
 * to pages_map_slow().
 */
164 pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
165 assert(alignment >= PAGE);
166 assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);
/*
169 * Ideally, there would be a way to specify alignment to mmap() (like
170 * NetBSD has), but in the absence of such a feature, we have to work
171 * hard to efficiently create aligned mappings. The reliable, but
172 * slow method is to create a mapping that is over-sized, then trim the
173 * excess. However, that always results in one or two calls to
174 * os_pages_unmap(), and it can leave holes in the process's virtual
175 * memory map if memory grows downward.
 *
177 * Optimistically try mapping precisely the right amount before falling
178 * back to the slow method, with the expectation that the optimistic
179 * approach works most of the time.
 */
182 void *ret = os_pages_map(addr, size, os_page, commit);
/* Failure, or the hint was honored exactly: done (return elided). */
183 if (ret == NULL || ret == addr) {
/* Reaching here means no hint was given (hinted calls returned above). */
186 assert(addr == NULL);
/* Lucky placement didn't pan out; discard and take the slow path. */
187 if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
188 os_pages_unmap(ret, size);
189 return pages_map_slow(size, alignment, commit);
192 assert(PAGE_ADDR2BASE(ret) == ret);
/* Public wrapper: unmap the PAGE-aligned range [addr, addr+size). */
197 pages_unmap(void *addr, size_t size) {
198 assert(PAGE_ADDR2BASE(addr) == addr);
199 assert(PAGE_CEILING(size) == size);
201 os_pages_unmap(addr, size);
/*
 * Commit or decommit [addr, addr+size) per the commit flag.  Returns true
 * on failure (visible in the Windows return below); presumably false on
 * success.
 */
205 pages_commit_impl(void *addr, size_t size, bool commit) {
206 assert(PAGE_ADDR2BASE(addr) == addr);
207 assert(PAGE_CEILING(size) == size);
/* Overcommitting OS: commit state is not tracked (branch body elided). */
209 if (os_overcommits) {
/* Windows: VirtualAlloc/VirtualFree toggle commit state directly. */
214 return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
215 PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
/*
 * POSIX: remap the range in place with MAP_FIXED, flipping protection
 * between PROT_READ|PROT_WRITE and PROT_NONE.
 */
218 int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
219 void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
221 if (result == MAP_FAILED) {
224 if (result != addr) {
/*
226 * We succeeded in mapping memory, but not in the right
 * place; undo it and report failure.
 */
229 os_pages_unmap(result, size);
/* Commit pages (make readable/writable).  Returns true on failure. */
238 pages_commit(void *addr, size_t size) {
239 return pages_commit_impl(addr, size, true);
/* Decommit pages (make inaccessible).  Returns true on failure. */
243 pages_decommit(void *addr, size_t size) {
244 return pages_commit_impl(addr, size, false);
/*
 * Lazily purge [addr, addr+size): advise the OS the contents are disposable
 * while keeping the range mapped.  Returns true on failure.
 */
248 pages_purge_lazy(void *addr, size_t size) {
249 assert(PAGE_ADDR2BASE(addr) == addr);
250 assert(PAGE_CEILING(size) == size);
/* Lazy purge unsupported in this configuration (return elided). */
252 if (!pages_can_purge_lazy) {
/* Windows: MEM_RESET marks the pages' contents as discardable. */
257 VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
259 #elif defined(JEMALLOC_PURGE_MADVISE_FREE)
260 return (madvise(addr, size, MADV_FREE) != 0);
/* MADV_DONTNEED only counts as lazy when it does not zero pages. */
261 #elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
262 !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
263 return (madvise(addr, size, MADV_DONTNEED) != 0);
/*
 * Forcibly purge [addr, addr+size): after this the pages read back as
 * zeros.  Returns true on failure.
 */
270 pages_purge_forced(void *addr, size_t size) {
271 assert(PAGE_ADDR2BASE(addr) == addr);
272 assert(PAGE_CEILING(size) == size);
/* Forced purge unsupported in this configuration (return elided). */
274 if (!pages_can_purge_forced) {
/* MADV_DONTNEED qualifies as forced only when it zeroes pages. */
278 #if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
279 defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
280 return (madvise(addr, size, MADV_DONTNEED) != 0);
281 #elif defined(JEMALLOC_MAPS_COALESCE)
282 /* Try to overlay a new demand-zeroed mapping. */
283 return pages_commit(addr, size);
/*
 * Hint the kernel to back [addr, addr+size) with transparent huge pages.
 * Returns true on failure.  NOTE(review): the platform guard (#ifdef) is
 * elided from this view.
 */
290 pages_huge(void *addr, size_t size) {
291 assert(HUGEPAGE_ADDR2BASE(addr) == addr);
292 assert(HUGEPAGE_CEILING(size) == size);
295 return (madvise(addr, size, MADV_HUGEPAGE) != 0);
/*
 * Hint the kernel NOT to use transparent huge pages for [addr, addr+size).
 * Returns true on failure.  NOTE(review): the platform guard (#ifdef) is
 * elided from this view.
 */
302 pages_nohuge(void *addr, size_t size) {
303 assert(HUGEPAGE_ADDR2BASE(addr) == addr);
304 assert(HUGEPAGE_CEILING(size) == size);
307 return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
/*
 * Detect the runtime OS page size: GetSystemInfo() on Windows,
 * sysconf(_SC_PAGESIZE) elsewhere.
 */
314 os_page_detect(void) {
318 return si.dwPageSize;
320 long result = sysconf(_SC_PAGESIZE);
/* NOTE(review): the sysconf() failure check is elided from this view. */
324 return (size_t)result;
328 #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
/*
 * BSD: query the vm.overcommit sysctl.  Overcommit is considered enabled
 * iff neither of the low two bits is set.
 */
330 os_overcommits_sysctl(void) {
334 sz = sizeof(vm_overcommit);
335 if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
/* Conservatively assume no overcommit when the query fails. */
336 return false; /* Error. */
339 return ((vm_overcommit & 0x3) == 0);
343 #ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/*
345 * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
346 * reentry during bootstrapping if another library has interposed system call
 * wrappers.
 */
/* Linux: read /proc/sys/vm/overcommit_memory to classify overcommit. */
350 os_overcommits_proc(void) {
/* Prefer raw syscalls; fall back to libc open() when unavailable. */
355 #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
356 fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY |
358 #elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
359 fd = (int)syscall(SYS_openat,
360 AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
362 fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
/* Could not open the proc file: conservatively report no overcommit. */
365 return false; /* Error. */
368 #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
369 nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
371 nread = read(fd, &buf, sizeof(buf));
374 #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
375 syscall(SYS_close, fd);
/* Read failed or returned no data (guard elided from this view). */
381 return false; /* Error. */
/*
384 * /proc/sys/vm/overcommit_memory meanings:
385 * 0: Heuristic overcommit.
386 * 1: Always overcommit.
387 * 2: Never overcommit.
 */
389 return (buf[0] == '0' || buf[0] == '1');
/*
 * Bootstrap body (function header elided from this view; presumably
 * pages_boot()).  Detects the OS page size, sets base mmap flags, and
 * probes the OS overcommit policy.
 */
395 os_page = os_page_detect();
/* jemalloc's compile-time PAGE must be >= the runtime page size. */
396 if (os_page > PAGE) {
397 malloc_write("<jemalloc>: Unsupported system page size\n");
/* Error return elided from this view. */
405 mmap_flags = MAP_PRIVATE | MAP_ANON;
/* Detect overcommit via sysctl (BSD) or /proc (Linux); else assume off. */
408 #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
409 os_overcommits = os_overcommits_sysctl();
410 #elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
411 os_overcommits = os_overcommits_proc();
412 # ifdef MAP_NORESERVE
/* With overcommit on, skip swap reservation for new mappings. */
413 if (os_overcommits) {
414 mmap_flags |= MAP_NORESERVE;
418 os_overcommits = false;