//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

15 #include "llvm/Support/DataTypes.h"
16 #include "llvm/Support/ErrorHandling.h"
17 #include "llvm/Support/Process.h"
19 #ifdef HAVE_SYS_MMAN_H
24 #include <mach/mach.h>
28 # if defined(__OpenBSD__)
29 # include <mips64/sysarch.h>
31 # include <sys/cachectl.h>
35 extern "C" void sys_icache_invalidate(const void *Addr, size_t len);

namespace {

int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ |
       llvm::sys::Memory::MF_WRITE |
       llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, having an executable page that has no read permission
    // can have unintended consequences. The function InvalidateInstruction-
    // Cache uses instructions dcbf and icbi, both of which are treated by
    // the processor as loads. If the page has no read permissions,
    // executing these instructions will result in a segmentation fault.
    // Somehow, this problem is not present on Linux, but it does happen
    // on FreeBSD.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

} // namespace

namespace llvm {
namespace sys {

MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             error_code &EC) {
  EC = error_code::success();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = process::get_self()->page_size();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    EC = error_code(errno, system_category());
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int MMFlags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting address
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;
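  // For example, with a 4096-byte page size a hint ending at address 0x12345
  // is rounded up to the next page boundary, 0x13000, before it is passed to
  // mmap below (the addresses are illustrative only).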

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) //Try again without a near hint
      return allocateMappedMemory(NumBytes, 0, PFlags, EC);

    EC = error_code(errno, system_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}

error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == 0 || M.Size == 0)
    return error_code::success();

  if (0 != ::munmap(M.Address, M.Size))
    return error_code(errno, system_category());

  M.Address = 0;
  M.Size = 0;

  return error_code::success();
}

error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  if (M.Address == 0 || M.Size == 0)
    return error_code::success();

  if (!Flags)
    return error_code(EINVAL, generic_category());

  int Protect = getPosixProtectionFlags(Flags);

  int Result = ::mprotect(M.Address, M.Size, Protect);
  if (Result != 0)
    return error_code(errno, system_category());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return error_code::success();
}
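
// A minimal usage sketch of the error_code-based API above, for a caller that
// emits code into writable memory and then flips it to executable. The
// variable names and the 4096-byte request are illustrative only:
//
//   error_code EC;
//   MemoryBlock MB = Memory::allocateMappedMemory(4096, 0,
//                                                 Memory::MF_READ |
//                                                 Memory::MF_WRITE, EC);
//   if (EC)
//     report_fatal_error(EC.message());
//   // ... write machine code into MB.base() ...
//   EC = Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC);
//   // protectMappedMemory invalidates the instruction cache itself when
//   // MF_EXEC is requested, so the block is then safe to execute.
//   EC = Memory::releaseMappedMemory(MB);
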
/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions.  This is typically used for JIT applications where we want
/// to emit code to the memory then jump to it.  Getting this type of memory
/// is very OS specific.
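///
/// For example, a JIT-style caller might do the following (the names and the
/// error handling here are an illustrative sketch, not taken from a real
/// caller):
/// \code
///   std::string Err;
///   MemoryBlock MB = Memory::AllocateRWX(NumBytesNeeded, 0, &Err);
///   if (MB.base() == 0)
///     report_fatal_error(Err);
///   // ... emit machine code into MB.base() ...
///   Memory::InvalidateInstructionCache(MB.base(), MB.size());
///   // ... jump to the emitted code ...
/// \endcode
///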
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  size_t PageSize = process::get_self()->page_size();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    MakeErrMsg(ErrMsg, "Can't open /dev/zero device");
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int flags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ;

  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : 0;

#if defined(__APPLE__) && defined(__arm__)
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) //Try again without a near hint
      return AllocateRWX(NumBytes, 0);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }

#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return MemoryBlock();
  }

  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;

  return result;
}

bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == 0 || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}

bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return false;
#endif
}

bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
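///
/// For example, a caller that has just copied freshly emitted code into an
/// executable buffer would typically do the following (names are illustrative
/// only):
/// \code
///   memcpy(Buf, CodeBytes, CodeSize);
///   sys::Memory::InvalidateInstructionCache(Buf, CodeSize);
///   // ... it is now safe to branch to Buf ...
/// \endcode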
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
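  // Flush each data cache line covering the range back to memory (dcbf), wait
  // for the flushes to complete (sync), invalidate the corresponding
  // instruction cache lines (icbi), and then discard any prefetched
  // instructions (isync). The loop below assumes a 32-byte cache line.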
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__)) && defined(__GNUC__) && !defined(__FreeBSD__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  elif defined(__mips__)
  const char *Start = static_cast<const char *>(Addr);
#  if defined(ANDROID)
  // The declaration of "cacheflush" in Android bionic:
  // extern int cacheflush(long start, long end, long flags);
  const char *End = Start + Len;
  long LStart = reinterpret_cast<long>(const_cast<char *>(Start));
  long LEnd = reinterpret_cast<long>(const_cast<char *>(End));
  cacheflush(LStart, LEnd, BCACHE);
#  else
  cacheflush(const_cast<char *>(Start), Len, BCACHE);
#  endif
#  endif

#endif  // end apple

  ValgrindDiscardTranslations(Addr, Len);
}

} // namespace sys
} // namespace llvm