//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//
#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Valgrind.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  else
#    include <sys/cachectl.h>
#  endif
#endif

// Provided by libSystem on Darwin; flushes the instruction cache.
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
namespace {

/// Map LLVM's memory-protection flags onto the POSIX PROT_* bits.
int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ |
       llvm::sys::Memory::MF_WRITE |
       llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, an executable page without read permission can have
    // unintended consequences: InvalidateInstructionCache uses the dcbf
    // and icbi instructions, which may only be applied to readable pages,
    // so request read access as well.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

} // anonymous namespace
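// For example, getPosixProtectionFlags(llvm::sys::Memory::MF_READ |
// llvm::sys::Memory::MF_WRITE) yields PROT_READ | PROT_WRITE. Combinations
// not listed in the switch, such as MF_WRITE | MF_EXEC, reach the
// llvm_unreachable default and are treated as illegal.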
namespace llvm {
namespace sys {

MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             error_code &EC) {
  EC = error_code::success();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = process::get_self()->page_size();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;
  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  // Some platforms require mapping /dev/zero instead of anonymous memory.
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    EC = error_code(errno, system_category());
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int MMFlags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above
  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting
  // address.
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;
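  // For example, with PageSize == 0x1000 a hint that ends at 0x12345 is
  // rounded up to the next page boundary: 0x12345 + (0x1000 - 0x345) ==
  // 0x13000.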
  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return allocateMappedMemory(NumBytes, 0, PFlags, EC);

    EC = error_code(errno, system_category());
    return MemoryBlock();
  }
  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}
error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == 0 || M.Size == 0)
    return error_code::success();

  if (0 != ::munmap(M.Address, M.Size))
    return error_code(errno, system_category());

  M.Address = 0;
  M.Size = 0;

  return error_code::success();
}
error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  if (M.Address == 0 || M.Size == 0)
    return error_code::success();

  if (!Flags)
    return error_code(EINVAL, generic_category());

  int Protect = getPosixProtectionFlags(Flags);

  int Result = ::mprotect(M.Address, M.Size, Protect);
  if (Result != 0)
    return error_code(errno, system_category());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return error_code::success();
}
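// A minimal usage sketch for the three functions above (hypothetical client
// code, not part of this file): allocate writable memory, emit code into it,
// then flip it to executable before running it. Size is a placeholder for
// the number of bytes the caller needs.
//
//   error_code EC;
//   MemoryBlock MB = Memory::allocateMappedMemory(
//       Size, 0, Memory::MF_READ | Memory::MF_WRITE, EC);
//   if (!EC) {
//     // ... emit machine code into MB.base() ...
//     Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC);
//     // protectMappedMemory invalidates the icache for MF_EXEC requests,
//     // so the emitted code can be called directly afterwards.
//     Memory::releaseMappedMemory(MB);
//   }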
/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions. This is typically used for JIT applications where we want
/// to emit code to the memory then jump to it. Getting this type of memory
/// is very OS specific. A usage sketch follows the definition below.
///
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  size_t PageSize = process::get_self()->page_size();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;
  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    MakeErrMsg(ErrMsg, "Can't open /dev/zero device");
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int flags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above
  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : 0;

#if defined(__APPLE__) && defined(__arm__)
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return AllocateRWX(NumBytes, 0, ErrMsg);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }
#if defined(__APPLE__) && defined(__arm__)
  // iOS refuses pages that are simultaneously writable and executable, so
  // first mark the region readable/executable with copy semantics, then
  // drop it to read/write so code can be emitted; setExecutable() restores
  // execute permission before the code is run.
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return MemoryBlock();
  }

  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;

  return result;
}
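// A minimal JIT-style usage sketch for AllocateRWX (hypothetical, for
// illustration only; CodeSize and the emission step are placeholders for
// the caller's own values):
//
//   std::string Err;
//   MemoryBlock MB = Memory::AllocateRWX(CodeSize, 0, &Err);
//   if (MB.base()) {
//     Memory::setWritable(MB, &Err);   // only meaningful on Apple ARM
//     // ... copy or emit machine code into MB.base() ...
//     Memory::setExecutable(MB, &Err);
//     Memory::InvalidateInstructionCache(MB.base(), MB.size());
//     // ... cast MB.base() to a function pointer and call it, then: ...
//     Memory::ReleaseRWX(MB, &Err);
//   }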
// Note: like MakeErrMsg, this returns true on failure and false on success.
bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == 0 || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}
bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}
bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return false;
#endif
}
bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}
bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}
/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {
// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else
#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;
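  // Worked example: for Addr == 0x1005 and Len == 8, the 32-byte lines give
  // StartLine == 0x1000 and EndLine == (0x1005 + 8 + 31) & ~31 == 0x1020,
  // so the single line at 0x1000 is flushed and covers the whole range.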
  // Flush the modified data-cache lines to memory, then invalidate the
  // corresponding instruction-cache lines.
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif defined(__arm__) && defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  elif defined(__mips__)
  const char *Start = static_cast<const char *>(Addr);
  cacheflush(const_cast<char *>(Start), Len, BCACHE);
#  endif

#endif  // end apple

  ValgrindDiscardTranslations(Addr, Len);
}

} // namespace sys
} // namespace llvm