#define LLVM_SYSTEM_MEMORY_H
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/system_error.h"
#include <string>
namespace llvm {
/// @brief An abstraction for memory operations.
class Memory {
public:
+ enum ProtectionFlags {
+ MF_READ = 0x1000000,
+ MF_WRITE = 0x2000000,
+ MF_EXEC = 0x4000000
+ };
+
+ /// This method allocates a block of memory that is suitable for loading
+ /// dynamically generated code (e.g. JIT). An attempt to allocate
+ /// \p NumBytes bytes of virtual memory is made.
+ /// \p NearBlock may point to an existing allocation in which case
+ /// an attempt is made to allocate more memory near the existing block.
+ /// The actual allocated address is not guaranteed to be near the requested
+ /// address.
+ /// \p Flags is used to set the initial protection flags for the block
+ /// of the memory.
+ /// \p EC [out] returns an object describing any error that occurs.
+ ///
+ /// This method may allocate more than the number of bytes requested. The
+ /// actual number of bytes allocated is indicated in the returned
+ /// MemoryBlock.
+ ///
+ /// The start of the allocated block must be aligned with the
+ /// system allocation granularity (64K on Windows, page size on Linux).
+ /// If the address following \p NearBlock is not so aligned, it will be
+ /// rounded up to the next allocation granularity boundary.
+ ///
+ /// \returns a non-null MemoryBlock if the function was successful,
+ /// otherwise a null MemoryBlock with \p EC describing the error.
+ ///
+ /// @brief Allocate mapped memory.
+ static MemoryBlock allocateMappedMemory(size_t NumBytes,
+ const MemoryBlock *const NearBlock,
+ unsigned Flags,
+ error_code &EC);
+
+ /// This method releases a block of memory that was allocated with the
+ /// allocateMappedMemory method. It should not be used to release any
+ /// memory block allocated any other way.
+ /// \p Block describes the memory to be released.
+ ///
+ /// \returns error_success if the function was successful, or an error_code
+ /// describing the failure if an error occurred.
+ ///
+ /// @brief Release mapped memory.
+ static error_code releaseMappedMemory(MemoryBlock &Block);
+
+ /// This method sets the protection flags for a block of memory to the
+ /// state specified by \p Flags. The behavior is not specified if the
+ /// memory was not allocated using the allocateMappedMemory method.
+ /// \p Block describes the memory block to be protected.
+ /// \p Flags specifies the new protection state to be assigned to the block.
+ /// \p ErrMsg [out] returns a string describing any error that occurred.
+ ///
+ /// If \p Flags is MF_WRITE, the actual behavior varies
+ /// with the operating system (i.e. MF_READWRITE on Windows) and the
+ /// target architecture (i.e. MF_WRITE -> MF_READWRITE on i386).
+ ///
+ /// \returns error_success if the function was successful, or an error_code
+ /// describing the failure if an error occurred.
+ ///
+ /// @brief Set memory protection state.
+ static error_code protectMappedMemory(const MemoryBlock &Block,
+ unsigned Flags);
+
/// This method allocates a block of Read/Write/Execute memory that is
/// suitable for executing dynamically generated code (e.g. JIT). An
/// attempt to allocate \p NumBytes bytes of virtual memory is made.
#include "llvm/Support/Valgrind.h"
#include "llvm/Config/config.h"
-namespace llvm {
-using namespace sys;
-}
-
// Include the platform-specific parts of this class.
#ifdef LLVM_ON_UNIX
#include "Unix/Memory.inc"
#ifdef LLVM_ON_WIN32
#include "Windows/Memory.inc"
#endif
-
-extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
-
-/// InvalidateInstructionCache - Before the JIT can run a block of code
-/// that has been emitted it must invalidate the instruction cache on some
-/// platforms.
-void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr,
- size_t Len) {
-
-// icache invalidation for PPC and ARM.
-#if defined(__APPLE__)
-
-# if (defined(__POWERPC__) || defined (__ppc__) || \
- defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
- sys_icache_invalidate(const_cast<void *>(Addr), Len);
-# endif
-
-#else
-
-# if (defined(__POWERPC__) || defined (__ppc__) || \
- defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
- const size_t LineSize = 32;
-
- const intptr_t Mask = ~(LineSize - 1);
- const intptr_t StartLine = ((intptr_t) Addr) & Mask;
- const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;
-
- for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
- asm volatile("dcbf 0, %0" : : "r"(Line));
- asm volatile("sync");
-
- for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
- asm volatile("icbi 0, %0" : : "r"(Line));
- asm volatile("isync");
-# elif defined(__arm__) && defined(__GNUC__)
- // FIXME: Can we safely always call this for __GNUC__ everywhere?
- const char *Start = static_cast<const char *>(Addr);
- const char *End = Start + Len;
- __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
-# elif defined(__mips__)
- const char *Start = static_cast<const char *>(Addr);
- cacheflush(const_cast<char *>(Start), Len, BCACHE);
-# endif
-
-#endif // end apple
-
- ValgrindDiscardTranslations(Addr, Len);
-}
#include "Unix.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
#ifdef HAVE_SYS_MMAN_H
# endif
#endif
+extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
+
+namespace {
+
+int getPosixProtectionFlags(unsigned Flags) {
+ switch (Flags) {
+ case llvm::sys::Memory::MF_READ:
+ return PROT_READ;
+ case llvm::sys::Memory::MF_WRITE:
+ return PROT_WRITE;
+ case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
+ return PROT_READ | PROT_WRITE;
+ case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
+ return PROT_READ | PROT_EXEC;
+ case llvm::sys::Memory::MF_READ |
+ llvm::sys::Memory::MF_WRITE |
+ llvm::sys::Memory::MF_EXEC:
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
+ case llvm::sys::Memory::MF_EXEC:
+ return PROT_EXEC;
+ default:
+ llvm_unreachable("Illegal memory protection flag specified!");
+ }
+ // Provide a default return value as required by some compilers.
+ return PROT_NONE;
+}
+
+} // namespace
+
+namespace llvm {
+namespace sys {
+
+MemoryBlock
+Memory::allocateMappedMemory(size_t NumBytes,
+ const MemoryBlock *const NearBlock,
+ unsigned PFlags,
+ error_code &EC) {
+ EC = error_code::success();
+ if (NumBytes == 0)
+ return MemoryBlock();
+
+ static const size_t PageSize = Process::GetPageSize();
+ const size_t NumPages = (NumBytes+PageSize-1)/PageSize;
+
+ int fd = -1;
+#ifdef NEED_DEV_ZERO_FOR_MMAP
+ static int zero_fd = open("/dev/zero", O_RDWR);
+ if (zero_fd == -1) {
+ EC = error_code(errno, system_category());
+ return MemoryBlock();
+ }
+ fd = zero_fd;
+#endif
+
+ int MMFlags = MAP_PRIVATE |
+#ifdef HAVE_MMAP_ANONYMOUS
+ MAP_ANONYMOUS
+#else
+ MAP_ANON
+#endif
+ ; // Ends statement above
+
+ int Protect = getPosixProtectionFlags(PFlags);
+
+ // Use any near hint and the page size to set a page-aligned starting address
+ uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
+ NearBlock->size() : 0;
+ if (Start && Start % PageSize)
+ Start += PageSize - Start % PageSize;
+
+ void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
+ Protect, MMFlags, fd, 0);
+ if (Addr == MAP_FAILED) {
+ if (NearBlock) //Try again without a near hint
+ return allocateMappedMemory(NumBytes, 0, PFlags, EC);
+
+ EC = error_code(errno, system_category());
+ return MemoryBlock();
+ }
+
+ MemoryBlock Result;
+ Result.Address = Addr;
+ Result.Size = NumPages*PageSize;
+
+ if (PFlags & MF_EXEC)
+ Memory::InvalidateInstructionCache(Result.Address, Result.Size);
+
+ return Result;
+}
+
+error_code
+Memory::releaseMappedMemory(MemoryBlock &M) {
+ if (M.Address == 0 || M.Size == 0)
+ return error_code::success();
+
+ if (0 != ::munmap(M.Address, M.Size))
+ return error_code(errno, system_category());
+
+ M.Address = 0;
+ M.Size = 0;
+
+ return error_code::success();
+}
+
+error_code
+Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
+ if (M.Address == 0 || M.Size == 0)
+ return error_code::success();
+
+ if (!Flags)
+ return error_code(EINVAL, generic_category());
+
+ int Protect = getPosixProtectionFlags(Flags);
+
+ int Result = ::mprotect(M.Address, M.Size, Protect);
+ if (Result != 0)
+ return error_code(errno, system_category());
+
+ if (Flags & MF_EXEC)
+ Memory::InvalidateInstructionCache(M.Address, M.Size);
+
+ return error_code::success();
+}
+
/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions. This is typically used for JIT applications where we want
/// to emit code to the memory then jump to it. Getting this type of memory
/// is very OS specific.
///
-llvm::sys::MemoryBlock
-llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
- std::string *ErrMsg) {
+MemoryBlock
+Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
+ std::string *ErrMsg) {
if (NumBytes == 0) return MemoryBlock();
size_t pageSize = Process::GetPageSize();
VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
if (KERN_SUCCESS != kr) {
MakeErrMsg(ErrMsg, "vm_protect max RX failed");
- return sys::MemoryBlock();
+ return MemoryBlock();
}
kr = vm_protect(mach_task_self(), (vm_address_t)pa,
VM_PROT_READ | VM_PROT_WRITE);
if (KERN_SUCCESS != kr) {
MakeErrMsg(ErrMsg, "vm_protect RW failed");
- return sys::MemoryBlock();
+ return MemoryBlock();
}
#endif
return result;
}
-bool llvm::sys::Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
if (M.Address == 0 || M.Size == 0) return false;
if (0 != ::munmap(M.Address, M.Size))
return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
return false;
}
-bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
if (M.Address == 0 || M.Size == 0) return false;
- sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
+ Memory::InvalidateInstructionCache(M.Address, M.Size);
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
(vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
return KERN_SUCCESS == kr;
#endif
}
-bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
if (M.Address == 0 || M.Size == 0) return false;
- sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
+ Memory::InvalidateInstructionCache(M.Address, M.Size);
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
(vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
return KERN_SUCCESS == kr;
#endif
}
-bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) {
+bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
(vm_size_t)Size, 0,
#endif
}
-bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) {
+bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
(vm_size_t)Size, 0,
return true;
#endif
}
+
+/// InvalidateInstructionCache - Before the JIT can run a block of code
+/// that has been emitted it must invalidate the instruction cache on some
+/// platforms.
+void Memory::InvalidateInstructionCache(const void *Addr,
+ size_t Len) {
+
+// icache invalidation for PPC and ARM.
+#if defined(__APPLE__)
+
+# if (defined(__POWERPC__) || defined (__ppc__) || \
+ defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
+ sys_icache_invalidate(const_cast<void *>(Addr), Len);
+# endif
+
+#else
+
+# if (defined(__POWERPC__) || defined (__ppc__) || \
+ defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
+ const size_t LineSize = 32;
+
+ const intptr_t Mask = ~(LineSize - 1);
+ const intptr_t StartLine = ((intptr_t) Addr) & Mask;
+ const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;
+
+ for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+ asm volatile("dcbf 0, %0" : : "r"(Line));
+ asm volatile("sync");
+
+ for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+ asm volatile("icbi 0, %0" : : "r"(Line));
+ asm volatile("isync");
+# elif defined(__arm__) && defined(__GNUC__)
+ // FIXME: Can we safely always call this for __GNUC__ everywhere?
+ const char *Start = static_cast<const char *>(Addr);
+ const char *End = Start + Len;
+ __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
+# elif defined(__mips__)
+ const char *Start = static_cast<const char *>(Addr);
+ cacheflush(const_cast<char *>(Start), Len, BCACHE);
+# endif
+
+#endif // end apple
+
+ ValgrindDiscardTranslations(Addr, Len);
+}
+
+} // namespace sys
+} // namespace llvm
//
//===----------------------------------------------------------------------===//
-#include "Windows.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
+#include "Windows.h"
+
+namespace {
+
+DWORD getWindowsProtectionFlags(unsigned Flags) {
+ switch (Flags) {
+ // Contrary to what you might expect, the Windows page protection flags
+ // are not a bitwise combination of RWX values
+ case llvm::sys::Memory::MF_READ:
+ return PAGE_READONLY;
+ case llvm::sys::Memory::MF_WRITE:
+ // Note: PAGE_WRITE is not supported by VirtualProtect
+ return PAGE_READWRITE;
+ case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
+ return PAGE_READWRITE;
+ case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
+ return PAGE_EXECUTE_READ;
+ case llvm::sys::Memory::MF_READ |
+ llvm::sys::Memory::MF_WRITE |
+ llvm::sys::Memory::MF_EXEC:
+ return PAGE_EXECUTE_READWRITE;
+ case llvm::sys::Memory::MF_EXEC:
+ return PAGE_EXECUTE;
+ default:
+ llvm_unreachable("Illegal memory protection flag specified!");
+ }
+ // Provide a default return value as required by some compilers.
+ return PAGE_NOACCESS;
+}
+
+size_t getAllocationGranularity() {
+ SYSTEM_INFO Info;
+ ::GetSystemInfo(&Info);
+ if (Info.dwPageSize > Info.dwAllocationGranularity)
+ return Info.dwPageSize;
+ else
+ return Info.dwAllocationGranularity;
+}
+
+} // namespace
namespace llvm {
-using namespace sys;
+namespace sys {
//===----------------------------------------------------------------------===//
//=== WARNING: Implementation here must contain only Win32 specific code
//=== and must not be UNIX code
//===----------------------------------------------------------------------===//
-MemoryBlock Memory::AllocateRWX(size_t NumBytes,
- const MemoryBlock *NearBlock,
- std::string *ErrMsg) {
- if (NumBytes == 0) return MemoryBlock();
+MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
+ const MemoryBlock *const NearBlock,
+ unsigned Flags,
+ error_code &EC) {
+ EC = error_code::success();
+ if (NumBytes == 0)
+ return MemoryBlock();
+
+ // While we'd be happy to allocate single pages, the Windows allocation
+ // granularity may be larger than a single page (in practice, it is 64K)
+ // so mapping less than that will create an unreachable fragment of memory.
+ static const size_t Granularity = getAllocationGranularity();
+ const size_t NumBlocks = (NumBytes+Granularity-1)/Granularity;
- static const size_t pageSize = Process::GetPageSize();
- size_t NumPages = (NumBytes+pageSize-1)/pageSize;
+ uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
+ NearBlock->size()
+ : NULL;
- PVOID start = NearBlock ? static_cast<unsigned char *>(NearBlock->base()) +
- NearBlock->size() : NULL;
+ // If the requested address is not aligned to the allocation granularity,
+ // round up to get beyond NearBlock. VirtualAlloc would have rounded down.
+ if (Start && Start % Granularity != 0)
+ Start += Granularity - Start % Granularity;
- void *pa = VirtualAlloc(start, NumPages*pageSize, MEM_RESERVE | MEM_COMMIT,
- PAGE_EXECUTE_READWRITE);
- if (pa == NULL) {
+ DWORD Protect = getWindowsProtectionFlags(Flags);
+
+ void *PA = ::VirtualAlloc(reinterpret_cast<void*>(Start),
+ NumBlocks*Granularity,
+ MEM_RESERVE | MEM_COMMIT, Protect);
+ if (PA == NULL) {
if (NearBlock) {
// Try again without the NearBlock hint
- return AllocateRWX(NumBytes, NULL, ErrMsg);
+ return allocateMappedMemory(NumBytes, NULL, Flags, EC);
}
- MakeErrMsg(ErrMsg, "Can't allocate RWX Memory: ");
+ EC = error_code(::GetLastError(), system_category());
return MemoryBlock();
}
- MemoryBlock result;
- result.Address = pa;
- result.Size = NumPages*pageSize;
- return result;
+ MemoryBlock Result;
+ Result.Address = PA;
+ Result.Size = NumBlocks*Granularity;
+ ;
+ if (Flags & MF_EXEC)
+ Memory::InvalidateInstructionCache(Result.Address, Result.Size);
+
+ return Result;
}
-bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
- if (M.Address == 0 || M.Size == 0) return false;
+error_code Memory::releaseMappedMemory(MemoryBlock &M) {
+ if (M.Address == 0 || M.Size == 0)
+ return error_code::success();
+
if (!VirtualFree(M.Address, 0, MEM_RELEASE))
- return MakeErrMsg(ErrMsg, "Can't release RWX Memory: ");
- return false;
+ return error_code(::GetLastError(), system_category());
+
+ M.Address = 0;
+ M.Size = 0;
+
+ return error_code::success();
+}
+
+error_code Memory::protectMappedMemory(const MemoryBlock &M,
+ unsigned Flags) {
+ if (M.Address == 0 || M.Size == 0)
+ return error_code::success();
+
+ DWORD Protect = getWindowsProtectionFlags(Flags);
+
+ DWORD OldFlags;
+ if (!VirtualProtect(M.Address, M.Size, Protect, &OldFlags))
+ return error_code(::GetLastError(), system_category());
+
+ if (Flags & MF_EXEC)
+ Memory::InvalidateInstructionCache(M.Address, M.Size);
+
+ return error_code::success();
+}
+
+/// InvalidateInstructionCache - Before the JIT can run a block of code
+/// that has been emitted it must invalidate the instruction cache on some
+/// platforms.
+void Memory::InvalidateInstructionCache(
+ const void *Addr, size_t Len) {
+ FlushInstructionCache(GetCurrentProcess(), Addr, Len);
+}
+
+
+MemoryBlock Memory::AllocateRWX(size_t NumBytes,
+ const MemoryBlock *NearBlock,
+ std::string *ErrMsg) {
+ MemoryBlock MB;
+ error_code EC;
+ MB = allocateMappedMemory(NumBytes, NearBlock,
+ MF_READ|MF_WRITE|MF_EXEC, EC);
+ if (EC != error_code::success() && ErrMsg) {
+ MakeErrMsg(ErrMsg, EC.message());
+ }
+ return MB;
+}
+
+bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
+ error_code EC = releaseMappedMemory(M);
+ if (EC == error_code::success())
+ return false;
+ MakeErrMsg(ErrMsg, EC.message());
+ return true;
}
static DWORD getProtection(const void *addr) {
}
DWORD oldProt;
- sys::Memory::InvalidateInstructionCache(Addr, Size);
+ Memory::InvalidateInstructionCache(Addr, Size);
return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt)
== TRUE;
}
}
DWORD oldProt;
- sys::Memory::InvalidateInstructionCache(Addr, Size);
+ Memory::InvalidateInstructionCache(Addr, Size);
return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt)
== TRUE;
}
-}
+} // namespace sys
+} // namespace llvm
LeakDetectorTest.cpp
ManagedStatic.cpp
MathExtrasTest.cpp
+ MemoryTest.cpp
Path.cpp
RegexTest.cpp
SwapByteOrderTest.cpp
--- /dev/null
+//===- llvm/unittest/Support/MemoryTest.cpp - Mapped memory tests --------===//\r
+//\r
+// The LLVM Compiler Infrastructure\r
+//\r
+// This file is distributed under the University of Illinois Open Source\r
+// License. See LICENSE.TXT for details.\r
+//\r
+//===----------------------------------------------------------------------===//\r
+\r
+#include "llvm/Support/Memory.h"\r
+#include "llvm/Support/Process.h"\r
+\r
+#include "gtest/gtest.h"\r
+#include <cstdlib>\r
+\r
+using namespace llvm;\r
+using namespace sys;\r
+\r
+namespace {\r
+\r
+class MappedMemoryTest : public ::testing::TestWithParam<unsigned> {\r
+public:\r
+ MappedMemoryTest() {\r
+ Flags = GetParam();\r
+ PageSize = sys::Process::GetPageSize();\r
+ }\r
+\r
+protected:\r
+ // Adds RW flags to permit testing of the resulting memory\r
+ unsigned getTestableEquivalent(unsigned RequestedFlags) {\r
+ switch (RequestedFlags) {\r
+ case Memory::MF_READ:\r
+ case Memory::MF_WRITE:\r
+ case Memory::MF_READ|Memory::MF_WRITE:\r
+ return Memory::MF_READ|Memory::MF_WRITE;\r
+ case Memory::MF_READ|Memory::MF_EXEC:\r
+ case Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC:\r
+ case Memory::MF_EXEC:\r
+ return Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC;\r
+ }\r
+ // Default in case values are added to the enum, as required by some compilers\r
+ return Memory::MF_READ|Memory::MF_WRITE;\r
+ }\r
+\r
+ // Returns true if the memory blocks overlap\r
+ bool doesOverlap(MemoryBlock M1, MemoryBlock M2) {\r
+ if (M1.base() == M2.base())\r
+ return true;\r
+\r
+ if (M1.base() > M2.base())\r
+ return (unsigned char *)M2.base() + M2.size() > M1.base();\r
+\r
+ return (unsigned char *)M1.base() + M1.size() > M2.base();\r
+ }\r
+\r
+ unsigned Flags;\r
+ size_t PageSize;\r
+};\r
+\r
+TEST_P(MappedMemoryTest, AllocAndRelease) {\r
+ error_code EC;\r
+ MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+\r
+ EXPECT_NE((void*)0, M1.base());\r
+ EXPECT_LE(sizeof(int), M1.size());\r
+\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M1));\r
+}\r
+\r
+TEST_P(MappedMemoryTest, MultipleAllocAndRelease) {\r
+ error_code EC;\r
+ MemoryBlock M1 = Memory::allocateMappedMemory(16, 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M2 = Memory::allocateMappedMemory(64, 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M3 = Memory::allocateMappedMemory(32, 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+\r
+ EXPECT_NE((void*)0, M1.base());\r
+ EXPECT_LE(16U, M1.size());\r
+ EXPECT_NE((void*)0, M2.base());\r
+ EXPECT_LE(64U, M2.size());\r
+ EXPECT_NE((void*)0, M3.base());\r
+ EXPECT_LE(32U, M3.size());\r
+\r
+ EXPECT_FALSE(doesOverlap(M1, M2));\r
+ EXPECT_FALSE(doesOverlap(M2, M3));\r
+ EXPECT_FALSE(doesOverlap(M1, M3));\r
+\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M1));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M3));\r
+ MemoryBlock M4 = Memory::allocateMappedMemory(16, 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ EXPECT_NE((void*)0, M4.base());\r
+ EXPECT_LE(16U, M4.size());\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M4));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M2));\r
+}\r
+\r
+TEST_P(MappedMemoryTest, BasicWrite) {\r
+ // This test applies only to writeable combinations\r
+ if (Flags && !(Flags & Memory::MF_WRITE))\r
+ return;\r
+\r
+ error_code EC;\r
+ MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+\r
+ EXPECT_NE((void*)0, M1.base());\r
+ EXPECT_LE(sizeof(int), M1.size());\r
+\r
+ int *a = (int*)M1.base();\r
+ *a = 1;\r
+ EXPECT_EQ(1, *a);\r
+\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M1));\r
+}\r
+\r
+TEST_P(MappedMemoryTest, MultipleWrite) {\r
+ // This test applies only to writeable combinations\r
+ if (Flags && !(Flags & Memory::MF_WRITE))\r
+ return;\r
+ error_code EC;\r
+ MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M2 = Memory::allocateMappedMemory(8 * sizeof(int), 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M3 = Memory::allocateMappedMemory(4 * sizeof(int), 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+\r
+ EXPECT_FALSE(doesOverlap(M1, M2));\r
+ EXPECT_FALSE(doesOverlap(M2, M3));\r
+ EXPECT_FALSE(doesOverlap(M1, M3));\r
+\r
+ EXPECT_NE((void*)0, M1.base());\r
+ EXPECT_LE(1U * sizeof(int), M1.size());\r
+ EXPECT_NE((void*)0, M2.base());\r
+ EXPECT_LE(8U * sizeof(int), M2.size());\r
+ EXPECT_NE((void*)0, M3.base());\r
+ EXPECT_LE(4U * sizeof(int), M3.size());\r
+\r
+ int *x = (int*)M1.base();\r
+ *x = 1;\r
+\r
+ int *y = (int*)M2.base();\r
+ for (int i = 0; i < 8; i++) {\r
+ y[i] = i;\r
+ }\r
+\r
+ int *z = (int*)M3.base();\r
+ *z = 42;\r
+\r
+ EXPECT_EQ(1, *x);\r
+ EXPECT_EQ(7, y[7]);\r
+ EXPECT_EQ(42, *z);\r
+\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M1));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M3));\r
+\r
+ MemoryBlock M4 = Memory::allocateMappedMemory(64 * sizeof(int), 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ EXPECT_NE((void*)0, M4.base());\r
+ EXPECT_LE(64U * sizeof(int), M4.size());\r
+ x = (int*)M4.base();\r
+ *x = 4;\r
+ EXPECT_EQ(4, *x);\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M4));\r
+\r
+ // Verify that M2 remains unaffected by other activity\r
+ for (int i = 0; i < 8; i++) {\r
+ EXPECT_EQ(i, y[i]);\r
+ }\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M2));\r
+}\r
+\r
+TEST_P(MappedMemoryTest, EnabledWrite) {\r
+ error_code EC;\r
+ MemoryBlock M1 = Memory::allocateMappedMemory(2 * sizeof(int), 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M2 = Memory::allocateMappedMemory(8 * sizeof(int), 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M3 = Memory::allocateMappedMemory(4 * sizeof(int), 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+\r
+ EXPECT_NE((void*)0, M1.base());\r
+ EXPECT_LE(2U * sizeof(int), M1.size());\r
+ EXPECT_NE((void*)0, M2.base());\r
+ EXPECT_LE(8U * sizeof(int), M2.size());\r
+ EXPECT_NE((void*)0, M3.base());\r
+ EXPECT_LE(4U * sizeof(int), M3.size());\r
+\r
+ EXPECT_FALSE(Memory::protectMappedMemory(M1, getTestableEquivalent(Flags)));\r
+ EXPECT_FALSE(Memory::protectMappedMemory(M2, getTestableEquivalent(Flags)));\r
+ EXPECT_FALSE(Memory::protectMappedMemory(M3, getTestableEquivalent(Flags)));\r
+\r
+ EXPECT_FALSE(doesOverlap(M1, M2));\r
+ EXPECT_FALSE(doesOverlap(M2, M3));\r
+ EXPECT_FALSE(doesOverlap(M1, M3));\r
+\r
+ int *x = (int*)M1.base();\r
+ *x = 1;\r
+ int *y = (int*)M2.base();\r
+ for (unsigned int i = 0; i < 8; i++) {\r
+ y[i] = i;\r
+ }\r
+ int *z = (int*)M3.base();\r
+ *z = 42;\r
+\r
+ EXPECT_EQ(1, *x);\r
+ EXPECT_EQ(7, y[7]);\r
+ EXPECT_EQ(42, *z);\r
+\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M1));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M3));\r
+ EXPECT_EQ(6, y[6]);\r
+\r
+ MemoryBlock M4 = Memory::allocateMappedMemory(16, 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ EXPECT_NE((void*)0, M4.base());\r
+ EXPECT_LE(16U, M4.size());\r
+ EXPECT_EQ(error_code::success(), Memory::protectMappedMemory(M4, getTestableEquivalent(Flags)));\r
+ x = (int*)M4.base();\r
+ *x = 4;\r
+ EXPECT_EQ(4, *x);\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M4));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M2));\r
+}\r
+\r
+TEST_P(MappedMemoryTest, SuccessiveNear) {\r
+ error_code EC;\r
+ MemoryBlock M1 = Memory::allocateMappedMemory(16, 0, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M2 = Memory::allocateMappedMemory(64, &M1, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M3 = Memory::allocateMappedMemory(32, &M2, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+\r
+ EXPECT_NE((void*)0, M1.base());\r
+ EXPECT_LE(16U, M1.size());\r
+ EXPECT_NE((void*)0, M2.base());\r
+ EXPECT_LE(64U, M2.size());\r
+ EXPECT_NE((void*)0, M3.base());\r
+ EXPECT_LE(32U, M3.size());\r
+\r
+ EXPECT_FALSE(doesOverlap(M1, M2));\r
+ EXPECT_FALSE(doesOverlap(M2, M3));\r
+ EXPECT_FALSE(doesOverlap(M1, M3));\r
+\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M1));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M3));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M2));\r
+}\r
+\r
+TEST_P(MappedMemoryTest, DuplicateNear) {\r
+ error_code EC;\r
+ MemoryBlock Near((void*)(3*PageSize), 16);\r
+ MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+\r
+ EXPECT_NE((void*)0, M1.base());\r
+ EXPECT_LE(16U, M1.size());\r
+ EXPECT_NE((void*)0, M2.base());\r
+ EXPECT_LE(64U, M2.size());\r
+ EXPECT_NE((void*)0, M3.base());\r
+ EXPECT_LE(32U, M3.size());\r
+\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M1));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M3));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M2));\r
+}\r
+\r
+TEST_P(MappedMemoryTest, ZeroNear) {\r
+ error_code EC;\r
+ MemoryBlock Near(0, 0);\r
+ MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+\r
+ EXPECT_NE((void*)0, M1.base());\r
+ EXPECT_LE(16U, M1.size());\r
+ EXPECT_NE((void*)0, M2.base());\r
+ EXPECT_LE(64U, M2.size());\r
+ EXPECT_NE((void*)0, M3.base());\r
+ EXPECT_LE(32U, M3.size());\r
+\r
+ EXPECT_FALSE(doesOverlap(M1, M2));\r
+ EXPECT_FALSE(doesOverlap(M2, M3));\r
+ EXPECT_FALSE(doesOverlap(M1, M3));\r
+\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M1));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M3));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M2));\r
+}\r
+\r
+TEST_P(MappedMemoryTest, ZeroSizeNear) {\r
+ error_code EC;\r
+ MemoryBlock Near((void*)(4*PageSize), 0);\r
+ MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+ MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+\r
+ EXPECT_NE((void*)0, M1.base());\r
+ EXPECT_LE(16U, M1.size());\r
+ EXPECT_NE((void*)0, M2.base());\r
+ EXPECT_LE(64U, M2.size());\r
+ EXPECT_NE((void*)0, M3.base());\r
+ EXPECT_LE(32U, M3.size());\r
+\r
+ EXPECT_FALSE(doesOverlap(M1, M2));\r
+ EXPECT_FALSE(doesOverlap(M2, M3));\r
+ EXPECT_FALSE(doesOverlap(M1, M3));\r
+\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M1));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M3));\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M2));\r
+}\r
+\r
+TEST_P(MappedMemoryTest, UnalignedNear) {\r
+ error_code EC;\r
+ MemoryBlock Near((void*)(2*PageSize+5), 0);\r
+ MemoryBlock M1 = Memory::allocateMappedMemory(15, &Near, Flags, EC);\r
+ EXPECT_EQ(error_code::success(), EC);\r
+\r
+ EXPECT_NE((void*)0, M1.base());\r
+ EXPECT_LE(sizeof(int), M1.size());\r
+\r
+ EXPECT_FALSE(Memory::releaseMappedMemory(M1));\r
+}\r
+\r
+// Note that Memory::MF_WRITE is not supported exclusively across\r
+// operating systems and architectures and can imply MF_READ|MF_WRITE\r
+unsigned MemoryFlags[] = {\r
+ Memory::MF_READ,\r
+ Memory::MF_WRITE,\r
+ Memory::MF_READ|Memory::MF_WRITE,\r
+ Memory::MF_EXEC,\r
+ Memory::MF_READ|Memory::MF_EXEC,\r
+ Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC\r
+ };\r
+\r
+INSTANTIATE_TEST_CASE_P(AllocationTests,\r
+ MappedMemoryTest,\r
+ ::testing::ValuesIn(MemoryFlags));\r
+\r
+} // anonymous namespace\r