X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=include%2Fllvm%2FSupport%2FAllocator.h;h=ff08622eee548a76c96da4dab6a4725409bd0111;hp=9e567e567af0ff3f61a5df403ad54ae83bbf1af8;hb=4f240010fdea1fcbe7dac3c8e9ebad082e4036c2;hpb=e20c45d2d8d9c3ddf909babbd1013128685b70d2

diff --git a/include/llvm/Support/Allocator.h b/include/llvm/Support/Allocator.h
index 9e567e567af..ff08622eee5 100644
--- a/include/llvm/Support/Allocator.h
+++ b/include/llvm/Support/Allocator.h
@@ -6,9 +6,16 @@
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
-//
-// This file defines the MallocAllocator and BumpPtrAllocator interfaces.
-//
+/// \file
+///
+/// This file defines the MallocAllocator and BumpPtrAllocator interfaces. Both
+/// of these conform to an LLVM "Allocator" concept which consists of an
+/// Allocate method accepting a size and alignment, and a Deallocate accepting
+/// a pointer and size. Further, the LLVM "Allocator" concept has overloads of
+/// Allocate and Deallocate for setting size and alignment based on the final
+/// type. These overloads are typically provided by a base class template \c
+/// AllocatorBase.
+///
 //===----------------------------------------------------------------------===//
 
 #ifndef LLVM_SUPPORT_ALLOCATOR_H
@@ -25,62 +32,90 @@
 #include <cstdlib>
 
 namespace llvm {
-template <typename T> struct ReferenceAdder {
-  typedef T &result;
-};
-template <typename T> struct ReferenceAdder<T &> {
-  typedef T result;
-};
 
-class MallocAllocator {
+/// \brief CRTP base class providing obvious overloads for the core \c
+/// Allocate() methods of LLVM-style allocators.
+///
+/// This base class both documents the full public interface exposed by all
+/// LLVM-style allocators, and redirects all of the overloads to a single core
+/// set of methods which the derived class must define.
+template <typename DerivedT> class AllocatorBase {
 public:
-  MallocAllocator() {}
-  ~MallocAllocator() {}
+  /// \brief Allocate \a Size bytes of \a Alignment aligned memory. This method
+  /// must be implemented by \c DerivedT.
+  void *Allocate(size_t Size, size_t Alignment) {
+#ifdef __clang__
+    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
+                      &AllocatorBase::Allocate) !=
+                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
+                          &DerivedT::Allocate),
+                  "Class derives from AllocatorBase without implementing the "
+                  "core Allocate(size_t, size_t) overload!");
+#endif
+    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
+  }
 
-  void Reset() {}
+  /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
+  /// allocator.
+  void Deallocate(const void *Ptr, size_t Size) {
+#ifdef __clang__
+    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
+                      &AllocatorBase::Deallocate) !=
+                      static_cast<void (DerivedT::*)(const void *, size_t)>(
+                          &DerivedT::Deallocate),
+                  "Class derives from AllocatorBase without implementing the "
+                  "core Deallocate(void *) overload!");
+#endif
+    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
+  }
 
-  void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); }
+  // The rest of these methods are helpers that redirect to one of the above
+  // core methods.
 
-  template <typename T> T *Allocate() {
-    return static_cast<T *>(malloc(sizeof(T)));
+  /// \brief Allocate space for a sequence of objects without constructing them.
+  template <typename T> T *Allocate(size_t Num = 1) {
+    return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
   }
 
-  template <typename T> T *Allocate(size_t Num) {
-    return static_cast<T *>(malloc(sizeof(T) * Num));
+  /// \brief Deallocate space for a sequence of objects without constructing them.
+  template <typename T>
+  typename std::enable_if<
+      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
+  Deallocate(T *Ptr, size_t Num = 1) {
+    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
   }
-
-  void Deallocate(const void *Ptr) { free(const_cast<void *>(Ptr)); }
-
-  void PrintStats() const {}
 };
 
-/// SlabAllocator - This class can be used to parameterize the underlying
-/// allocation strategy for the bump allocator. In particular, this is used
-/// by the JIT to allocate contiguous swathes of executable memory. The
-/// interface uses MemSlab's instead of void *'s so that the allocator
-/// doesn't have to remember the size of the pointer it allocated.
-class SlabAllocator {
+class MallocAllocator : public AllocatorBase<MallocAllocator> {
 public:
-  virtual ~SlabAllocator();
-  virtual void *Allocate(size_t Size) = 0;
-  virtual void Deallocate(void *Slab, size_t Size) = 0;
-};
+  void Reset() {}
 
-/// MallocSlabAllocator - The default slab allocator for the bump allocator
-/// is an adapter class for MallocAllocator that just forwards the method
-/// calls and translates the arguments.
-class MallocSlabAllocator : public SlabAllocator {
-  /// Allocator - The underlying allocator that we forward to.
-  ///
-  MallocAllocator Allocator;
+  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
+                                                size_t /*Alignment*/) {
+    return malloc(Size);
+  }
 
-public:
-  MallocSlabAllocator() : Allocator() {}
-  virtual ~MallocSlabAllocator();
-  void *Allocate(size_t Size) override;
-  void Deallocate(void *Slab, size_t Size) override;
+  // Pull in base class overloads.
+  using AllocatorBase<MallocAllocator>::Allocate;
+
+  void Deallocate(const void *Ptr, size_t /*Size*/) {
+    free(const_cast<void *>(Ptr));
+  }
+
+  // Pull in base class overloads.
+  using AllocatorBase<MallocAllocator>::Deallocate;
+
+  void PrintStats() const {}
 };
 
+namespace detail {
+
+// We call out to an external function to actually print the message as the
+// printing code uses Allocator.h in its implementation.
+void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
+                                size_t TotalMemory);
+} // End namespace detail.
+
 /// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
 ///
 /// This isn't strictly a bump-pointer allocator as it uses backing slabs of
@@ -91,11 +126,15 @@ public:
 ///
 /// Note that this also has a threshold for forcing allocations above a certain
 /// size into their own slab.
-template <size_t SlabSize = 4096, size_t SizeThreshold = SlabSize>
-class BumpPtrAllocatorImpl {
-  BumpPtrAllocatorImpl(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
-  void operator=(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
-
+///
+/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
+/// object, which wraps malloc, to allocate memory, but it can be changed to
+/// use a custom allocator.
+template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
+          size_t SizeThreshold = SlabSize>
+class BumpPtrAllocatorImpl
+    : public AllocatorBase<
+          BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
 public:
   static_assert(SizeThreshold <= SlabSize,
                 "The SizeThreshold must be at most the SlabSize to ensure "
@@ -103,16 +142,48 @@ public:
                 "allocation.");
 
   BumpPtrAllocatorImpl()
+      : CurPtr(nullptr), End(nullptr), BytesAllocated(0), Allocator() {}
+  template <typename T>
+  BumpPtrAllocatorImpl(T &&Allocator)
       : CurPtr(nullptr), End(nullptr), BytesAllocated(0),
-        Allocator(DefaultSlabAllocator) {}
-  BumpPtrAllocatorImpl(SlabAllocator &Allocator)
-      : CurPtr(nullptr), End(nullptr), BytesAllocated(0), Allocator(Allocator) {
+        Allocator(std::forward<T &&>(Allocator)) {}
+
+  // Manually implement a move constructor as we must clear the old allocator's
+  // slabs as a matter of correctness.
+  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
+      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
+        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
+        BytesAllocated(Old.BytesAllocated),
+        Allocator(std::move(Old.Allocator)) {
+    Old.CurPtr = Old.End = nullptr;
+    Old.BytesAllocated = 0;
+    Old.Slabs.clear();
+    Old.CustomSizedSlabs.clear();
   }
+
   ~BumpPtrAllocatorImpl() {
     DeallocateSlabs(Slabs.begin(), Slabs.end());
     DeallocateCustomSizedSlabs();
   }
 
+  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
+    DeallocateSlabs(Slabs.begin(), Slabs.end());
+    DeallocateCustomSizedSlabs();
+
+    CurPtr = RHS.CurPtr;
+    End = RHS.End;
+    BytesAllocated = RHS.BytesAllocated;
+    Slabs = std::move(RHS.Slabs);
+    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
+    Allocator = std::move(RHS.Allocator);
+
+    RHS.CurPtr = RHS.End = nullptr;
+    RHS.BytesAllocated = 0;
+    RHS.Slabs.clear();
+    RHS.CustomSizedSlabs.clear();
+    return *this;
+  }
+
   /// \brief Deallocate all but the current slab and reset the current pointer
   /// to the beginning of it, freeing all memory allocated so far.
   void Reset() {
@@ -132,70 +203,57 @@ public:
   }
 
   /// \brief Allocate space at the specified alignment.
-  void *Allocate(size_t Size, size_t Alignment) {
-    if (!CurPtr) // Start a new slab if we haven't allocated one already.
-      StartNewSlab();
+  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, size_t Alignment) {
+    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
 
     // Keep track of how many bytes we've allocated.
     BytesAllocated += Size;
 
-    // 0-byte alignment means 1-byte alignment.
-    if (Alignment == 0)
-      Alignment = 1;
+    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
+    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");
 
-    // Allocate the aligned space, going forwards from CurPtr.
-    char *Ptr = alignPtr(CurPtr, Alignment);
-
-    // Check if we can hold it.
-    if (Ptr + Size <= End) {
-      CurPtr = Ptr + Size;
+    // Check if we have enough space.
+    if (Adjustment + Size <= size_t(End - CurPtr)) {
+      char *AlignedPtr = CurPtr + Adjustment;
+      CurPtr = AlignedPtr + Size;
       // Update the allocation point of this memory block in MemorySanitizer.
       // Without this, MemorySanitizer messages for values originated from here
      // will point to the allocation of the entire slab.
-      __msan_allocated_memory(Ptr, Size);
-      return Ptr;
+      __msan_allocated_memory(AlignedPtr, Size);
+      return AlignedPtr;
    }
 
     // If Size is really big, allocate a separate slab for it.
     size_t PaddedSize = Size + Alignment - 1;
     if (PaddedSize > SizeThreshold) {
-      void *NewSlab = Allocator.Allocate(PaddedSize);
+      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
       CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));
 
-      Ptr = alignPtr((char *)NewSlab, Alignment);
-      assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + PaddedSize);
-      __msan_allocated_memory(Ptr, Size);
-      return Ptr;
+      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
+      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
+      char *AlignedPtr = (char*)AlignedAddr;
+      __msan_allocated_memory(AlignedPtr, Size);
+      return AlignedPtr;
     }
 
     // Otherwise, start a new slab and try again.
     StartNewSlab();
-    Ptr = alignPtr(CurPtr, Alignment);
-    CurPtr = Ptr + Size;
-    assert(CurPtr <= End && "Unable to allocate memory!");
-    __msan_allocated_memory(Ptr, Size);
-    return Ptr;
+    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
+    assert(AlignedAddr + Size <= (uintptr_t)End &&
+           "Unable to allocate memory!");
+    char *AlignedPtr = (char*)AlignedAddr;
+    CurPtr = AlignedPtr + Size;
+    __msan_allocated_memory(AlignedPtr, Size);
+    return AlignedPtr;
   }
 
-  /// \brief Allocate space for one object without constructing it.
-  template <typename T> T *Allocate() {
-    return static_cast<T *>(Allocate(sizeof(T), AlignOf<T>::Alignment));
-  }
+  // Pull in base class overloads.
+  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;
 
-  /// \brief Allocate space for an array of objects without constructing them.
-  template <typename T> T *Allocate(size_t Num) {
-    return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
-  }
-
-  /// \brief Allocate space for an array of objects with the specified alignment
-  /// and without constructing them.
-  template <typename T> T *Allocate(size_t Num, size_t Alignment) {
-    // Round EltSize up to the specified alignment.
-    size_t EltSize = (sizeof(T) + Alignment - 1) & (-Alignment);
-    return static_cast<T *>(Allocate(Num * EltSize, Alignment));
-  }
+  void Deallocate(const void * /*Ptr*/, size_t /*Size*/) {}
 
-  void Deallocate(const void * /*Ptr*/) {}
+  // Pull in base class overloads.
+  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;
 
   size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
@@ -209,12 +267,8 @@ public:
   }
 
   void PrintStats() const {
-    // We call out to an external function to actually print the message as the
-    // printing code uses Allocator.h in its implementation.
-    extern void printBumpPtrAllocatorStats(
-        unsigned NumSlabs, size_t BytesAllocated, size_t TotalMemory);
-
-    printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated, getTotalMemory());
+    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
+                                       getTotalMemory());
   }
 
 private:
@@ -237,14 +291,8 @@ private:
   /// Used so that we can compute how much space was wasted.
   size_t BytesAllocated;
 
-  /// \brief The default allocator used if one is not provided.
-  MallocSlabAllocator DefaultSlabAllocator;
-
-  /// \brief The underlying allocator we use to get slabs of memory.
-  ///
-  /// This defaults to MallocSlabAllocator, which wraps malloc, but it could be
-  /// changed to use a custom allocator.
-  SlabAllocator &Allocator;
+  /// \brief The allocator instance we use to get slabs of memory.
+  AllocatorT Allocator;
 
   static size_t computeSlabSize(unsigned SlabIdx) {
     // Scale the actual allocated slab size based on the number of slabs
@@ -259,7 +307,7 @@ private:
   void StartNewSlab() {
     size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
 
-    void *NewSlab = Allocator.Allocate(AllocatedSlabSize);
+    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
     Slabs.push_back(NewSlab);
     CurPtr = (char *)(NewSlab);
     End = ((char *)NewSlab) + AllocatedSlabSize;
@@ -274,8 +322,10 @@ private:
 #ifndef NDEBUG
       // Poison the memory so stale pointers crash sooner. Note we must
      // preserve the Size and NextPtr fields at the beginning.
-      sys::Memory::setRangeWritable(*I, AllocatedSlabSize);
-      memset(*I, 0xCD, AllocatedSlabSize);
+      if (AllocatedSlabSize != 0) {
+        sys::Memory::setRangeWritable(*I, AllocatedSlabSize);
+        memset(*I, 0xCD, AllocatedSlabSize);
+      }
 #endif
       Allocator.Deallocate(*I, AllocatedSlabSize);
     }
@@ -313,16 +363,21 @@ template <typename T> class SpecificBumpPtrAllocator {
 public:
   SpecificBumpPtrAllocator() : Allocator() {}
-  SpecificBumpPtrAllocator(SlabAllocator &allocator) : Allocator(allocator) {}
-
+  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
+      : Allocator(std::move(Old.Allocator)) {}
   ~SpecificBumpPtrAllocator() { DestroyAll(); }
 
+  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
+    Allocator = std::move(RHS.Allocator);
+    return *this;
+  }
+
   /// Call the destructor of each allocated object and deallocate all but the
   /// current slab and reset the current pointer to the beginning of it, freeing
   /// all memory allocated so far.
   void DestroyAll() {
     auto DestroyElements = [](char *Begin, char *End) {
-      assert(Begin == alignPtr(Begin, alignOf<T>()));
+      assert(Begin == (char*)alignAddr(Begin, alignOf<T>()));
       for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
         reinterpret_cast<T *>(Ptr)->~T();
     };
@@ -331,7 +386,7 @@ public:
          ++I) {
       size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
           std::distance(Allocator.Slabs.begin(), I));
-      char *Begin = alignPtr((char *)*I, alignOf<T>());
+      char *Begin = (char*)alignAddr(*I, alignOf<T>());
       char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                                : (char *)*I + AllocatedSlabSize;
@@ -341,7 +396,7 @@ public:
     for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
       void *Ptr = PtrAndSize.first;
       size_t Size = PtrAndSize.second;
-      DestroyElements(alignPtr((char *)Ptr, alignOf<T>()), (char *)Ptr + Size);
+      DestroyElements((char*)alignAddr(Ptr, alignOf<T>()), (char *)Ptr + Size);
     }
 
     Allocator.Reset();
@@ -349,16 +404,14 @@ public:
   }
 
   /// \brief Allocate space for an array of objects without constructing them.
   T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
-
-private:
 };
 
 } // end namespace llvm
 
-template <size_t SlabSize, size_t SizeThreshold>
-void *
-operator new(size_t Size,
-             llvm::BumpPtrAllocatorImpl<SlabSize, SizeThreshold> &Allocator) {
+template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
+void *operator new(size_t Size,
+                   llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
+                                              SizeThreshold> &Allocator) {
   struct S {
     char c;
     union {
@@ -372,8 +425,9 @@ operator new(size_t Size,
       Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
 }
 
-template <size_t SlabSize, size_t SizeThreshold>
-void operator delete(void *,
-                     llvm::BumpPtrAllocatorImpl<SlabSize, SizeThreshold> &) {}
+template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
+void operator delete(
+    void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
+}
 
 #endif // LLVM_SUPPORT_ALLOCATOR_H
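
Usage sketch (not part of the diff): a minimal illustration of how the post-change header (blob ff08622eee5) is intended to be used, based only on what the diff shows — AllocatorBase supplies the typed Allocate<T>()/Deallocate() helpers to any class that implements the two core methods, and BumpPtrAllocatorImpl now takes its slab allocator as the first template parameter. The name CountingMallocAllocator is hypothetical and exists only for this example.

#include "llvm/Support/Allocator.h"
#include <cstddef>
#include <cstdlib>

// Hypothetical allocator: implements the two core methods of the LLVM
// "Allocator" concept and inherits the typed overloads from AllocatorBase.
class CountingMallocAllocator
    : public llvm::AllocatorBase<CountingMallocAllocator> {
public:
  void *Allocate(size_t Size, size_t /*Alignment*/) {
    Bytes += Size;           // track how much we have handed out
    return malloc(Size);
  }
  using llvm::AllocatorBase<CountingMallocAllocator>::Allocate;

  void Deallocate(const void *Ptr, size_t /*Size*/) {
    free(const_cast<void *>(Ptr));
  }
  using llvm::AllocatorBase<CountingMallocAllocator>::Deallocate;

  size_t Bytes = 0;
};

void Example() {
  // Default configuration: MallocAllocator provides the slabs.
  llvm::BumpPtrAllocator BPA;
  int *I = BPA.Allocate<int>();    // typed helper provided by AllocatorBase
  void *P = BPA.Allocate(128, 16); // core size/alignment overload
  (void)I;
  (void)P;

  // Plug a custom slab allocator in through the new AllocatorT parameter.
  llvm::BumpPtrAllocatorImpl<CountingMallocAllocator> CustomBPA;
  double *D = CustomBPA.Allocate<double>(8);
  (void)D;
}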
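
A second sketch covering the operator new/operator delete overloads at the bottom of the header: they let client code construct objects directly in a BumpPtrAllocatorImpl with placement-new syntax. The Widget type is hypothetical and used only for illustration.

#include "llvm/Support/Allocator.h"

struct Widget { // hypothetical payload type
  int Id;
  explicit Widget(int Id) : Id(Id) {}
};

void PlacementExample() {
  llvm::BumpPtrAllocator BPA;
  // Allocates suitably aligned memory from BPA, then runs the constructor.
  Widget *W = new (BPA) Widget(42);
  (void)W;
  // There is no matching "delete W": the memory is reclaimed wholesale when
  // BPA is Reset() or destroyed, and destructors are not run. The operator
  // delete overload exists only so the compiler can unwind cleanly if the
  // constructor throws.
}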