diff --git a/include/llvm/Support/Allocator.h b/include/llvm/Support/Allocator.h
index eb6c2d1e25a..ff08622eee5 100644
--- a/include/llvm/Support/Allocator.h
+++ b/include/llvm/Support/Allocator.h
@@ -6,234 +6,428 @@
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
-//
-// This file defines the MallocAllocator and BumpPtrAllocator interfaces.
-//
+/// \file
+///
+/// This file defines the MallocAllocator and BumpPtrAllocator interfaces. Both
+/// of these conform to an LLVM "Allocator" concept which consists of an
+/// Allocate method accepting a size and alignment, and a Deallocate accepting
+/// a pointer and size. Further, the LLVM "Allocator" concept has overloads of
+/// Allocate and Deallocate for setting size and alignment based on the final
+/// type. These overloads are typically provided by a base class template \c
+/// AllocatorBase.
+///
 //===----------------------------------------------------------------------===//
 
 #ifndef LLVM_SUPPORT_ALLOCATOR_H
 #define LLVM_SUPPORT_ALLOCATOR_H
 
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/AlignOf.h"
+#include "llvm/Support/DataTypes.h"
 #include "llvm/Support/MathExtras.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/Memory.h"
 #include <algorithm>
 #include <cassert>
-#include <cstdlib>
 #include <cstddef>
+#include <cstdlib>
 
 namespace llvm {
 
-class MallocAllocator {
+/// \brief CRTP base class providing obvious overloads for the core \c
+/// Allocate() methods of LLVM-style allocators.
+///
+/// This base class both documents the full public interface exposed by all
+/// LLVM-style allocators, and redirects all of the overloads to a single core
+/// set of methods which the derived class must define.
+template <typename DerivedT> class AllocatorBase {
 public:
-  MallocAllocator() {}
-  ~MallocAllocator() {}
+  /// \brief Allocate \a Size bytes of \a Alignment aligned memory. This method
+  /// must be implemented by \c DerivedT.
+  void *Allocate(size_t Size, size_t Alignment) {
+#ifdef __clang__
+    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
+                      &AllocatorBase::Allocate) !=
+                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
+                          &DerivedT::Allocate),
+                  "Class derives from AllocatorBase without implementing the "
+                  "core Allocate(size_t, size_t) overload!");
+#endif
+    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
+  }
 
-  void Reset() {}
+  /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
+  /// allocator.
+  void Deallocate(const void *Ptr, size_t Size) {
+#ifdef __clang__
+    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
+                      &AllocatorBase::Deallocate) !=
+                      static_cast<void (DerivedT::*)(const void *, size_t)>(
+                          &DerivedT::Deallocate),
+                  "Class derives from AllocatorBase without implementing the "
+                  "core Deallocate(void *) overload!");
+#endif
+    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
+  }
 
-  void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); }
+  // The rest of these methods are helpers that redirect to one of the above
+  // core methods.
 
-  template <typename T>
-  T *Allocate() { return static_cast<T*>(malloc(sizeof(T))); }
+  /// \brief Allocate space for a sequence of objects without constructing them.
+  template <typename T> T *Allocate(size_t Num = 1) {
+    return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
+  }
 
+  /// \brief Deallocate space for a sequence of objects without constructing them.
   template <typename T>
-  T *Allocate(size_t Num) {
-    return static_cast<T*>(malloc(sizeof(T)*Num));
+  typename std::enable_if<
+      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
+  Deallocate(T *Ptr, size_t Num = 1) {
+    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
   }
+};
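As a sketch of how a client hooks into the CRTP interface above (the class
name and counting logic are invented for illustration): a derived allocator
implements only the two core (size, alignment) overloads, and the
using-declarations re-expose the templated helpers that the clang-only
static_asserts verify are not accidentally shadowed.

#include "llvm/Support/Allocator.h"
#include <cstdlib>

class CountingAllocator : public llvm::AllocatorBase<CountingAllocator> {
  size_t NumAllocs = 0;

public:
  // Core overload required by AllocatorBase.
  void *Allocate(size_t Size, size_t /*Alignment*/) {
    ++NumAllocs;
    return std::malloc(Size);
  }
  // Re-expose the templated Allocate<T>(Num) helpers from the base class.
  using AllocatorBase<CountingAllocator>::Allocate;

  // Core overload required by AllocatorBase.
  void Deallocate(const void *Ptr, size_t /*Size*/) {
    std::free(const_cast<void *>(Ptr));
  }
  using AllocatorBase<CountingAllocator>::Deallocate;

  size_t allocations() const { return NumAllocs; }
};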
+
+class MallocAllocator : public AllocatorBase<MallocAllocator> {
+public:
+  void Reset() {}
+
+  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
+                                                size_t /*Alignment*/) {
+    return malloc(Size);
+  }
+
+  // Pull in base class overloads.
+  using AllocatorBase<MallocAllocator>::Allocate;
+
+  void Deallocate(const void *Ptr, size_t /*Size*/) {
+    free(const_cast<void *>(Ptr));
   }
 
-  void Deallocate(const void *Ptr) { free(const_cast<void*>(Ptr)); }
+  // Pull in base class overloads.
+  using AllocatorBase<MallocAllocator>::Deallocate;
 
   void PrintStats() const {}
 };
 
-/// MemSlab - This structure lives at the beginning of every slab allocated by
-/// the bump allocator.
-class MemSlab {
+namespace detail {
+
+// We call out to an external function to actually print the message as the
+// printing code uses Allocator.h in its implementation.
+void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
+                                size_t TotalMemory);
+} // End namespace detail.
+
+/// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
+///
+/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
+/// memory rather than relying on a boundless contiguous heap. However, it has
+/// bump-pointer semantics in that it is a monotonically growing pool of memory
+/// where every allocation is found by merely allocating the next N bytes in
+/// the slab, or the next N bytes in the next slab.
+///
+/// Note that this also has a threshold for forcing allocations above a certain
+/// size into their own slab.
+///
+/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
+/// object, which wraps malloc, to allocate memory, but it can be changed to
+/// use a custom allocator.
+template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
+          size_t SizeThreshold = SlabSize>
+class BumpPtrAllocatorImpl
+    : public AllocatorBase<
+          BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
 public:
-  size_t Size;
-  MemSlab *NextPtr;
-};
+  static_assert(SizeThreshold <= SlabSize,
+                "The SizeThreshold must be at most the SlabSize to ensure "
+                "that objects larger than a slab go into their own memory "
+                "allocation.");
 
-/// SlabAllocator - This class can be used to parameterize the underlying
-/// allocation strategy for the bump allocator. In particular, this is used
-/// by the JIT to allocate contiguous swathes of executable memory. The
-/// interface uses MemSlab's instead of void *'s so that the allocator
-/// doesn't have to remember the size of the pointer it allocated.
-class SlabAllocator {
-public:
-  virtual ~SlabAllocator();
-  virtual MemSlab *Allocate(size_t Size) = 0;
-  virtual void Deallocate(MemSlab *Slab) = 0;
-};
+  BumpPtrAllocatorImpl()
+      : CurPtr(nullptr), End(nullptr), BytesAllocated(0), Allocator() {}
+  template <typename T>
+  BumpPtrAllocatorImpl(T &&Allocator)
+      : CurPtr(nullptr), End(nullptr), BytesAllocated(0),
+        Allocator(std::forward<T &&>(Allocator)) {}
+
+  // Manually implement a move constructor as we must clear the old allocator's
+  // slabs as a matter of correctness.
+  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
+      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
+        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
+        BytesAllocated(Old.BytesAllocated),
+        Allocator(std::move(Old.Allocator)) {
+    Old.CurPtr = Old.End = nullptr;
+    Old.BytesAllocated = 0;
+    Old.Slabs.clear();
+    Old.CustomSizedSlabs.clear();
+  }
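Before the member-by-member diff continues, a minimal usage sketch of the
class being introduced here (the function body is invented; Allocate,
Deallocate, and Reset are the members defined in this patch):

#include "llvm/Support/Allocator.h"

void bumpPtrDemo() {
  llvm::BumpPtrAllocator Alloc; // MallocAllocator-backed, 4096-byte slabs.

  // Carve 16 uninitialized ints out of the current slab; once a slab
  // exists this is a pointer bump, not a heap allocation.
  int *Ints = Alloc.Allocate<int>(16);
  for (int I = 0; I != 16; ++I)
    Ints[I] = I;

  // Deallocate is a no-op: memory is only reclaimed in bulk.
  Alloc.Deallocate(Ints, 16);

  // Rewind to the start of the first slab and free the rest.
  Alloc.Reset();
}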
 
-/// MallocSlabAllocator - The default slab allocator for the bump allocator
-/// is an adapter class for MallocAllocator that just forwards the method
-/// calls and translates the arguments.
-class MallocSlabAllocator : public SlabAllocator {
-  /// Allocator - The underlying allocator that we forward to.
-  ///
-  MallocAllocator Allocator;
+  ~BumpPtrAllocatorImpl() {
+    DeallocateSlabs(Slabs.begin(), Slabs.end());
+    DeallocateCustomSizedSlabs();
+  }
 
-public:
-  MallocSlabAllocator() : Allocator() { }
-  virtual ~MallocSlabAllocator();
-  virtual MemSlab *Allocate(size_t Size);
-  virtual void Deallocate(MemSlab *Slab);
-};
+  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
+    DeallocateSlabs(Slabs.begin(), Slabs.end());
+    DeallocateCustomSizedSlabs();
+
+    CurPtr = RHS.CurPtr;
+    End = RHS.End;
+    BytesAllocated = RHS.BytesAllocated;
+    Slabs = std::move(RHS.Slabs);
+    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
+    Allocator = std::move(RHS.Allocator);
+
+    RHS.CurPtr = RHS.End = nullptr;
+    RHS.BytesAllocated = 0;
+    RHS.Slabs.clear();
+    RHS.CustomSizedSlabs.clear();
+    return *this;
+  }
 
-/// BumpPtrAllocator - This allocator is useful for containers that need
-/// very simple memory allocation strategies. In particular, this just keeps
-/// allocating memory, and never deletes it until the entire block is dead.
-/// This makes allocation speedy, but must only be used when the trade-off
-/// is ok.
-class BumpPtrAllocator {
-  BumpPtrAllocator(const BumpPtrAllocator &); // do not implement
-  void operator=(const BumpPtrAllocator &);   // do not implement
+  /// \brief Deallocate all but the current slab and reset the current pointer
+  /// to the beginning of it, freeing all memory allocated so far.
+  void Reset() {
+    if (Slabs.empty())
+      return;
+
+    // Reset the state.
+    BytesAllocated = 0;
+    CurPtr = (char *)Slabs.front();
+    End = CurPtr + SlabSize;
+
+    // Deallocate all but the first slab, and all custom sized slabs.
+    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
+    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
+    DeallocateCustomSizedSlabs();
+    CustomSizedSlabs.clear();
+  }
 
-  /// SlabSize - Allocate data into slabs of this size unless we get an
-  /// allocation above SizeThreshold.
-  size_t SlabSize;
+  /// \brief Allocate space at the specified alignment.
+  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, size_t Alignment) {
+    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
+
+    // Keep track of how many bytes we've allocated.
+    BytesAllocated += Size;
+
+    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
+    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");
+
+    // Check if we have enough space.
+    if (Adjustment + Size <= size_t(End - CurPtr)) {
+      char *AlignedPtr = CurPtr + Adjustment;
+      CurPtr = AlignedPtr + Size;
+      // Update the allocation point of this memory block in MemorySanitizer.
+      // Without this, MemorySanitizer messages for values originating from
+      // here will point to the allocation of the entire slab.
+      __msan_allocated_memory(AlignedPtr, Size);
+      return AlignedPtr;
+    }
 
-  /// SizeThreshold - For any allocation larger than this threshold, we should
-  /// allocate a separate slab.
-  size_t SizeThreshold;
+    // If Size is really big, allocate a separate slab for it.
+    size_t PaddedSize = Size + Alignment - 1;
+    if (PaddedSize > SizeThreshold) {
+      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
+      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));
+
+      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
+      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
+      char *AlignedPtr = (char*)AlignedAddr;
+      __msan_allocated_memory(AlignedPtr, Size);
+      return AlignedPtr;
+    }
 
-  /// Allocator - The underlying allocator we use to get slabs of memory. This
-  /// defaults to MallocSlabAllocator, which wraps malloc, but it could be
-  /// changed to use a custom allocator.
-  SlabAllocator &Allocator;
+    // Otherwise, start a new slab and try again.
+    StartNewSlab();
+    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
+    assert(AlignedAddr + Size <= (uintptr_t)End &&
+           "Unable to allocate memory!");
+    char *AlignedPtr = (char*)AlignedAddr;
+    CurPtr = AlignedPtr + Size;
+    __msan_allocated_memory(AlignedPtr, Size);
+    return AlignedPtr;
+  }
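The fast path of Allocate above is pure pointer arithmetic. This standalone
sketch mirrors the adjustment computation (alignUp is an invented stand-in
for the alignAddr/alignmentAdjustment helpers the patch relies on):

#include <cassert>
#include <cstdint>

// Round Addr up to the next multiple of Alignment, which must be a power of
// two; this is the rounding Allocate performs on CurPtr.
uintptr_t alignUp(uintptr_t Addr, uintptr_t Alignment) {
  return (Addr + Alignment - 1) & ~(Alignment - 1);
}

void alignmentDemo() {
  // A bump pointer at 0x1003 asked for 8-byte alignment needs a 5-byte
  // adjustment, so a 16-byte request consumes 21 bytes of slab.
  uintptr_t CurPtr = 0x1003;
  uintptr_t Aligned = alignUp(CurPtr, 8);
  assert(Aligned == 0x1008 && Aligned - CurPtr == 5);
}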
 
-  /// CurSlab - The slab that we are currently allocating into.
-  ///
-  MemSlab *CurSlab;
+  // Pull in base class overloads.
+  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;
 
-  /// CurPtr - The current pointer into the current slab. This points to the
-  /// next free byte in the slab.
-  char *CurPtr;
+  void Deallocate(const void * /*Ptr*/, size_t /*Size*/) {}
 
-  /// End - The end of the current slab.
-  ///
-  char *End;
+  // Pull in base class overloads.
+  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;
 
-  /// BytesAllocated - This field tracks how many bytes we've allocated, so
-  /// that we can compute how much space was wasted.
-  size_t BytesAllocated;
+  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
 
-  /// AlignPtr - Align Ptr to Alignment bytes, rounding up. Alignment should
-  /// be a power of two. This method rounds up, so AlignPtr(7, 4) == 8 and
-  /// AlignPtr(8, 4) == 8.
-  static char *AlignPtr(char *Ptr, size_t Alignment);
+  size_t getTotalMemory() const {
+    size_t TotalMemory = 0;
+    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
+      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
+    for (auto &PtrAndSize : CustomSizedSlabs)
+      TotalMemory += PtrAndSize.second;
+    return TotalMemory;
+  }
 
-  /// StartNewSlab - Allocate a new slab and move the bump pointers over into
-  /// the new slab. Modifies CurPtr and End.
-  void StartNewSlab();
+  void PrintStats() const {
+    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
+                                       getTotalMemory());
+  }
 
-  /// DeallocateSlabs - Deallocate all memory slabs after and including this
-  /// one.
-  void DeallocateSlabs(MemSlab *Slab);
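The size threshold and the statistics accessors above interact as in this
sketch (invented function; the counts follow from the default 4096-byte
SlabSize and SizeThreshold):

#include "llvm/Support/Allocator.h"
#include <cstdio>

void statsDemo() {
  llvm::BumpPtrAllocator Alloc;

  // 8192 bytes plus alignment padding exceeds the 4096-byte threshold, so
  // this request gets its own custom-sized slab.
  (void)Alloc.Allocate(8192, 16);

  // A small request starts the first regular 4096-byte slab.
  (void)Alloc.Allocate(64, 8);

  // Prints "slabs: 2" -- one regular slab plus one custom-sized slab.
  std::printf("slabs: %zu, total: %zu bytes\n", Alloc.GetNumSlabs(),
              Alloc.getTotalMemory());
}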
+private:
+  /// \brief The current pointer into the current slab.
+  ///
+  /// This points to the next free byte in the slab.
+  char *CurPtr;
 
-  static MallocSlabAllocator DefaultSlabAllocator;
+  /// \brief The end of the current slab.
+  char *End;
 
-  template <typename T> friend class SpecificBumpPtrAllocator;
-public:
-  BumpPtrAllocator(size_t size = 4096, size_t threshold = 4096,
-                   SlabAllocator &allocator = DefaultSlabAllocator);
-  ~BumpPtrAllocator();
+  /// \brief The slabs allocated so far.
+  SmallVector<void *, 4> Slabs;
 
-  /// Reset - Deallocate all but the current slab and reset the current pointer
-  /// to the beginning of it, freeing all memory allocated so far.
-  void Reset();
+  /// \brief Custom-sized slabs allocated for too-large allocation requests.
+  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;
 
-  /// Allocate - Allocate space at the specified alignment.
+  /// \brief How many bytes we've allocated.
   ///
-  void *Allocate(size_t Size, size_t Alignment);
+  /// Used so that we can compute how much space was wasted.
+  size_t BytesAllocated;
 
-  /// Allocate space, but do not construct, one object.
-  ///
-  template <typename T>
-  T *Allocate() {
-    return static_cast<T*>(Allocate(sizeof(T),AlignOf<T>::Alignment));
-  }
+  /// \brief The allocator instance we use to get slabs of memory.
+  AllocatorT Allocator;
 
-  /// Allocate space for an array of objects. This does not construct the
-  /// objects though.
-  template <typename T>
-  T *Allocate(size_t Num) {
-    return static_cast<T*>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
+  static size_t computeSlabSize(unsigned SlabIdx) {
+    // Scale the actual allocated slab size based on the number of slabs
+    // allocated. Every 128 slabs allocated, we double the allocated size to
+    // reduce allocation frequency, but saturate at multiplying the slab size
+    // by 2^30.
+    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
   }
 
-  /// Allocate space for a specific count of elements and with a specified
-  /// alignment.
-  template <typename T>
-  T *Allocate(size_t Num, size_t Alignment) {
-    // Round EltSize up to the specified alignment.
-    size_t EltSize = (sizeof(T)+Alignment-1)&(-Alignment);
-    return static_cast<T*>(Allocate(Num * EltSize, Alignment));
+  /// \brief Allocate a new slab and move the bump pointers over into the new
+  /// slab, modifying CurPtr and End.
+  void StartNewSlab() {
+    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
+
+    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
+    Slabs.push_back(NewSlab);
+    CurPtr = (char *)(NewSlab);
+    End = ((char *)NewSlab) + AllocatedSlabSize;
   }
 
-  void Deallocate(const void * /*Ptr*/) {}
+  /// \brief Deallocate a sequence of slabs.
+  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
+                       SmallVectorImpl<void *>::iterator E) {
+    for (; I != E; ++I) {
+      size_t AllocatedSlabSize =
+          computeSlabSize(std::distance(Slabs.begin(), I));
+#ifndef NDEBUG
+      // Poison the memory so stale pointers crash sooner.
+      if (AllocatedSlabSize != 0) {
+        sys::Memory::setRangeWritable(*I, AllocatedSlabSize);
+        memset(*I, 0xCD, AllocatedSlabSize);
+      }
+#endif
+      Allocator.Deallocate(*I, AllocatedSlabSize);
+    }
+  }
 
-  unsigned GetNumSlabs() const;
+  /// \brief Deallocate all memory for custom sized slabs.
+  void DeallocateCustomSizedSlabs() {
+    for (auto &PtrAndSize : CustomSizedSlabs) {
+      void *Ptr = PtrAndSize.first;
+      size_t Size = PtrAndSize.second;
+#ifndef NDEBUG
+      // Poison the memory so stale pointers crash sooner.
+      sys::Memory::setRangeWritable(Ptr, Size);
+      memset(Ptr, 0xCD, Size);
+#endif
+      Allocator.Deallocate(Ptr, Size);
+    }
+  }
 
-  void PrintStats() const;
+  template <typename T> friend class SpecificBumpPtrAllocator;
 };
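computeSlabSize's growth policy in the class just closed is worth a worked
example. This standalone sketch (specialized to the default 4096-byte
SlabSize) reproduces the scaling: the slab size doubles every 128 slabs and
the multiplier saturates at 2^30:

#include <algorithm>
#include <cstddef>

// Same formula as computeSlabSize, with SlabSize fixed at 4096.
size_t slabSizeForIndex(unsigned SlabIdx) {
  return 4096 * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
}

// slabSizeForIndex(0)   == 4096   (slabs 0 through 127)
// slabSizeForIndex(128) == 8192   (slabs 128 through 255)
// slabSizeForIndex(256) == 16384  (slabs 256 through 383)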
 
-/// SpecificBumpPtrAllocator - Same as BumpPtrAllocator but allows only
-/// elements of one type to be allocated. This allows calling the destructor
-/// in DestroyAll() and when the allocator is destroyed.
-template <typename T>
-class SpecificBumpPtrAllocator {
+/// \brief The standard BumpPtrAllocator which just uses the default template
+/// parameters.
+typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
+
+/// \brief A BumpPtrAllocator that allows only elements of a specific type to be
+/// allocated.
+///
+/// This allows calling the destructor in DestroyAll() and when the allocator is
+/// destroyed.
+template <typename T> class SpecificBumpPtrAllocator {
   BumpPtrAllocator Allocator;
-public:
-  SpecificBumpPtrAllocator(size_t size = 4096, size_t threshold = 4096,
-                           SlabAllocator &allocator =
-                               BumpPtrAllocator::DefaultSlabAllocator)
-    : Allocator(size, threshold, allocator) {}
 
-  ~SpecificBumpPtrAllocator() {
-    DestroyAll();
+public:
+  SpecificBumpPtrAllocator() : Allocator() {}
+  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
+      : Allocator(std::move(Old.Allocator)) {}
+  ~SpecificBumpPtrAllocator() { DestroyAll(); }
+
+  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
+    Allocator = std::move(RHS.Allocator);
+    return *this;
   }
 
   /// Call the destructor of each allocated object and deallocate all but the
   /// current slab and reset the current pointer to the beginning of it, freeing
   /// all memory allocated so far.
   void DestroyAll() {
-    MemSlab *Slab = Allocator.CurSlab;
-    while (Slab) {
-      char *End = Slab == Allocator.CurSlab ? Allocator.CurPtr :
-                                              (char *)Slab + Slab->Size;
-      for (char *Ptr = (char*)(Slab+1); Ptr < End; Ptr += sizeof(T)) {
-        Ptr = Allocator.AlignPtr(Ptr, alignof<T>());
-        if (Ptr + sizeof(T) <= End)
-          reinterpret_cast<T*>(Ptr)->~T();
-      }
-      Slab = Slab->NextPtr;
+    auto DestroyElements = [](char *Begin, char *End) {
+      assert(Begin == (char*)alignAddr(Begin, alignOf<T>()));
+      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
+        reinterpret_cast<T *>(Ptr)->~T();
+    };
+
+    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
+         ++I) {
+      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
+          std::distance(Allocator.Slabs.begin(), I));
+      char *Begin = (char*)alignAddr(*I, alignOf<T>());
+      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
+                                               : (char *)*I + AllocatedSlabSize;
+
+      DestroyElements(Begin, End);
     }
+
+    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
+      void *Ptr = PtrAndSize.first;
+      size_t Size = PtrAndSize.second;
+      DestroyElements((char*)alignAddr(Ptr, alignOf<T>()), (char *)Ptr + Size);
+    }
+
     Allocator.Reset();
   }
 
-  /// Allocate space for a specific count of elements.
-  T *Allocate(size_t num = 1) {
-    return Allocator.Allocate<T>(num);
-  }
+  /// \brief Allocate space for an array of objects without constructing them.
+  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
 };
 
 }  // end namespace llvm
 
-inline void *operator new(size_t Size, llvm::BumpPtrAllocator &Allocator) {
+template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
+void *operator new(size_t Size,
+                   llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
+                                              SizeThreshold> &Allocator) {
   struct S {
     char c;
-#ifdef __GNUC__
-    char x __attribute__((aligned));
-#else
     union {
       double D;
       long double LD;
       long long L;
       void *P;
     } x;
-#endif
   };
-  return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
-                                           offsetof(S, x)));
+  return Allocator.Allocate(
+      Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
+}
+
+template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
+void operator delete(
+    void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
+}
 
 #endif // LLVM_SUPPORT_ALLOCATOR_H
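A usage sketch for SpecificBumpPtrAllocator (the Node type is invented):
unlike the raw BumpPtrAllocator, destruction is handled for you, since
DestroyAll() walks every slab and runs ~T on each object slot.

#include "llvm/Support/Allocator.h"
#include <new>

struct Node {
  int Value;
  explicit Node(int V) : Value(V) {}
};

void specificDemo() {
  llvm::SpecificBumpPtrAllocator<Node> Alloc;

  // Allocate returns raw storage; objects are placement-new'ed into it.
  Node *N = new (Alloc.Allocate()) Node(7);
  (void)N;

  // ~SpecificBumpPtrAllocator calls DestroyAll(), which invokes ~Node on
  // every allocated object before the slabs are released.
}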
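Finally, the operator new overload defined at the end of the new file lets
clients use placement-new syntax against any BumpPtrAllocatorImpl (Token is
an invented example type). There is deliberately no ordinary way to delete
such an object; the matching operator delete exists so the compiler can
unwind cleanly if the constructor throws.

#include "llvm/Support/Allocator.h"

struct Token {
  unsigned Kind;
  unsigned Length;
};

void tokenDemo(llvm::BumpPtrAllocator &Alloc) {
  // Sized and aligned by the operator new overload; never pass the result
  // to ordinary delete.
  Token *T = new (Alloc) Token();
  (void)T;
}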