X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FSROA.cpp;h=ff08401377024a7d8ce0000ad67f613ab45cec7e;hb=16d857e06ed633a18d3cc6fe0caa7e9421a5dc9e;hp=adf7e7af83ed674d841fccdfee4d6f559eca9ee2;hpb=2f87640b86315beab8a5671cc23f524e59c58bd3;p=oota-llvm.git diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp index adf7e7af83e..d37d1e753eb 100644 --- a/lib/Transforms/Scalar/SROA.cpp +++ b/lib/Transforms/Scalar/SROA.cpp @@ -23,353 +23,561 @@ /// //===----------------------------------------------------------------------===// -#define DEBUG_TYPE "sroa" #include "llvm/Transforms/Scalar.h" -#include "llvm/Constants.h" -#include "llvm/DIBuilder.h" -#include "llvm/DebugInfo.h" -#include "llvm/DerivedTypes.h" -#include "llvm/Function.h" -#include "llvm/IRBuilder.h" -#include "llvm/Instructions.h" -#include "llvm/IntrinsicInst.h" -#include "llvm/LLVMContext.h" -#include "llvm/Module.h" -#include "llvm/Operator.h" -#include "llvm/Pass.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" -#include "llvm/ADT/STLExtras.h" -#include "llvm/Analysis/Dominators.h" +#include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/Loads.h" +#include "llvm/Analysis/PtrUseVisitor.h" #include "llvm/Analysis/ValueTracking.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DIBuilder.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DebugInfo.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/InstVisitor.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Operator.h" +#include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" +#include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/GetElementPtrTypeIterator.h" -#include "llvm/Support/InstVisitor.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/TimeValue.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/DataLayout.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" -#include "llvm/Transforms/Utils/SSAUpdater.h" + +#if __cplusplus >= 201103L && !defined(NDEBUG) +// We only use this for a debug check in C++11 +#include +#endif + using namespace llvm; +#define DEBUG_TYPE "sroa" + STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement"); -STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); -STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); +STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed"); +STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca"); +STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten"); +STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition"); +STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); +STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion"); -STATISTIC(NumDeleted, "Number of instructions deleted"); -STATISTIC(NumVectorized, "Number of vectorized aggregates"); +STATISTIC(NumDeleted, "Number of instructions deleted"); +STATISTIC(NumVectorized, "Number of vectorized aggregates"); + +/// Hidden option to enable randomly shuffling the slices 
to help uncover +/// instability in their order. +static cl::opt SROARandomShuffleSlices("sroa-random-shuffle-slices", + cl::init(false), cl::Hidden); -/// Hidden option to force the pass to not use DomTree and mem2reg, instead -/// forming SSA values through the SSAUpdater infrastructure. -static cl::opt -ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden); +/// Hidden option to experiment with completely strict handling of inbounds +/// GEPs. +static cl::opt SROAStrictInbounds("sroa-strict-inbounds", cl::init(false), + cl::Hidden); namespace { -/// \brief Alloca partitioning representation. -/// -/// This class represents a partitioning of an alloca into slices, and -/// information about the nature of uses of each slice of the alloca. The goal -/// is that this information is sufficient to decide if and how to split the -/// alloca apart and replace slices with scalars. It is also intended that this -/// structure can capture the relevant information needed both to decide about -/// and to enact these transformations. -class AllocaPartitioning { +/// \brief A custom IRBuilder inserter which prefixes all names if they are +/// preserved. +template +class IRBuilderPrefixedInserter + : public IRBuilderDefaultInserter { + std::string Prefix; + public: - /// \brief A common base class for representing a half-open byte range. - struct ByteRange { - /// \brief The beginning offset of the range. - uint64_t BeginOffset; + void SetNamePrefix(const Twine &P) { Prefix = P.str(); } - /// \brief The ending offset, not included in the range. - uint64_t EndOffset; +protected: + void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB, + BasicBlock::iterator InsertPt) const { + IRBuilderDefaultInserter::InsertHelper( + I, Name.isTriviallyEmpty() ? Name : Prefix + Name, BB, InsertPt); + } +}; - ByteRange() : BeginOffset(), EndOffset() {} - ByteRange(uint64_t BeginOffset, uint64_t EndOffset) - : BeginOffset(BeginOffset), EndOffset(EndOffset) {} +// Specialization for not preserving the name is trivial. +template <> +class IRBuilderPrefixedInserter + : public IRBuilderDefaultInserter { +public: + void SetNamePrefix(const Twine &P) {} +}; - /// \brief Support for ordering ranges. - /// - /// This provides an ordering over ranges such that start offsets are - /// always increasing, and within equal start offsets, the end offsets are - /// decreasing. Thus the spanning range comes first in a cluster with the - /// same start position. - bool operator<(const ByteRange &RHS) const { - if (BeginOffset < RHS.BeginOffset) return true; - if (BeginOffset > RHS.BeginOffset) return false; - if (EndOffset > RHS.EndOffset) return true; - return false; - } +/// \brief Provide a typedef for IRBuilder that drops names in release builds. +#ifndef NDEBUG +typedef llvm::IRBuilder> + IRBuilderTy; +#else +typedef llvm::IRBuilder> + IRBuilderTy; +#endif +} - /// \brief Support comparison with a single offset to allow binary searches. - friend bool operator<(const ByteRange &LHS, uint64_t RHSOffset) { - return LHS.BeginOffset < RHSOffset; - } +namespace { +/// \brief A used slice of an alloca. +/// +/// This structure represents a slice of an alloca used by some instruction. It +/// stores both the begin and end offsets of this use, a pointer to the use +/// itself, and a flag indicating whether we can classify the use as splittable +/// or not when forming partitions of the alloca. +class Slice { + /// \brief The beginning offset of the range. 
+ uint64_t BeginOffset; - friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset, - const ByteRange &RHS) { - return LHSOffset < RHS.BeginOffset; - } + /// \brief The ending offset, not included in the range. + uint64_t EndOffset; - bool operator==(const ByteRange &RHS) const { - return BeginOffset == RHS.BeginOffset && EndOffset == RHS.EndOffset; - } - bool operator!=(const ByteRange &RHS) const { return !operator==(RHS); } - }; + /// \brief Storage for both the use of this slice and whether it can be + /// split. + PointerIntPair UseAndIsSplittable; - /// \brief A partition of an alloca. - /// - /// This structure represents a contiguous partition of the alloca. These are - /// formed by examining the uses of the alloca. During formation, they may - /// overlap but once an AllocaPartitioning is built, the Partitions within it - /// are all disjoint. - struct Partition : public ByteRange { - /// \brief Whether this partition is splittable into smaller partitions. - /// - /// We flag partitions as splittable when they are formed entirely due to - /// accesses by trivially splittable operations such as memset and memcpy. - bool IsSplittable; - - /// \brief Test whether a partition has been marked as dead. - bool isDead() const { - if (BeginOffset == UINT64_MAX) { - assert(EndOffset == UINT64_MAX); - return true; - } - return false; - } +public: + Slice() : BeginOffset(), EndOffset() {} + Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable) + : BeginOffset(BeginOffset), EndOffset(EndOffset), + UseAndIsSplittable(U, IsSplittable) {} - /// \brief Kill a partition. - /// This is accomplished by setting both its beginning and end offset to - /// the maximum possible value. - void kill() { - assert(!isDead() && "He's Dead, Jim!"); - BeginOffset = EndOffset = UINT64_MAX; - } + uint64_t beginOffset() const { return BeginOffset; } + uint64_t endOffset() const { return EndOffset; } - Partition() : ByteRange(), IsSplittable() {} - Partition(uint64_t BeginOffset, uint64_t EndOffset, bool IsSplittable) - : ByteRange(BeginOffset, EndOffset), IsSplittable(IsSplittable) {} - }; + bool isSplittable() const { return UseAndIsSplittable.getInt(); } + void makeUnsplittable() { UseAndIsSplittable.setInt(false); } - /// \brief A particular use of a partition of the alloca. - /// - /// This structure is used to associate uses of a partition with it. They - /// mark the range of bytes which are referenced by a particular instruction, - /// and includes a handle to the user itself and the pointer value in use. - /// The bounds of these uses are determined by intersecting the bounds of the - /// memory use itself with a particular partition. As a consequence there is - /// intentionally overlap between various uses of the same partition. - struct PartitionUse : public ByteRange { - /// \brief The use in question. Provides access to both user and used value. - /// - /// Note that this may be null if the partition use is *dead*, that is, it - /// should be ignored. - Use *U; + Use *getUse() const { return UseAndIsSplittable.getPointer(); } - PartitionUse() : ByteRange(), U() {} - PartitionUse(uint64_t BeginOffset, uint64_t EndOffset, Use *U) - : ByteRange(BeginOffset, EndOffset), U(U) {} - }; + bool isDead() const { return getUse() == nullptr; } + void kill() { UseAndIsSplittable.setPointer(nullptr); } - /// \brief Construct a partitioning of a particular alloca. + /// \brief Support for ordering ranges. /// - /// Construction does most of the work for partitioning the alloca. 
This - /// performs the necessary walks of users and builds a partitioning from it. - AllocaPartitioning(const DataLayout &TD, AllocaInst &AI); + /// This provides an ordering over ranges such that start offsets are + /// always increasing, and within equal start offsets, the end offsets are + /// decreasing. Thus the spanning range comes first in a cluster with the + /// same start position. + bool operator<(const Slice &RHS) const { + if (beginOffset() < RHS.beginOffset()) + return true; + if (beginOffset() > RHS.beginOffset()) + return false; + if (isSplittable() != RHS.isSplittable()) + return !isSplittable(); + if (endOffset() > RHS.endOffset()) + return true; + return false; + } + + /// \brief Support comparison with a single offset to allow binary searches. + friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS, + uint64_t RHSOffset) { + return LHS.beginOffset() < RHSOffset; + } + friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset, + const Slice &RHS) { + return LHSOffset < RHS.beginOffset(); + } + + bool operator==(const Slice &RHS) const { + return isSplittable() == RHS.isSplittable() && + beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset(); + } + bool operator!=(const Slice &RHS) const { return !operator==(RHS); } +}; +} // end anonymous namespace + +namespace llvm { +template struct isPodLike; +template <> struct isPodLike { static const bool value = true; }; +} + +namespace { +/// \brief Representation of the alloca slices. +/// +/// This class represents the slices of an alloca which are formed by its +/// various uses. If a pointer escapes, we can't fully build a representation +/// for the slices used and we reflect that in this structure. The uses are +/// stored, sorted by increasing beginning offset and with unsplittable slices +/// starting at a particular offset before splittable slices. +class AllocaSlices { +public: + /// \brief Construct the slices of a particular alloca. + AllocaSlices(const DataLayout &DL, AllocaInst &AI); /// \brief Test whether a pointer to the allocation escapes our analysis. /// - /// If this is true, the partitioning is never fully built and should be + /// If this is true, the slices are never fully built and should be /// ignored. bool isEscaped() const { return PointerEscapingInstr; } - /// \brief Support for iterating over the partitions. + /// \brief Support for iterating over the slices. /// @{ - typedef SmallVectorImpl::iterator iterator; - iterator begin() { return Partitions.begin(); } - iterator end() { return Partitions.end(); } - - typedef SmallVectorImpl::const_iterator const_iterator; - const_iterator begin() const { return Partitions.begin(); } - const_iterator end() const { return Partitions.end(); } + typedef SmallVectorImpl::iterator iterator; + typedef iterator_range range; + iterator begin() { return Slices.begin(); } + iterator end() { return Slices.end(); } + + typedef SmallVectorImpl::const_iterator const_iterator; + typedef iterator_range const_range; + const_iterator begin() const { return Slices.begin(); } + const_iterator end() const { return Slices.end(); } /// @} - /// \brief Support for iterating over and manipulating a particular - /// partition's uses. - /// - /// The iteration support provided for uses is more limited, but also - /// includes some manipulation routines to support rewriting the uses of - /// partitions during SROA. 
- /// @{ - typedef SmallVectorImpl::iterator use_iterator; - use_iterator use_begin(unsigned Idx) { return Uses[Idx].begin(); } - use_iterator use_begin(const_iterator I) { return Uses[I - begin()].begin(); } - use_iterator use_end(unsigned Idx) { return Uses[Idx].end(); } - use_iterator use_end(const_iterator I) { return Uses[I - begin()].end(); } + /// \brief Erase a range of slices. + void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); } - typedef SmallVectorImpl::const_iterator const_use_iterator; - const_use_iterator use_begin(unsigned Idx) const { return Uses[Idx].begin(); } - const_use_iterator use_begin(const_iterator I) const { - return Uses[I - begin()].begin(); - } - const_use_iterator use_end(unsigned Idx) const { return Uses[Idx].end(); } - const_use_iterator use_end(const_iterator I) const { - return Uses[I - begin()].end(); + /// \brief Insert new slices for this alloca. + /// + /// This moves the slices into the alloca's slices collection, and re-sorts + /// everything so that the usual ordering properties of the alloca's slices + /// hold. + void insert(ArrayRef NewSlices) { + int OldSize = Slices.size(); + Slices.append(NewSlices.begin(), NewSlices.end()); + auto SliceI = Slices.begin() + OldSize; + std::sort(SliceI, Slices.end()); + std::inplace_merge(Slices.begin(), SliceI, Slices.end()); } - unsigned use_size(unsigned Idx) const { return Uses[Idx].size(); } - unsigned use_size(const_iterator I) const { return Uses[I - begin()].size(); } - const PartitionUse &getUse(unsigned PIdx, unsigned UIdx) const { - return Uses[PIdx][UIdx]; - } - const PartitionUse &getUse(const_iterator I, unsigned UIdx) const { - return Uses[I - begin()][UIdx]; - } + // Forward declare an iterator to befriend it. + class partition_iterator; - void use_push_back(unsigned Idx, const PartitionUse &PU) { - Uses[Idx].push_back(PU); - } - void use_push_back(const_iterator I, const PartitionUse &PU) { - Uses[I - begin()].push_back(PU); - } - /// @} - - /// \brief Allow iterating the dead users for this alloca. + /// \brief A partition of the slices. /// - /// These are instructions which will never actually use the alloca as they - /// are outside the allocated range. They are safe to replace with undef and - /// delete. - /// @{ - typedef SmallVectorImpl::const_iterator dead_user_iterator; - dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); } - dead_user_iterator dead_user_end() const { return DeadUsers.end(); } - /// @} - - /// \brief Allow iterating the dead expressions referring to this alloca. + /// An ephemeral representation for a range of slices which can be viewed as + /// a partition of the alloca. This range represents a span of the alloca's + /// memory which cannot be split, and provides access to all of the slices + /// overlapping some part of the partition. /// - /// These are operands which have cannot actually be used to refer to the - /// alloca as they are outside its range and the user doesn't correct for - /// that. These mostly consist of PHI node inputs and the like which we just - /// need to replace with undef. - /// @{ - typedef SmallVectorImpl::const_iterator dead_op_iterator; - dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); } - dead_op_iterator dead_op_end() const { return DeadOperands.end(); } - /// @} + /// Objects of this type are produced by traversing the alloca's slices, but + /// are only ephemeral and not persistent. 
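// An illustrative sketch (the walkPartitions name is hypothetical) of how a
// consumer would typically walk the partition abstraction described above,
// using only the AllocaSlices, Partition, and Slice interfaces introduced in
// this class:
//
//   static void walkPartitions(AllocaSlices &AS) {
//     for (auto &P : AS.partitions()) {
//       // Each partition spans [beginOffset, endOffset) and is never of
//       // zero size, though it may contain no slices of its own.
//       DEBUG(dbgs() << "partition [" << P.beginOffset() << ", "
//                    << P.endOffset() << ") size " << P.size() << "\n");
//       // Slices which begin inside this partition.
//       for (Slice &S : P)
//         DEBUG(dbgs() << "  slice [" << S.beginOffset() << ", "
//                      << S.endOffset() << ")\n");
//       // Tails of split slices which began earlier but overlap here.
//       for (Slice *S : P.splitSliceTails())
//         DEBUG(dbgs() << "  split tail ending at " << S->endOffset() << "\n");
//     }
//   }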
+ class Partition { + private: + friend class AllocaSlices; + friend class AllocaSlices::partition_iterator; + + /// \brief The beginning and ending offsets of the alloca for this + /// partition. + uint64_t BeginOffset, EndOffset; + + /// \brief The start end end iterators of this partition. + iterator SI, SJ; - /// \brief MemTransferInst auxiliary data. - /// This struct provides some auxiliary data about memory transfer - /// intrinsics such as memcpy and memmove. These intrinsics can use two - /// different ranges within the same alloca, and provide other challenges to - /// correctly represent. We stash extra data to help us untangle this - /// after the partitioning is complete. - struct MemTransferOffsets { - /// The destination begin and end offsets when the destination is within - /// this alloca. If the end offset is zero the destination is not within - /// this alloca. - uint64_t DestBegin, DestEnd; - - /// The source begin and end offsets when the source is within this alloca. - /// If the end offset is zero, the source is not within this alloca. - uint64_t SourceBegin, SourceEnd; - - /// Flag for whether an alloca is splittable. - bool IsSplittable; + /// \brief A collection of split slice tails overlapping the partition. + SmallVector SplitTails; + + /// \brief Raw constructor builds an empty partition starting and ending at + /// the given iterator. + Partition(iterator SI) : SI(SI), SJ(SI) {} + + public: + /// \brief The start offset of this partition. + /// + /// All of the contained slices start at or after this offset. + uint64_t beginOffset() const { return BeginOffset; } + + /// \brief The end offset of this partition. + /// + /// All of the contained slices end at or before this offset. + uint64_t endOffset() const { return EndOffset; } + + /// \brief The size of the partition. + /// + /// Note that this can never be zero. + uint64_t size() const { + assert(BeginOffset < EndOffset && "Partitions must span some bytes!"); + return EndOffset - BeginOffset; + } + + /// \brief Test whether this partition contains no slices, and merely spans + /// a region occupied by split slices. + bool empty() const { return SI == SJ; } + + /// \name Iterate slices that start within the partition. + /// These may be splittable or unsplittable. They have a begin offset >= the + /// partition begin offset. + /// @{ + // FIXME: We should probably define a "concat_iterator" helper and use that + // to stitch together pointee_iterators over the split tails and the + // contiguous iterators of the partition. That would give a much nicer + // interface here. We could then additionally expose filtered iterators for + // split, unsplit, and unsplittable splices based on the usage patterns. + iterator begin() const { return SI; } + iterator end() const { return SJ; } + /// @} + + /// \brief Get the sequence of split slice tails. + /// + /// These tails are of slices which start before this partition but are + /// split and overlap into the partition. We accumulate these while forming + /// partitions. + ArrayRef splitSliceTails() const { return SplitTails; } }; - MemTransferOffsets getMemTransferOffsets(MemTransferInst &II) const { - return MemTransferInstData.lookup(&II); - } - /// \brief Map from a PHI or select operand back to a partition. + /// \brief An iterator over partitions of the alloca's slices. + /// + /// This iterator implements the core algorithm for partitioning the alloca's + /// slices. 
It is a forward iterator as we don't support backtracking for + /// efficiency reasons, and re-use a single storage area to maintain the + /// current set of split slices. /// - /// When manipulating PHI nodes or selects, they can use more than one - /// partition of an alloca. We store a special mapping to allow finding the - /// partition referenced by each of these operands, if any. - iterator findPartitionForPHIOrSelectOperand(Use *U) { - SmallDenseMap >::const_iterator MapIt - = PHIOrSelectOpMap.find(U); - if (MapIt == PHIOrSelectOpMap.end()) - return end(); + /// It is templated on the slice iterator type to use so that it can operate + /// with either const or non-const slice iterators. + class partition_iterator + : public iterator_facade_base { + friend class AllocaSlices; + + /// \brief Most of the state for walking the partitions is held in a class + /// with a nice interface for examining them. + Partition P; + + /// \brief We need to keep the end of the slices to know when to stop. + AllocaSlices::iterator SE; + + /// \brief We also need to keep track of the maximum split end offset seen. + /// FIXME: Do we really? + uint64_t MaxSplitSliceEndOffset; + + /// \brief Sets the partition to be empty at given iterator, and sets the + /// end iterator. + partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE) + : P(SI), SE(SE), MaxSplitSliceEndOffset(0) { + // If not already at the end, advance our state to form the initial + // partition. + if (SI != SE) + advance(); + } - return begin() + MapIt->second.first; - } + /// \brief Advance the iterator to the next partition. + /// + /// Requires that the iterator not be at the end of the slices. + void advance() { + assert((P.SI != SE || !P.SplitTails.empty()) && + "Cannot advance past the end of the slices!"); + + // Clear out any split uses which have ended. + if (!P.SplitTails.empty()) { + if (P.EndOffset >= MaxSplitSliceEndOffset) { + // If we've finished all splits, this is easy. + P.SplitTails.clear(); + MaxSplitSliceEndOffset = 0; + } else { + // Remove the uses which have ended in the prior partition. This + // cannot change the max split slice end because we just checked that + // the prior partition ended prior to that max. + P.SplitTails.erase( + std::remove_if( + P.SplitTails.begin(), P.SplitTails.end(), + [&](Slice *S) { return S->endOffset() <= P.EndOffset; }), + P.SplitTails.end()); + assert(std::any_of(P.SplitTails.begin(), P.SplitTails.end(), + [&](Slice *S) { + return S->endOffset() == MaxSplitSliceEndOffset; + }) && + "Could not find the current max split slice offset!"); + assert(std::all_of(P.SplitTails.begin(), P.SplitTails.end(), + [&](Slice *S) { + return S->endOffset() <= MaxSplitSliceEndOffset; + }) && + "Max split slice end offset is not actually the max!"); + } + } + + // If P.SI is already at the end, then we've cleared the split tail and + // now have an end iterator. + if (P.SI == SE) { + assert(P.SplitTails.empty() && "Failed to clear the split slices!"); + return; + } + + // If we had a non-empty partition previously, set up the state for + // subsequent partitions. + if (P.SI != P.SJ) { + // Accumulate all the splittable slices which started in the old + // partition into the split list. + for (Slice &S : P) + if (S.isSplittable() && S.endOffset() > P.EndOffset) { + P.SplitTails.push_back(&S); + MaxSplitSliceEndOffset = + std::max(S.endOffset(), MaxSplitSliceEndOffset); + } + + // Start from the end of the previous partition. 
+ P.SI = P.SJ; + + // If P.SI is now at the end, we at most have a tail of split slices. + if (P.SI == SE) { + P.BeginOffset = P.EndOffset; + P.EndOffset = MaxSplitSliceEndOffset; + return; + } + + // If the we have split slices and the next slice is after a gap and is + // not splittable immediately form an empty partition for the split + // slices up until the next slice begins. + if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset && + !P.SI->isSplittable()) { + P.BeginOffset = P.EndOffset; + P.EndOffset = P.SI->beginOffset(); + return; + } + } - /// \brief Map from a PHI or select operand back to the specific use of - /// a partition. + // OK, we need to consume new slices. Set the end offset based on the + // current slice, and step SJ past it. The beginning offset of the + // partition is the beginning offset of the next slice unless we have + // pre-existing split slices that are continuing, in which case we begin + // at the prior end offset. + P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset; + P.EndOffset = P.SI->endOffset(); + ++P.SJ; + + // There are two strategies to form a partition based on whether the + // partition starts with an unsplittable slice or a splittable slice. + if (!P.SI->isSplittable()) { + // When we're forming an unsplittable region, it must always start at + // the first slice and will extend through its end. + assert(P.BeginOffset == P.SI->beginOffset()); + + // Form a partition including all of the overlapping slices with this + // unsplittable slice. + while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) { + if (!P.SJ->isSplittable()) + P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset()); + ++P.SJ; + } + + // We have a partition across a set of overlapping unsplittable + // partitions. + return; + } + + // If we're starting with a splittable slice, then we need to form + // a synthetic partition spanning it and any other overlapping splittable + // splices. + assert(P.SI->isSplittable() && "Forming a splittable partition!"); + + // Collect all of the overlapping splittable slices. + while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset && + P.SJ->isSplittable()) { + P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset()); + ++P.SJ; + } + + // Back upiP.EndOffset if we ended the span early when encountering an + // unsplittable slice. This synthesizes the early end offset of + // a partition spanning only splittable slices. + if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) { + assert(!P.SJ->isSplittable()); + P.EndOffset = P.SJ->beginOffset(); + } + } + + public: + bool operator==(const partition_iterator &RHS) const { + assert(SE == RHS.SE && + "End iterators don't match between compared partition iterators!"); + + // The observed positions of partitions is marked by the P.SI iterator and + // the emptiness of the split slices. The latter is only relevant when + // P.SI == SE, as the end iterator will additionally have an empty split + // slices list, but the prior may have the same P.SI and a tail of split + // slices. 
+ if (P.SI == RHS.P.SI && + P.SplitTails.empty() == RHS.P.SplitTails.empty()) { + assert(P.SJ == RHS.P.SJ && + "Same set of slices formed two different sized partitions!"); + assert(P.SplitTails.size() == RHS.P.SplitTails.size() && + "Same slice position with differently sized non-empty split " + "slice tails!"); + return true; + } + return false; + } + + partition_iterator &operator++() { + advance(); + return *this; + } + + Partition &operator*() { return P; } + }; + + /// \brief A forward range over the partitions of the alloca's slices. /// - /// Similar to mapping these operands back to the partitions, this maps - /// directly to the use structure of that partition. - use_iterator findPartitionUseForPHIOrSelectOperand(Use *U) { - SmallDenseMap >::const_iterator MapIt - = PHIOrSelectOpMap.find(U); - assert(MapIt != PHIOrSelectOpMap.end()); - return Uses[MapIt->second.first].begin() + MapIt->second.second; + /// This accesses an iterator range over the partitions of the alloca's + /// slices. It computes these partitions on the fly based on the overlapping + /// offsets of the slices and the ability to split them. It will visit "empty" + /// partitions to cover regions of the alloca only accessed via split + /// slices. + iterator_range partitions() { + return make_range(partition_iterator(begin(), end()), + partition_iterator(end(), end())); } - /// \brief Compute a common type among the uses of a particular partition. + /// \brief Access the dead users for this alloca. + ArrayRef getDeadUsers() const { return DeadUsers; } + + /// \brief Access the dead operands referring to this alloca. /// - /// This routines walks all of the uses of a particular partition and tries - /// to find a common type between them. Untyped operations such as memset and - /// memcpy are ignored. - Type *getCommonType(iterator I) const; + /// These are operands which have cannot actually be used to refer to the + /// alloca as they are outside its range and the user doesn't correct for + /// that. These mostly consist of PHI node inputs and the like which we just + /// need to replace with undef. + ArrayRef getDeadOperands() const { return DeadOperands; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const; - void printUsers(raw_ostream &OS, const_iterator I, + void printSlice(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const; + void printUse(raw_ostream &OS, const_iterator I, + StringRef Indent = " ") const; void print(raw_ostream &OS) const; - void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const; - void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const; + void dump(const_iterator I) const; + void dump() const; #endif private: template class BuilderBase; - class PartitionBuilder; - friend class AllocaPartitioning::PartitionBuilder; - class UseBuilder; - friend class AllocaPartitioning::UseBuilder; + class SliceBuilder; + friend class AllocaSlices::SliceBuilder; -#ifndef NDEBUG +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) /// \brief Handle to alloca instruction to simplify method interfaces. AllocaInst &AI; #endif - /// \brief The instruction responsible for this alloca having no partitioning. + /// \brief The instruction responsible for this alloca not having a known set + /// of slices. /// /// When an instruction (potentially) escapes the pointer to the alloca, we - /// store a pointer to that here and abort trying to partition the alloca. 
- /// This will be null if the alloca is partitioned successfully. + /// store a pointer to that here and abort trying to form slices of the + /// alloca. This will be null if the alloca slices are analyzed successfully. Instruction *PointerEscapingInstr; - /// \brief The partitions of the alloca. - /// - /// We store a vector of the partitions over the alloca here. This vector is - /// sorted by increasing begin offset, and then by decreasing end offset. See - /// the Partition inner class for more details. Initially (during - /// construction) there are overlaps, but we form a disjoint sequence of - /// partitions while finishing construction and a fully constructed object is - /// expected to always have this as a disjoint space. - SmallVector Partitions; - - /// \brief The uses of the partitions. + /// \brief The slices of the alloca. /// - /// This is essentially a mapping from each partition to a list of uses of - /// that partition. The mapping is done with a Uses vector that has the exact - /// same number of entries as the partition vector. Each entry is itself - /// a vector of the uses. - SmallVector, 8> Uses; + /// We store a vector of the slices formed by uses of the alloca here. This + /// vector is sorted by increasing begin offset, and then the unsplittable + /// slices before the splittable ones. See the Slice inner class for more + /// details. + SmallVector Slices; /// \brief Instructions which will become dead if we rewrite the alloca. /// - /// Note that these are not separated by partition. This is because we expect - /// a partitioned alloca to be completely rewritten or not rewritten at all. - /// If rewritten, all these instructions can simply be removed and replaced - /// with undef as they come from outside of the allocated space. + /// Note that these are not separated by slice. This is because we expect an + /// alloca to be completely rewritten or not rewritten at all. If rewritten, + /// all these instructions can simply be removed and replaced with undef as + /// they come from outside of the allocated space. SmallVector DeadUsers; /// \brief Operands which will become dead if we rewrite the alloca. @@ -381,379 +589,330 @@ private: /// want to swap this particular input for undef to simplify the use lists of /// the alloca. SmallVector DeadOperands; - - /// \brief The underlying storage for auxiliary memcpy and memset info. - SmallDenseMap MemTransferInstData; - - /// \brief A side datastructure used when building up the partitions and uses. - /// - /// This mapping is only really used during the initial building of the - /// partitioning so that we can retain information about PHI and select nodes - /// processed. - SmallDenseMap > PHIOrSelectSizes; - - /// \brief Auxiliary information for particular PHI or select operands. - SmallDenseMap, 4> PHIOrSelectOpMap; - - /// \brief A utility routine called from the constructor. - /// - /// This does what it says on the tin. It is the key of the alloca partition - /// splitting and merging. After it is called we have the desired disjoint - /// collection of partitions. 
- void splitAndMergePartitions(); }; } -template -class AllocaPartitioning::BuilderBase - : public InstVisitor { -public: - BuilderBase(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P) - : TD(TD), - AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())), - P(P) { - enqueueUsers(AI, 0); - } - -protected: - const DataLayout &TD; - const uint64_t AllocSize; - AllocaPartitioning &P; - - SmallPtrSet VisitedUses; +static Value *foldSelectInst(SelectInst &SI) { + // If the condition being selected on is a constant or the same value is + // being selected between, fold the select. Yes this does (rarely) happen + // early on. + if (ConstantInt *CI = dyn_cast(SI.getCondition())) + return SI.getOperand(1 + CI->isZero()); + if (SI.getOperand(1) == SI.getOperand(2)) + return SI.getOperand(1); - struct OffsetUse { - Use *U; - int64_t Offset; - }; - SmallVector Queue; + return nullptr; +} - // The active offset and use while visiting. - Use *U; - int64_t Offset; - - void enqueueUsers(Instruction &I, int64_t UserOffset) { - for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); - UI != UE; ++UI) { - if (VisitedUses.insert(&UI.getUse())) { - OffsetUse OU = { &UI.getUse(), UserOffset }; - Queue.push_back(OU); - } - } +/// \brief A helper that folds a PHI node or a select. +static Value *foldPHINodeOrSelectInst(Instruction &I) { + if (PHINode *PN = dyn_cast(&I)) { + // If PN merges together the same value, return that value. + return PN->hasConstantValue(); } + return foldSelectInst(cast(I)); +} - bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) { - GEPOffset = Offset; - unsigned int AS = GEPI.getPointerAddressSpace(); - for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI); - GTI != GTE; ++GTI) { - ConstantInt *OpC = dyn_cast(GTI.getOperand()); - if (!OpC) - return false; - if (OpC->isZero()) - continue; - - // Handle a struct index, which adds its field offset to the pointer. - if (StructType *STy = dyn_cast(*GTI)) { - unsigned ElementIdx = OpC->getZExtValue(); - const StructLayout *SL = TD.getStructLayout(STy); - uint64_t ElementOffset = SL->getElementOffset(ElementIdx); - // Check that we can continue to model this GEP in a signed 64-bit offset. - if (ElementOffset > INT64_MAX || - (GEPOffset >= 0 && - ((uint64_t)GEPOffset + ElementOffset) > INT64_MAX)) { - DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding " - << "what can be represented in an int64_t!\n" - << " alloca: " << P.AI << "\n"); - return false; - } - if (GEPOffset < 0) - GEPOffset = ElementOffset + (uint64_t)-GEPOffset; - else - GEPOffset += ElementOffset; - continue; - } - - APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits(AS)); - Index *= APInt(Index.getBitWidth(), - TD.getTypeAllocSize(GTI.getIndexedType())); - Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset, - /*isSigned*/true); - // Check if the result can be stored in our int64_t offset. - if (!Index.isSignedIntN(sizeof(GEPOffset) * 8)) { - DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding " - << "what can be represented in an int64_t!\n" - << " alloca: " << P.AI << "\n"); - return false; - } - - GEPOffset = Index.getSExtValue(); - } - return true; - } +/// \brief Builder for the alloca slices. +/// +/// This class builds a set of alloca slices by recursively visiting the uses +/// of an alloca and making a slice for each load and store at each offset. 
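// A minimal worked example of the fold helpers above (the %c, %a, and %b
// names are hypothetical IR values feeding a select over pointers into the
// alloca):
//
//   select i1 true, i8* %a, i8* %b   -->  foldSelectInst returns %a
//   select i1 %c,   i8* %a, i8* %a   -->  foldSelectInst returns %a
//   select i1 %c,   i8* %a, i8* %b   -->  foldSelectInst returns nullptr
//
// foldPHINodeOrSelectInst behaves analogously for PHI nodes via
// PHINode::hasConstantValue. The slice builder below uses the folded value
// either to recurse through the PHI/select as if it had been RAUW'ed, or to
// record the operand as dead so it can later be replaced with undef.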
+class AllocaSlices::SliceBuilder : public PtrUseVisitor { + friend class PtrUseVisitor; + friend class InstVisitor; + typedef PtrUseVisitor Base; - Value *foldSelectInst(SelectInst &SI) { - // If the condition being selected on is a constant or the same value is - // being selected between, fold the select. Yes this does (rarely) happen - // early on. - if (ConstantInt *CI = dyn_cast(SI.getCondition())) - return SI.getOperand(1+CI->isZero()); - if (SI.getOperand(1) == SI.getOperand(2)) { - assert(*U == SI.getOperand(1)); - return SI.getOperand(1); - } - return 0; - } -}; + const uint64_t AllocSize; + AllocaSlices &AS; -/// \brief Builder for the alloca partitioning. -/// -/// This class builds an alloca partitioning by recursively visiting the uses -/// of an alloca and splitting the partitions for each load and store at each -/// offset. -class AllocaPartitioning::PartitionBuilder - : public BuilderBase { - friend class InstVisitor; + SmallDenseMap MemTransferSliceMap; + SmallDenseMap PHIOrSelectSizes; - SmallDenseMap MemTransferPartitionMap; + /// \brief Set to de-duplicate dead instructions found in the use walk. + SmallPtrSet VisitedDeadInsts; public: - PartitionBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P) - : BuilderBase(TD, AI, P) {} - - /// \brief Run the builder over the allocation. - bool operator()() { - // Note that we have to re-evaluate size on each trip through the loop as - // the queue grows at the tail. - for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) { - U = Queue[Idx].U; - Offset = Queue[Idx].Offset; - if (!visit(cast(U->getUser()))) - return false; - } - return true; - } + SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS) + : PtrUseVisitor(DL), + AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {} private: - bool markAsEscaping(Instruction &I) { - P.PointerEscapingInstr = &I; - return false; + void markAsDead(Instruction &I) { + if (VisitedDeadInsts.insert(&I).second) + AS.DeadUsers.push_back(&I); } - void insertUse(Instruction &I, int64_t Offset, uint64_t Size, + void insertUse(Instruction &I, const APInt &Offset, uint64_t Size, bool IsSplittable = false) { - // Completely skip uses which have a zero size or don't overlap the - // allocation. - if (Size == 0 || - (Offset >= 0 && (uint64_t)Offset >= AllocSize) || - (Offset < 0 && (uint64_t)-Offset >= Size)) { + // Completely skip uses which have a zero size or start either before or + // past the end of the allocation. + if (Size == 0 || Offset.uge(AllocSize)) { DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset - << " which starts past the end of the " << AllocSize - << " byte alloca:\n" - << " alloca: " << P.AI << "\n" - << " use: " << I << "\n"); - return; - } - - // Clamp the start to the beginning of the allocation. - if (Offset < 0) { - DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset - << " to start at the beginning of the alloca:\n" - << " alloca: " << P.AI << "\n" + << " which has zero size or starts outside of the " + << AllocSize << " byte alloca:\n" + << " alloca: " << AS.AI << "\n" << " use: " << I << "\n"); - Size -= (uint64_t)-Offset; - Offset = 0; + return markAsDead(I); } - uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size; + uint64_t BeginOffset = Offset.getZExtValue(); + uint64_t EndOffset = BeginOffset + Size; // Clamp the end offset to the end of the allocation. Note that this is // formulated to handle even the case where "BeginOffset + Size" overflows. 
+ // This may appear superficially to be something we could ignore entirely, + // but that is not so! There may be widened loads or PHI-node uses where + // some instructions are dead but not others. We can't completely ignore + // them, and so have to record at least the information here. assert(AllocSize >= BeginOffset); // Established above. if (Size > AllocSize - BeginOffset) { DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset << " to remain within the " << AllocSize << " byte alloca:\n" - << " alloca: " << P.AI << "\n" + << " alloca: " << AS.AI << "\n" << " use: " << I << "\n"); EndOffset = AllocSize; } - Partition New(BeginOffset, EndOffset, IsSplittable); - P.Partitions.push_back(New); + AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable)); } - bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) { - uint64_t Size = TD.getTypeStoreSize(Ty); - - // If this memory access can be shown to *statically* extend outside the - // bounds of of the allocation, it's behavior is undefined, so simply - // ignore it. Note that this is more strict than the generic clamping - // behavior of insertUse. We also try to handle cases which might run the - // risk of overflow. - // FIXME: We should instead consider the pointer to have escaped if this - // function is being instrumented for addressing bugs or race conditions. - if (Offset < 0 || (uint64_t)Offset >= AllocSize || - Size > (AllocSize - (uint64_t)Offset)) { - DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte " - << (isa(I) ? "load" : "store") << " @" << Offset - << " which extends past the end of the " << AllocSize - << " byte alloca:\n" - << " alloca: " << P.AI << "\n" - << " use: " << I << "\n"); - return true; - } + void visitBitCastInst(BitCastInst &BC) { + if (BC.use_empty()) + return markAsDead(BC); - insertUse(I, Offset, Size); - return true; + return Base::visitBitCastInst(BC); } - bool visitBitCastInst(BitCastInst &BC) { - enqueueUsers(BC, Offset); - return true; + void visitGetElementPtrInst(GetElementPtrInst &GEPI) { + if (GEPI.use_empty()) + return markAsDead(GEPI); + + if (SROAStrictInbounds && GEPI.isInBounds()) { + // FIXME: This is a manually un-factored variant of the basic code inside + // of GEPs with checking of the inbounds invariant specified in the + // langref in a very strict sense. If we ever want to enable + // SROAStrictInbounds, this code should be factored cleanly into + // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds + // by writing out the code here where we have tho underlying allocation + // size readily available. + APInt GEPOffset = Offset; + const DataLayout &DL = GEPI.getModule()->getDataLayout(); + for (gep_type_iterator GTI = gep_type_begin(GEPI), + GTE = gep_type_end(GEPI); + GTI != GTE; ++GTI) { + ConstantInt *OpC = dyn_cast(GTI.getOperand()); + if (!OpC) + break; + + // Handle a struct index, which adds its field offset to the pointer. + if (StructType *STy = dyn_cast(*GTI)) { + unsigned ElementIdx = OpC->getZExtValue(); + const StructLayout *SL = DL.getStructLayout(STy); + GEPOffset += + APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)); + } else { + // For array or vector indices, scale the index by the size of the + // type. 
+ APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth()); + GEPOffset += Index * APInt(Offset.getBitWidth(), + DL.getTypeAllocSize(GTI.getIndexedType())); + } + + // If this index has computed an intermediate pointer which is not + // inbounds, then the result of the GEP is a poison value and we can + // delete it and all uses. + if (GEPOffset.ugt(AllocSize)) + return markAsDead(GEPI); + } + } + + return Base::visitGetElementPtrInst(GEPI); } - bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { - int64_t GEPOffset; - if (!computeConstantGEPOffset(GEPI, GEPOffset)) - return markAsEscaping(GEPI); + void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset, + uint64_t Size, bool IsVolatile) { + // We allow splitting of non-volatile loads and stores where the type is an + // integer type. These may be used to implement 'memcpy' or other "transfer + // of bits" patterns. + bool IsSplittable = Ty->isIntegerTy() && !IsVolatile; - enqueueUsers(GEPI, GEPOffset); - return true; + insertUse(I, Offset, Size, IsSplittable); } - bool visitLoadInst(LoadInst &LI) { + void visitLoadInst(LoadInst &LI) { assert((!LI.isSimple() || LI.getType()->isSingleValueType()) && "All simple FCA loads should have been pre-split"); - return handleLoadOrStore(LI.getType(), LI, Offset); + + if (!IsOffsetKnown) + return PI.setAborted(&LI); + + const DataLayout &DL = LI.getModule()->getDataLayout(); + uint64_t Size = DL.getTypeStoreSize(LI.getType()); + return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile()); } - bool visitStoreInst(StoreInst &SI) { + void visitStoreInst(StoreInst &SI) { Value *ValOp = SI.getValueOperand(); if (ValOp == *U) - return markAsEscaping(SI); + return PI.setEscapedAndAborted(&SI); + if (!IsOffsetKnown) + return PI.setAborted(&SI); + + const DataLayout &DL = SI.getModule()->getDataLayout(); + uint64_t Size = DL.getTypeStoreSize(ValOp->getType()); + + // If this memory access can be shown to *statically* extend outside the + // bounds of of the allocation, it's behavior is undefined, so simply + // ignore it. Note that this is more strict than the generic clamping + // behavior of insertUse. We also try to handle cases which might run the + // risk of overflow. + // FIXME: We should instead consider the pointer to have escaped if this + // function is being instrumented for addressing bugs or race conditions. + if (Size > AllocSize || Offset.ugt(AllocSize - Size)) { + DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset + << " which extends past the end of the " << AllocSize + << " byte alloca:\n" + << " alloca: " << AS.AI << "\n" + << " use: " << SI << "\n"); + return markAsDead(SI); + } assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) && "All simple FCA stores should have been pre-split"); - return handleLoadOrStore(ValOp->getType(), SI, Offset); + handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile()); } - - bool visitMemSetInst(MemSetInst &II) { + void visitMemSetInst(MemSetInst &II) { assert(II.getRawDest() == *U && "Pointer use is not the destination?"); ConstantInt *Length = dyn_cast(II.getLength()); - uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset; - insertUse(II, Offset, Size, Length); - return true; + if ((Length && Length->getValue() == 0) || + (IsOffsetKnown && Offset.uge(AllocSize))) + // Zero-length mem transfer intrinsics can be ignored entirely. + return markAsDead(II); + + if (!IsOffsetKnown) + return PI.setAborted(&II); + + insertUse(II, Offset, Length ? 
Length->getLimitedValue() + : AllocSize - Offset.getLimitedValue(), + (bool)Length); } - bool visitMemTransferInst(MemTransferInst &II) { + void visitMemTransferInst(MemTransferInst &II) { ConstantInt *Length = dyn_cast(II.getLength()); - uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset; - if (!Size) + if (Length && Length->getValue() == 0) // Zero-length mem transfer intrinsics can be ignored entirely. - return true; - - MemTransferOffsets &Offsets = P.MemTransferInstData[&II]; + return markAsDead(II); - // Only intrinsics with a constant length can be split. - Offsets.IsSplittable = Length; + // Because we can visit these intrinsics twice, also check to see if the + // first time marked this instruction as dead. If so, skip it. + if (VisitedDeadInsts.count(&II)) + return; - if (*U == II.getRawDest()) { - Offsets.DestBegin = Offset; - Offsets.DestEnd = Offset + Size; + if (!IsOffsetKnown) + return PI.setAborted(&II); + + // This side of the transfer is completely out-of-bounds, and so we can + // nuke the entire transfer. However, we also need to nuke the other side + // if already added to our partitions. + // FIXME: Yet another place we really should bypass this when + // instrumenting for ASan. + if (Offset.uge(AllocSize)) { + SmallDenseMap::iterator MTPI = + MemTransferSliceMap.find(&II); + if (MTPI != MemTransferSliceMap.end()) + AS.Slices[MTPI->second].kill(); + return markAsDead(II); } - if (*U == II.getRawSource()) { - Offsets.SourceBegin = Offset; - Offsets.SourceEnd = Offset + Size; + + uint64_t RawOffset = Offset.getLimitedValue(); + uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset; + + // Check for the special case where the same exact value is used for both + // source and dest. + if (*U == II.getRawDest() && *U == II.getRawSource()) { + // For non-volatile transfers this is a no-op. + if (!II.isVolatile()) + return markAsDead(II); + + return insertUse(II, Offset, Size, /*IsSplittable=*/false); } - // If we have set up end offsets for both the source and the destination, - // we have found both sides of this transfer pointing at the same alloca. - bool SeenBothEnds = Offsets.SourceEnd && Offsets.DestEnd; - if (SeenBothEnds && II.getRawDest() != II.getRawSource()) { - unsigned PrevIdx = MemTransferPartitionMap[&II]; + // If we have seen both source and destination for a mem transfer, then + // they both point to the same alloca. + bool Inserted; + SmallDenseMap::iterator MTPI; + std::tie(MTPI, Inserted) = + MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size())); + unsigned PrevIdx = MTPI->second; + if (!Inserted) { + Slice &PrevP = AS.Slices[PrevIdx]; // Check if the begin offsets match and this is a non-volatile transfer. // In that case, we can completely elide the transfer. - if (!II.isVolatile() && Offsets.SourceBegin == Offsets.DestBegin) { - P.Partitions[PrevIdx].kill(); - return true; + if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) { + PrevP.kill(); + return markAsDead(II); } // Otherwise we have an offset transfer within the same alloca. We can't // split those. - P.Partitions[PrevIdx].IsSplittable = Offsets.IsSplittable = false; - } else if (SeenBothEnds) { - // Handle the case where this exact use provides both ends of the - // operation. - assert(II.getRawDest() == II.getRawSource()); - - // For non-volatile transfers this is a no-op. - if (!II.isVolatile()) - return true; - - // Otherwise just suppress splitting. 
- Offsets.IsSplittable = false; + PrevP.makeUnsplittable(); } - // Insert the use now that we've fixed up the splittable nature. - insertUse(II, Offset, Size, Offsets.IsSplittable); - - // Setup the mapping from intrinsic to partition of we've not seen both - // ends of this transfer. - if (!SeenBothEnds) { - unsigned NewIdx = P.Partitions.size() - 1; - bool Inserted - = MemTransferPartitionMap.insert(std::make_pair(&II, NewIdx)).second; - assert(Inserted && - "Already have intrinsic in map but haven't seen both ends"); - (void)Inserted; - } + insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length); - return true; + // Check that we ended up with a valid index in the map. + assert(AS.Slices[PrevIdx].getUse()->getUser() == &II && + "Map index doesn't point back to a slice with this user."); } // Disable SRoA for any intrinsics except for lifetime invariants. - // FIXME: What about debug instrinsics? This matches old behavior, but + // FIXME: What about debug intrinsics? This matches old behavior, but // doesn't make sense. - bool visitIntrinsicInst(IntrinsicInst &II) { + void visitIntrinsicInst(IntrinsicInst &II) { + if (!IsOffsetKnown) + return PI.setAborted(&II); + if (II.getIntrinsicID() == Intrinsic::lifetime_start || II.getIntrinsicID() == Intrinsic::lifetime_end) { ConstantInt *Length = cast(II.getArgOperand(0)); - uint64_t Size = std::min(AllocSize - Offset, Length->getLimitedValue()); + uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(), + Length->getLimitedValue()); insertUse(II, Offset, Size, true); - return true; + return; } - return markAsEscaping(II); + Base::visitIntrinsicInst(II); } Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) { // We consider any PHI or select that results in a direct load or store of - // the same offset to be a viable use for partitioning purposes. These uses + // the same offset to be a viable use for slicing purposes. These uses // are considered unsplittable and the size is the maximum loaded or stored // size. SmallPtrSet Visited; SmallVector, 4> Uses; Visited.insert(Root); Uses.push_back(std::make_pair(cast(*U), Root)); + const DataLayout &DL = Root->getModule()->getDataLayout(); // If there are no loads or stores, the access is dead. We mark that as // a size zero access. Size = 0; do { Instruction *I, *UsedI; - llvm::tie(UsedI, I) = Uses.pop_back_val(); + std::tie(UsedI, I) = Uses.pop_back_val(); if (LoadInst *LI = dyn_cast(I)) { - Size = std::max(Size, TD.getTypeStoreSize(LI->getType())); + Size = std::max(Size, DL.getTypeStoreSize(LI->getType())); continue; } if (StoreInst *SI = dyn_cast(I)) { Value *Op = SI->getOperand(0); if (Op == UsedI) return SI; - Size = std::max(Size, TD.getTypeStoreSize(Op->getType())); + Size = std::max(Size, DL.getTypeStoreSize(Op->getType())); continue; } @@ -765,563 +924,149 @@ private: return I; } - for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE; - ++UI) - if (Visited.insert(cast(*UI))) - Uses.push_back(std::make_pair(I, cast(*UI))); + for (User *U : I->users()) + if (Visited.insert(cast(U)).second) + Uses.push_back(std::make_pair(I, cast(U))); } while (!Uses.empty()); - return 0; + return nullptr; } - bool visitPHINode(PHINode &PN) { - // See if we already have computed info on this node. - std::pair &PHIInfo = P.PHIOrSelectSizes[&PN]; - if (PHIInfo.first) { - PHIInfo.second = true; - insertUse(PN, Offset, PHIInfo.first); - return true; - } - - // Check for an unsafe use of the PHI node. 
- if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&PN, PHIInfo.first)) - return markAsEscaping(*EscapingI); - - insertUse(PN, Offset, PHIInfo.first); - return true; - } + void visitPHINodeOrSelectInst(Instruction &I) { + assert(isa(I) || isa(I)); + if (I.use_empty()) + return markAsDead(I); - bool visitSelectInst(SelectInst &SI) { - if (Value *Result = foldSelectInst(SI)) { + // TODO: We could use SimplifyInstruction here to fold PHINodes and + // SelectInsts. However, doing so requires to change the current + // dead-operand-tracking mechanism. For instance, suppose neither loading + // from %U nor %other traps. Then "load (select undef, %U, %other)" does not + // trap either. However, if we simply replace %U with undef using the + // current dead-operand-tracking mechanism, "load (select undef, undef, + // %other)" may trap because the select may return the first operand + // "undef". + if (Value *Result = foldPHINodeOrSelectInst(I)) { if (Result == *U) // If the result of the constant fold will be the pointer, recurse - // through the select as if we had RAUW'ed it. - enqueueUsers(SI, Offset); - - return true; - } - - // See if we already have computed info on this node. - std::pair &SelectInfo = P.PHIOrSelectSizes[&SI]; - if (SelectInfo.first) { - SelectInfo.second = true; - insertUse(SI, Offset, SelectInfo.first); - return true; - } - - // Check for an unsafe use of the PHI node. - if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&SI, SelectInfo.first)) - return markAsEscaping(*EscapingI); - - insertUse(SI, Offset, SelectInfo.first); - return true; - } - - /// \brief Disable SROA entirely if there are unhandled users of the alloca. - bool visitInstruction(Instruction &I) { return markAsEscaping(I); } -}; - - -/// \brief Use adder for the alloca partitioning. -/// -/// This class adds the uses of an alloca to all of the partitions which they -/// use. For splittable partitions, this can end up doing essentially a linear -/// walk of the partitions, but the number of steps remains bounded by the -/// total result instruction size: -/// - The number of partitions is a result of the number unsplittable -/// instructions using the alloca. -/// - The number of users of each partition is at worst the total number of -/// splittable instructions using the alloca. -/// Thus we will produce N * M instructions in the end, where N are the number -/// of unsplittable uses and M are the number of splittable. This visitor does -/// the exact same number of updates to the partitioning. -/// -/// In the more common case, this visitor will leverage the fact that the -/// partition space is pre-sorted, and do a logarithmic search for the -/// partition needed, making the total visit a classical ((N + M) * log(N)) -/// complexity operation. -class AllocaPartitioning::UseBuilder : public BuilderBase { - friend class InstVisitor; - - /// \brief Set to de-duplicate dead instructions found in the use walk. - SmallPtrSet VisitedDeadInsts; - -public: - UseBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P) - : BuilderBase(TD, AI, P) {} - - /// \brief Run the builder over the allocation. - void operator()() { - // Note that we have to re-evaluate size on each trip through the loop as - // the queue grows at the tail. 
- for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) { - U = Queue[Idx].U; - Offset = Queue[Idx].Offset; - this->visit(cast(U->getUser())); - } - } - -private: - void markAsDead(Instruction &I) { - if (VisitedDeadInsts.insert(&I)) - P.DeadUsers.push_back(&I); - } - - void insertUse(Instruction &User, int64_t Offset, uint64_t Size) { - // If the use has a zero size or extends outside of the allocation, record - // it as a dead use for elimination later. - if (Size == 0 || (uint64_t)Offset >= AllocSize || - (Offset < 0 && (uint64_t)-Offset >= Size)) - return markAsDead(User); + // through the PHI/select as if we had RAUW'ed it. + enqueueUsers(I); + else + // Otherwise the operand to the PHI/select is dead, and we can replace + // it with undef. + AS.DeadOperands.push_back(U); - // Clamp the start to the beginning of the allocation. - if (Offset < 0) { - Size -= (uint64_t)-Offset; - Offset = 0; + return; } - uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size; - - // Clamp the end offset to the end of the allocation. Note that this is - // formulated to handle even the case where "BeginOffset + Size" overflows. - assert(AllocSize >= BeginOffset); // Established above. - if (Size > AllocSize - BeginOffset) - EndOffset = AllocSize; + if (!IsOffsetKnown) + return PI.setAborted(&I); - // NB: This only works if we have zero overlapping partitions. - iterator B = std::lower_bound(P.begin(), P.end(), BeginOffset); - if (B != P.begin() && llvm::prior(B)->EndOffset > BeginOffset) - B = llvm::prior(B); - for (iterator I = B, E = P.end(); I != E && I->BeginOffset < EndOffset; - ++I) { - PartitionUse NewPU(std::max(I->BeginOffset, BeginOffset), - std::min(I->EndOffset, EndOffset), U); - P.use_push_back(I, NewPU); - if (isa(U->getUser()) || isa(U->getUser())) - P.PHIOrSelectOpMap[U] - = std::make_pair(I - P.begin(), P.Uses[I - P.begin()].size() - 1); + // See if we already have computed info on this node. + uint64_t &Size = PHIOrSelectSizes[&I]; + if (!Size) { + // This is a new PHI/Select, check for an unsafe use of it. + if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size)) + return PI.setAborted(UnsafeI); } - } - - void handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) { - uint64_t Size = TD.getTypeStoreSize(Ty); - - // If this memory access can be shown to *statically* extend outside the - // bounds of of the allocation, it's behavior is undefined, so simply - // ignore it. Note that this is more strict than the generic clamping - // behavior of insertUse. - if (Offset < 0 || (uint64_t)Offset >= AllocSize || - Size > (AllocSize - (uint64_t)Offset)) - return markAsDead(I); - - insertUse(I, Offset, Size); - } - - void visitBitCastInst(BitCastInst &BC) { - if (BC.use_empty()) - return markAsDead(BC); - - enqueueUsers(BC, Offset); - } - - void visitGetElementPtrInst(GetElementPtrInst &GEPI) { - if (GEPI.use_empty()) - return markAsDead(GEPI); - - int64_t GEPOffset; - if (!computeConstantGEPOffset(GEPI, GEPOffset)) - llvm_unreachable("Unable to compute constant offset for use"); - - enqueueUsers(GEPI, GEPOffset); - } - - void visitLoadInst(LoadInst &LI) { - handleLoadOrStore(LI.getType(), LI, Offset); - } - - void visitStoreInst(StoreInst &SI) { - handleLoadOrStore(SI.getOperand(0)->getType(), SI, Offset); - } - - void visitMemSetInst(MemSetInst &II) { - ConstantInt *Length = dyn_cast(II.getLength()); - uint64_t Size = Length ? 
Length->getZExtValue() : AllocSize - Offset; - insertUse(II, Offset, Size); - } - - void visitMemTransferInst(MemTransferInst &II) { - ConstantInt *Length = dyn_cast(II.getLength()); - uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset; - if (!Size) - return markAsDead(II); - - MemTransferOffsets &Offsets = P.MemTransferInstData[&II]; - if (!II.isVolatile() && Offsets.DestEnd && Offsets.SourceEnd && - Offsets.DestBegin == Offsets.SourceBegin) - return markAsDead(II); // Skip identity transfers without side-effects. - - insertUse(II, Offset, Size); - } - - void visitIntrinsicInst(IntrinsicInst &II) { - assert(II.getIntrinsicID() == Intrinsic::lifetime_start || - II.getIntrinsicID() == Intrinsic::lifetime_end); - - ConstantInt *Length = cast(II.getArgOperand(0)); - insertUse(II, Offset, - std::min(AllocSize - Offset, Length->getLimitedValue())); - } - - void insertPHIOrSelect(Instruction &User, uint64_t Offset) { - uint64_t Size = P.PHIOrSelectSizes.lookup(&User).first; // For PHI and select operands outside the alloca, we can't nuke the entire // phi or select -- the other side might still be relevant, so we special // case them here and use a separate structure to track the operands // themselves which should be replaced with undef. - if (Offset >= AllocSize) { - P.DeadOperands.push_back(U); + // FIXME: This should instead be escaped in the event we're instrumenting + // for address sanitization. + if (Offset.uge(AllocSize)) { + AS.DeadOperands.push_back(U); return; } - insertUse(User, Offset, Size); - } - void visitPHINode(PHINode &PN) { - if (PN.use_empty()) - return markAsDead(PN); - - insertPHIOrSelect(PN, Offset); + insertUse(I, Offset, Size); } - void visitSelectInst(SelectInst &SI) { - if (SI.use_empty()) - return markAsDead(SI); - if (Value *Result = foldSelectInst(SI)) { - if (Result == *U) - // If the result of the constant fold will be the pointer, recurse - // through the select as if we had RAUW'ed it. - enqueueUsers(SI, Offset); - else - // Otherwise the operand to the select is dead, and we can replace it - // with undef. - P.DeadOperands.push_back(U); - - return; - } + void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); } - insertPHIOrSelect(SI, Offset); - } + void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); } - /// \brief Unreachable, we've already visited the alloca once. - void visitInstruction(Instruction &I) { - llvm_unreachable("Unhandled instruction in use builder."); - } + /// \brief Disable SROA entirely if there are unhandled users of the alloca. + void visitInstruction(Instruction &I) { PI.setAborted(&I); } }; -void AllocaPartitioning::splitAndMergePartitions() { - size_t NumDeadPartitions = 0; - - // Track the range of splittable partitions that we pass when accumulating - // overlapping unsplittable partitions. - uint64_t SplitEndOffset = 0ull; - - Partition New(0ull, 0ull, false); - - for (unsigned i = 0, j = i, e = Partitions.size(); i != e; i = j) { - ++j; - - if (!Partitions[i].IsSplittable || New.BeginOffset == New.EndOffset) { - assert(New.BeginOffset == New.EndOffset); - New = Partitions[i]; - } else { - assert(New.IsSplittable); - New.EndOffset = std::max(New.EndOffset, Partitions[i].EndOffset); - } - assert(New.BeginOffset != New.EndOffset); - - // Scan the overlapping partitions. - while (j != e && New.EndOffset > Partitions[j].BeginOffset) { - // If the new partition we are forming is splittable, stop at the first - // unsplittable partition. 
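// Sketch (not part of this patch): a simplified version of the interval
// coalescing that the removed splitAndMergePartitions performed, ignoring the
// splittable/unsplittable distinction. Overlapping half-open byte ranges are
// merged into a disjoint, sorted set. All types and names are illustrative.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Range {
  uint64_t Begin, End; // Half-open: [Begin, End).
};

static std::vector<Range> mergeRanges(std::vector<Range> Rs) {
  std::sort(Rs.begin(), Rs.end(),
            [](const Range &A, const Range &B) { return A.Begin < B.Begin; });
  std::vector<Range> Out;
  for (const Range &R : Rs) {
    if (!Out.empty() && R.Begin <= Out.back().End)
      Out.back().End = std::max(Out.back().End, R.End); // Overlap: grow.
    else
      Out.push_back(R);
  }
  return Out;
}

int main() {
  for (const Range &R : mergeRanges({{0, 4}, {2, 8}, {10, 12}}))
    std::cout << "[" << R.Begin << "," << R.End << ")\n"; // [0,8) then [10,12)
}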
- if (New.IsSplittable && !Partitions[j].IsSplittable) - break; - - // Grow the new partition to include any equally splittable range. 'j' is - // always equally splittable when New is splittable, but when New is not - // splittable, we may subsume some (or part of some) splitable partition - // without growing the new one. - if (New.IsSplittable == Partitions[j].IsSplittable) { - New.EndOffset = std::max(New.EndOffset, Partitions[j].EndOffset); - } else { - assert(!New.IsSplittable); - assert(Partitions[j].IsSplittable); - SplitEndOffset = std::max(SplitEndOffset, Partitions[j].EndOffset); - } - - Partitions[j].kill(); - ++NumDeadPartitions; - ++j; - } - - // If the new partition is splittable, chop off the end as soon as the - // unsplittable subsequent partition starts and ensure we eventually cover - // the splittable area. - if (j != e && New.IsSplittable) { - SplitEndOffset = std::max(SplitEndOffset, New.EndOffset); - New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset); - } - - // Add the new partition if it differs from the original one and is - // non-empty. We can end up with an empty partition here if it was - // splittable but there is an unsplittable one that starts at the same - // offset. - if (New != Partitions[i]) { - if (New.BeginOffset != New.EndOffset) - Partitions.push_back(New); - // Mark the old one for removal. - Partitions[i].kill(); - ++NumDeadPartitions; - } - - New.BeginOffset = New.EndOffset; - if (!New.IsSplittable) { - New.EndOffset = std::max(New.EndOffset, SplitEndOffset); - if (j != e && !Partitions[j].IsSplittable) - New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset); - New.IsSplittable = true; - // If there is a trailing splittable partition which won't be fused into - // the next splittable partition go ahead and add it onto the partitions - // list. - if (New.BeginOffset < New.EndOffset && - (j == e || !Partitions[j].IsSplittable || - New.EndOffset < Partitions[j].BeginOffset)) { - Partitions.push_back(New); - New.BeginOffset = New.EndOffset = 0ull; - } - } - } - - // Re-sort the partitions now that they have been split and merged into - // disjoint set of partitions. Also remove any of the dead partitions we've - // replaced in the process. - std::sort(Partitions.begin(), Partitions.end()); - if (NumDeadPartitions) { - assert(Partitions.back().isDead()); - assert((ptrdiff_t)NumDeadPartitions == - std::count(Partitions.begin(), Partitions.end(), Partitions.back())); - } - Partitions.erase(Partitions.end() - NumDeadPartitions, Partitions.end()); -} - -AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI) +AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI) : -#ifndef NDEBUG +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) AI(AI), #endif - PointerEscapingInstr(0) { - PartitionBuilder PB(TD, AI, *this); - if (!PB()) + PointerEscapingInstr(nullptr) { + SliceBuilder PB(DL, AI, *this); + SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI); + if (PtrI.isEscaped() || PtrI.isAborted()) { + // FIXME: We should sink the escape vs. abort info into the caller nicely, + // possibly by just storing the PtrInfo in the AllocaSlices. + PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst() + : PtrI.getAbortingInst(); + assert(PointerEscapingInstr && "Did not track a bad instruction"); return; - - // Sort the uses. This arranges for the offsets to be in ascending order, - // and the sizes to be in descending order. 
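// Sketch (not part of this patch): the sort invariant the comment above
// describes -- begin offsets ascending, and for equal begin offsets, sizes
// descending -- so an enclosing range sorts before the ranges it contains.
// The Slice struct here is an illustrative stand-in, not the pass's own type.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Slice {
  uint64_t Begin, End; // Half-open byte range.
  uint64_t size() const { return End - Begin; }
};

static bool sliceLess(const Slice &A, const Slice &B) {
  if (A.Begin != B.Begin)
    return A.Begin < B.Begin; // Offsets ascending.
  return A.size() > B.size(); // Sizes descending on ties.
}

int main() {
  std::vector<Slice> S{{4, 6}, {0, 8}, {0, 2}};
  std::sort(S.begin(), S.end(), sliceLess);
  assert(S[0].Begin == 0 && S[0].End == 8); // Enclosing slice comes first.
  assert(S[1].Begin == 0 && S[1].End == 2);
  assert(S[2].Begin == 4);
}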
- std::sort(Partitions.begin(), Partitions.end()); - - // Remove any partitions from the back which are marked as dead. - while (!Partitions.empty() && Partitions.back().isDead()) - Partitions.pop_back(); - - if (Partitions.size() > 1) { - // Intersect splittability for all partitions with equal offsets and sizes. - // Then remove all but the first so that we have a sequence of non-equal but - // potentially overlapping partitions. - for (iterator I = Partitions.begin(), J = I, E = Partitions.end(); I != E; - I = J) { - ++J; - while (J != E && *I == *J) { - I->IsSplittable &= J->IsSplittable; - ++J; - } - } - Partitions.erase(std::unique(Partitions.begin(), Partitions.end()), - Partitions.end()); - - // Split splittable and merge unsplittable partitions into a disjoint set - // of partitions over the used space of the allocation. - splitAndMergePartitions(); } - // Now build up the user lists for each of these disjoint partitions by - // re-walking the recursive users of the alloca. - Uses.resize(Partitions.size()); - UseBuilder UB(TD, AI, *this); - UB(); -} - -Type *AllocaPartitioning::getCommonType(iterator I) const { - Type *Ty = 0; - for (const_use_iterator UI = use_begin(I), UE = use_end(I); UI != UE; ++UI) { - if (!UI->U) - continue; // Skip dead uses. - if (isa(*UI->U->getUser())) - continue; - if (UI->BeginOffset != I->BeginOffset || UI->EndOffset != I->EndOffset) - continue; - - Type *UserTy = 0; - if (LoadInst *LI = dyn_cast(UI->U->getUser())) { - UserTy = LI->getType(); - } else if (StoreInst *SI = dyn_cast(UI->U->getUser())) { - UserTy = SI->getValueOperand()->getType(); - } - - if (Ty && Ty != UserTy) - return 0; + Slices.erase(std::remove_if(Slices.begin(), Slices.end(), + [](const Slice &S) { + return S.isDead(); + }), + Slices.end()); - Ty = UserTy; +#if __cplusplus >= 201103L && !defined(NDEBUG) + if (SROARandomShuffleSlices) { + std::mt19937 MT(static_cast(sys::TimeValue::now().msec())); + std::shuffle(Slices.begin(), Slices.end(), MT); } - return Ty; +#endif + + // Sort the uses. This arranges for the offsets to be in ascending order, + // and the sizes to be in descending order. + std::sort(Slices.begin(), Slices.end()); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -void AllocaPartitioning::print(raw_ostream &OS, const_iterator I, - StringRef Indent) const { - OS << Indent << "partition #" << (I - begin()) - << " [" << I->BeginOffset << "," << I->EndOffset << ")" - << (I->IsSplittable ? " (splittable)" : "") - << (Uses[I - begin()].empty() ? " (zero uses)" : "") - << "\n"; +void AllocaSlices::print(raw_ostream &OS, const_iterator I, + StringRef Indent) const { + printSlice(OS, I, Indent); + OS << "\n"; + printUse(OS, I, Indent); } -void AllocaPartitioning::printUsers(raw_ostream &OS, const_iterator I, - StringRef Indent) const { - for (const_use_iterator UI = use_begin(I), UE = use_end(I); - UI != UE; ++UI) { - if (!UI->U) - continue; // Skip dead uses. - OS << Indent << " [" << UI->BeginOffset << "," << UI->EndOffset << ") " - << "used by: " << *UI->U->getUser() << "\n"; - if (MemTransferInst *II = dyn_cast(UI->U->getUser())) { - const MemTransferOffsets &MTO = MemTransferInstData.lookup(II); - bool IsDest; - if (!MTO.IsSplittable) - IsDest = UI->BeginOffset == MTO.DestBegin; - else - IsDest = MTO.DestBegin != 0u; - OS << Indent << " (original " << (IsDest ? "dest" : "source") << ": " - << "[" << (IsDest ? MTO.DestBegin : MTO.SourceBegin) - << "," << (IsDest ? 
MTO.DestEnd : MTO.SourceEnd) << ")\n"; - } - } +void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I, + StringRef Indent) const { + OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")" + << " slice #" << (I - begin()) + << (I->isSplittable() ? " (splittable)" : ""); } -void AllocaPartitioning::print(raw_ostream &OS) const { +void AllocaSlices::printUse(raw_ostream &OS, const_iterator I, + StringRef Indent) const { + OS << Indent << " used by: " << *I->getUse()->getUser() << "\n"; +} + +void AllocaSlices::print(raw_ostream &OS) const { if (PointerEscapingInstr) { - OS << "No partitioning for alloca: " << AI << "\n" + OS << "Can't analyze slices for alloca: " << AI << "\n" << " A pointer to this alloca escaped by:\n" << " " << *PointerEscapingInstr << "\n"; return; } - OS << "Partitioning of alloca: " << AI << "\n"; - unsigned Num = 0; - for (const_iterator I = begin(), E = end(); I != E; ++I, ++Num) { + OS << "Slices of alloca: " << AI << "\n"; + for (const_iterator I = begin(), E = end(); I != E; ++I) print(OS, I); - printUsers(OS, I); - } } -void AllocaPartitioning::dump(const_iterator I) const { print(dbgs(), I); } -void AllocaPartitioning::dump() const { print(dbgs()); } +LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const { + print(dbgs(), I); +} +LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); } #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) - -namespace { -/// \brief Implementation of LoadAndStorePromoter for promoting allocas. -/// -/// This subclass of LoadAndStorePromoter adds overrides to handle promoting -/// the loads and stores of an alloca instruction, as well as updating its -/// debug information. This is used when a domtree is unavailable and thus -/// mem2reg in its full form can't be used to handle promotion of allocas to -/// scalar values. -class AllocaPromoter : public LoadAndStorePromoter { - AllocaInst &AI; - DIBuilder &DIB; - - SmallVector DDIs; - SmallVector DVIs; - -public: - AllocaPromoter(const SmallVectorImpl &Insts, SSAUpdater &S, - AllocaInst &AI, DIBuilder &DIB) - : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {} - - void run(const SmallVectorImpl &Insts) { - // Remember which alloca we're promoting (for isInstInList). 
- if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) { - for (Value::use_iterator UI = DebugNode->use_begin(), - UE = DebugNode->use_end(); - UI != UE; ++UI) - if (DbgDeclareInst *DDI = dyn_cast(*UI)) - DDIs.push_back(DDI); - else if (DbgValueInst *DVI = dyn_cast(*UI)) - DVIs.push_back(DVI); - } - - LoadAndStorePromoter::run(Insts); - AI.eraseFromParent(); - while (!DDIs.empty()) - DDIs.pop_back_val()->eraseFromParent(); - while (!DVIs.empty()) - DVIs.pop_back_val()->eraseFromParent(); - } - - virtual bool isInstInList(Instruction *I, - const SmallVectorImpl &Insts) const { - if (LoadInst *LI = dyn_cast(I)) - return LI->getOperand(0) == &AI; - return cast(I)->getPointerOperand() == &AI; - } - - virtual void updateDebugInfo(Instruction *Inst) const { - for (SmallVector::const_iterator I = DDIs.begin(), - E = DDIs.end(); I != E; ++I) { - DbgDeclareInst *DDI = *I; - if (StoreInst *SI = dyn_cast(Inst)) - ConvertDebugDeclareToDebugValue(DDI, SI, DIB); - else if (LoadInst *LI = dyn_cast(Inst)) - ConvertDebugDeclareToDebugValue(DDI, LI, DIB); - } - for (SmallVector::const_iterator I = DVIs.begin(), - E = DVIs.end(); I != E; ++I) { - DbgValueInst *DVI = *I; - Value *Arg = NULL; - if (StoreInst *SI = dyn_cast(Inst)) { - // If an argument is zero extended then use argument directly. The ZExt - // may be zapped by an optimization pass in future. - if (ZExtInst *ZExt = dyn_cast(SI->getOperand(0))) - Arg = dyn_cast(ZExt->getOperand(0)); - if (SExtInst *SExt = dyn_cast(SI->getOperand(0))) - Arg = dyn_cast(SExt->getOperand(0)); - if (!Arg) - Arg = SI->getOperand(0); - } else if (LoadInst *LI = dyn_cast(Inst)) { - Arg = LI->getOperand(0); - } else { - continue; - } - Instruction *DbgVal = - DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()), - Inst); - DbgVal->setDebugLoc(DVI->getDebugLoc()); - } - } -}; -} // end anon namespace - - namespace { /// \brief An optimization pass providing Scalar Replacement of Aggregates. /// @@ -1332,7 +1077,7 @@ namespace { /// 1) It takes allocations of aggregates and analyzes the ways in which they /// are used to try to split them into smaller allocations, ideally of /// a single scalar data type. It will split up memcpy and memset accesses -/// as necessary and try to isolate invidual scalar accesses. +/// as necessary and try to isolate individual scalar accesses. /// 2) It will transform accesses into forms which are suitable for SSA value /// promotion. This can be replacing a memset with a scalar store of an /// integer value, or it can involve speculating operations on a PHI or @@ -1342,11 +1087,9 @@ namespace { /// this form. By doing so, it will enable promotion of vector aggregates to /// SSA vector values. class SROA : public FunctionPass { - const bool RequiresDomTree; - LLVMContext *C; - const DataLayout *TD; DominatorTree *DT; + AssumptionCache *AC; /// \brief Worklist of alloca instructions to simplify. /// @@ -1355,16 +1098,12 @@ class SROA : public FunctionPass { /// directly promoted. Finally, each time we rewrite a use of an alloca other /// the one being actively rewritten, we add it back onto the list if not /// already present to ensure it is re-visited. - SetVector > Worklist; + SetVector> Worklist; /// \brief A collection of instructions to delete. /// We try to batch deletions to simplify code and make things a bit more /// efficient. - SmallVector DeadInsts; - - /// \brief A set to prevent repeatedly marking an instruction split into many - /// uses as dead. Only used to guard insertion into DeadInsts. 
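// Sketch (not part of this patch): why the pass keeps Worklist, DeadInsts and
// the post-promotion worklist in SetVector containers -- re-inserting an
// element is a cheap no-op and iteration stays in deterministic insertion
// order. A minimal stand-in using std::vector plus a membership set; the
// class name is illustrative.
#include <cassert>
#include <unordered_set>
#include <vector>

template <typename T> class OrderedWorklist {
  std::vector<T> Items;
  std::unordered_set<T> Seen;

public:
  bool insert(const T &V) {
    if (!Seen.insert(V).second)
      return false; // Already present; keep its original position.
    Items.push_back(V);
    return true;
  }
  typename std::vector<T>::const_iterator begin() const { return Items.begin(); }
  typename std::vector<T>::const_iterator end() const { return Items.end(); }
  bool empty() const { return Items.empty(); }
};

int main() {
  OrderedWorklist<int> WL;
  assert(WL.insert(3));
  assert(WL.insert(1));
  assert(!WL.insert(3)); // Duplicate rejected, as with SetVector.
  assert(!WL.empty());
}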
- SmallPtrSet DeadSplitInsts; + SetVector> DeadInsts; /// \brief Post-promotion worklist. /// @@ -1374,378 +1113,325 @@ class SROA : public FunctionPass { /// /// Note that we have to be very careful to clear allocas out of this list in /// the event they are deleted. - SetVector > PostPromotionWorklist; + SetVector> PostPromotionWorklist; /// \brief A collection of alloca instructions we can directly promote. std::vector PromotableAllocas; + /// \brief A worklist of PHIs to speculate prior to promoting allocas. + /// + /// All of these PHIs have been checked for the safety of speculation and by + /// being speculated will allow promoting allocas currently in the promotable + /// queue. + SetVector> SpeculatablePHIs; + + /// \brief A worklist of select instructions to speculate prior to promoting + /// allocas. + /// + /// All of these select instructions have been checked for the safety of + /// speculation and by being speculated will allow promoting allocas + /// currently in the promotable queue. + SetVector> SpeculatableSelects; + public: - SROA(bool RequiresDomTree = true) - : FunctionPass(ID), RequiresDomTree(RequiresDomTree), - C(0), TD(0), DT(0) { + SROA() : FunctionPass(ID), C(nullptr), DT(nullptr) { initializeSROAPass(*PassRegistry::getPassRegistry()); } - bool runOnFunction(Function &F); - void getAnalysisUsage(AnalysisUsage &AU) const; + bool runOnFunction(Function &F) override; + void getAnalysisUsage(AnalysisUsage &AU) const override; - const char *getPassName() const { return "SROA"; } + const char *getPassName() const override { return "SROA"; } static char ID; private: friend class PHIOrSelectSpeculator; - friend class AllocaPartitionRewriter; - friend class AllocaPartitionVectorRewriter; + friend class AllocaSliceRewriter; - bool rewriteAllocaPartition(AllocaInst &AI, - AllocaPartitioning &P, - AllocaPartitioning::iterator PI); - bool splitAlloca(AllocaInst &AI, AllocaPartitioning &P); + bool presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS); + AllocaInst *rewritePartition(AllocaInst &AI, AllocaSlices &AS, + AllocaSlices::Partition &P); + bool splitAlloca(AllocaInst &AI, AllocaSlices &AS); bool runOnAlloca(AllocaInst &AI); - void deleteDeadInstructions(SmallPtrSet &DeletedAllocas); + void clobberUse(Use &U); + void deleteDeadInstructions(SmallPtrSetImpl &DeletedAllocas); bool promoteAllocas(Function &F); }; } char SROA::ID = 0; -FunctionPass *llvm::createSROAPass(bool RequiresDomTree) { - return new SROA(RequiresDomTree); +FunctionPass *llvm::createSROAPass() { + return new SROA(); } -INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates", - false, false) -INITIALIZE_PASS_DEPENDENCY(DominatorTree) -INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates", - false, false) - -namespace { -/// \brief Visitor to speculate PHIs and Selects where possible. -class PHIOrSelectSpeculator : public InstVisitor { - // Befriend the base class so it can delegate to private visit methods. - friend class llvm::InstVisitor; - - const DataLayout &TD; - AllocaPartitioning &P; - SROA &Pass; +INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates", false, + false) +INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) +INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) +INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates", false, + false) + +/// Walk the range of a partitioning looking for a common type to cover this +/// sequence of slices. 
+static Type *findCommonType(AllocaSlices::const_iterator B, + AllocaSlices::const_iterator E, + uint64_t EndOffset) { + Type *Ty = nullptr; + bool TyIsCommon = true; + IntegerType *ITy = nullptr; + + // Note that we need to look at *every* alloca slice's Use to ensure we + // always get consistent results regardless of the order of slices. + for (AllocaSlices::const_iterator I = B; I != E; ++I) { + Use *U = I->getUse(); + if (isa(*U->getUser())) + continue; + if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset) + continue; -public: - PHIOrSelectSpeculator(const DataLayout &TD, AllocaPartitioning &P, SROA &Pass) - : TD(TD), P(P), Pass(Pass) {} + Type *UserTy = nullptr; + if (LoadInst *LI = dyn_cast(U->getUser())) { + UserTy = LI->getType(); + } else if (StoreInst *SI = dyn_cast(U->getUser())) { + UserTy = SI->getValueOperand()->getType(); + } - /// \brief Visit the users of an alloca partition and rewrite them. - void visitUsers(AllocaPartitioning::const_iterator PI) { - // Note that we need to use an index here as the underlying vector of uses - // may be grown during speculation. However, we never need to re-visit the - // new uses, and so we can use the initial size bound. - for (unsigned Idx = 0, Size = P.use_size(PI); Idx != Size; ++Idx) { - const AllocaPartitioning::PartitionUse &PU = P.getUse(PI, Idx); - if (!PU.U) - continue; // Skip dead use. + if (IntegerType *UserITy = dyn_cast_or_null(UserTy)) { + // If the type is larger than the partition, skip it. We only encounter + // this for split integer operations where we want to use the type of the + // entity causing the split. Also skip if the type is not a byte width + // multiple. + if (UserITy->getBitWidth() % 8 != 0 || + UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset())) + continue; - visit(cast(PU.U->getUser())); + // Track the largest bitwidth integer type used in this way in case there + // is no common type. + if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth()) + ITy = UserITy; } - } - -private: - // By default, skip this instruction. - void visitInstruction(Instruction &I) {} - - /// PHI instructions that use an alloca and are subsequently loaded can be - /// rewritten to load both input pointers in the pred blocks and then PHI the - /// results, allowing the load of the alloca to be promoted. - /// From this: - /// %P2 = phi [i32* %Alloca, i32* %Other] - /// %V = load i32* %P2 - /// to: - /// %V1 = load i32* %Alloca -> will be mem2reg'd - /// ... - /// %V2 = load i32* %Other - /// ... - /// %V = phi [i32 %V1, i32 %V2] - /// - /// We can do this to a select if its only uses are loads and if the operands - /// to the select can be loaded unconditionally. - /// - /// FIXME: This should be hoisted into a generic utility, likely in - /// Transforms/Util/Local.h - bool isSafePHIToSpeculate(PHINode &PN, SmallVectorImpl &Loads) { - // For now, we can only do this promotion if the load is in the same block - // as the PHI, and if there are no stores between the phi and load. - // TODO: Allow recursive phi users. - // TODO: Allow stores. - BasicBlock *BB = PN.getParent(); - unsigned MaxAlign = 0; - for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); - UI != UE; ++UI) { - LoadInst *LI = dyn_cast(*UI); - if (LI == 0 || !LI->isSimple()) return false; - - // For now we only allow loads in the same block as the PHI. This is - // a common case that happens when instcombine merges two loads through - // a PHI. 
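// Sketch (not part of this patch): the selection rule findCommonType above
// implements, expressed over illustrative type descriptors rather than
// llvm::Type. If every exact-cover load/store agrees on a single type, use
// it; otherwise fall back to the widest byte-width integer type that fits the
// covered range. This is a simplification; the descriptor fields are assumed.
#include <cassert>
#include <string>
#include <vector>

struct UserTy {
  std::string Name;
  bool IsInt;
  unsigned Bits;
};

static std::string findCommonTypeName(const std::vector<UserTy> &Users,
                                      unsigned RangeBytes) {
  std::string Common;
  bool CommonOK = true;
  unsigned BestIntBits = 0;
  std::string BestInt;
  for (const UserTy &U : Users) {
    if (U.IsInt) {
      // Oversized or non-byte-width integers come from split accesses; track
      // nothing for them and skip the commonality check, as above.
      if (U.Bits % 8 != 0 || U.Bits / 8 > RangeBytes)
        continue;
      if (U.Bits > BestIntBits) {
        BestIntBits = U.Bits; // Widest viable integer fallback so far.
        BestInt = U.Name;
      }
    }
    if (Common.empty())
      Common = U.Name;
    else if (Common != U.Name)
      CommonOK = false; // Users disagree; only the integer fallback remains.
  }
  return CommonOK && !Common.empty() ? Common : BestInt;
}

int main() {
  assert(findCommonTypeName({{"float", false, 32}, {"float", false, 32}}, 4) ==
         "float");
  assert(findCommonTypeName({{"float", false, 32}, {"i32", true, 32}}, 4) ==
         "i32");
}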
- if (LI->getParent() != BB) return false; - - // Ensure that there are no instructions between the PHI and the load that - // could store. - for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI) - if (BBI->mayWriteToMemory()) - return false; - - MaxAlign = std::max(MaxAlign, LI->getAlignment()); - Loads.push_back(LI); - } - - // We can only transform this if it is safe to push the loads into the - // predecessor blocks. The only thing to watch out for is that we can't put - // a possibly trapping load in the predecessor if it is a critical edge. - for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; - ++Idx) { - TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator(); - Value *InVal = PN.getIncomingValue(Idx); - - // If the value is produced by the terminator of the predecessor (an - // invoke) or it has side-effects, there is no valid place to put a load - // in the predecessor. - if (TI == InVal || TI->mayHaveSideEffects()) - return false; - // If the predecessor has a single successor, then the edge isn't - // critical. - if (TI->getNumSuccessors() == 1) - continue; + // To avoid depending on the order of slices, Ty and TyIsCommon must not + // depend on types skipped above. + if (!UserTy || (Ty && Ty != UserTy)) + TyIsCommon = false; // Give up on anything but an iN type. + else + Ty = UserTy; + } - // If this pointer is always safe to load, or if we can prove that there - // is already a load in the block, then we can move the load to the pred - // block. - if (InVal->isDereferenceablePointer() || - isSafeToLoadUnconditionally(InVal, TI, MaxAlign, &TD)) - continue; + return TyIsCommon ? Ty : ITy; +} +/// PHI instructions that use an alloca and are subsequently loaded can be +/// rewritten to load both input pointers in the pred blocks and then PHI the +/// results, allowing the load of the alloca to be promoted. +/// From this: +/// %P2 = phi [i32* %Alloca, i32* %Other] +/// %V = load i32* %P2 +/// to: +/// %V1 = load i32* %Alloca -> will be mem2reg'd +/// ... +/// %V2 = load i32* %Other +/// ... +/// %V = phi [i32 %V1, i32 %V2] +/// +/// We can do this to a select if its only uses are loads and if the operands +/// to the select can be loaded unconditionally. +/// +/// FIXME: This should be hoisted into a generic utility, likely in +/// Transforms/Util/Local.h +static bool isSafePHIToSpeculate(PHINode &PN) { + // For now, we can only do this promotion if the load is in the same block + // as the PHI, and if there are no stores between the phi and load. + // TODO: Allow recursive phi users. + // TODO: Allow stores. + BasicBlock *BB = PN.getParent(); + unsigned MaxAlign = 0; + bool HaveLoad = false; + for (User *U : PN.users()) { + LoadInst *LI = dyn_cast(U); + if (!LI || !LI->isSimple()) return false; - } - return true; - } + // For now we only allow loads in the same block as the PHI. This is + // a common case that happens when instcombine merges two loads through + // a PHI. + if (LI->getParent() != BB) + return false; - void visitPHINode(PHINode &PN) { - DEBUG(dbgs() << " original: " << PN << "\n"); + // Ensure that there are no instructions between the PHI and the load that + // could store. 
+ for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI) + if (BBI->mayWriteToMemory()) + return false; - SmallVector Loads; - if (!isSafePHIToSpeculate(PN, Loads)) - return; + MaxAlign = std::max(MaxAlign, LI->getAlignment()); + HaveLoad = true; + } - assert(!Loads.empty()); + if (!HaveLoad) + return false; - Type *LoadTy = cast(PN.getType())->getElementType(); - IRBuilder<> PHIBuilder(&PN); - PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(), - PN.getName() + ".sroa.speculated"); + const DataLayout &DL = PN.getModule()->getDataLayout(); - // Get the TBAA tag and alignment to use from one of the loads. It doesn't - // matter which one we get and if any differ, it doesn't matter. - LoadInst *SomeLoad = cast(Loads.back()); - MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa); - unsigned Align = SomeLoad->getAlignment(); + // We can only transform this if it is safe to push the loads into the + // predecessor blocks. The only thing to watch out for is that we can't put + // a possibly trapping load in the predecessor if it is a critical edge. + for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { + TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator(); + Value *InVal = PN.getIncomingValue(Idx); - // Rewrite all loads of the PN to use the new PHI. - do { - LoadInst *LI = Loads.pop_back_val(); - LI->replaceAllUsesWith(NewPN); - Pass.DeadInsts.push_back(LI); - } while (!Loads.empty()); - - // Inject loads into all of the pred blocks. - for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { - BasicBlock *Pred = PN.getIncomingBlock(Idx); - TerminatorInst *TI = Pred->getTerminator(); - Use *InUse = &PN.getOperandUse(PN.getOperandNumForIncomingValue(Idx)); - Value *InVal = PN.getIncomingValue(Idx); - IRBuilder<> PredBuilder(TI); - - LoadInst *Load - = PredBuilder.CreateLoad(InVal, (PN.getName() + ".sroa.speculate.load." + - Pred->getName())); - ++NumLoadsSpeculated; - Load->setAlignment(Align); - if (TBAATag) - Load->setMetadata(LLVMContext::MD_tbaa, TBAATag); - NewPN->addIncoming(Load, Pred); - - Instruction *Ptr = dyn_cast(InVal); - if (!Ptr) - // No uses to rewrite. - continue; + // If the value is produced by the terminator of the predecessor (an + // invoke) or it has side-effects, there is no valid place to put a load + // in the predecessor. + if (TI == InVal || TI->mayHaveSideEffects()) + return false; - // Try to lookup and rewrite any partition uses corresponding to this phi - // input. - AllocaPartitioning::iterator PI - = P.findPartitionForPHIOrSelectOperand(InUse); - if (PI == P.end()) - continue; + // If the predecessor has a single successor, then the edge isn't + // critical. + if (TI->getNumSuccessors() == 1) + continue; - // Replace the Use in the PartitionUse for this operand with the Use - // inside the load. - AllocaPartitioning::use_iterator UI - = P.findPartitionUseForPHIOrSelectOperand(InUse); - assert(isa(*UI->U->getUser())); - UI->U = &Load->getOperandUse(Load->getPointerOperandIndex()); - } - DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); - } - - /// Select instructions that use an alloca and are subsequently loaded can be - /// rewritten to load both input pointers and then select between the result, - /// allowing the load of the alloca to be promoted. 
- /// From this: - /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other - /// %V = load i32* %P2 - /// to: - /// %V1 = load i32* %Alloca -> will be mem2reg'd - /// %V2 = load i32* %Other - /// %V = select i1 %cond, i32 %V1, i32 %V2 - /// - /// We can do this to a select if its only uses are loads and if the operand - /// to the select can be loaded unconditionally. - bool isSafeSelectToSpeculate(SelectInst &SI, - SmallVectorImpl &Loads) { - Value *TValue = SI.getTrueValue(); - Value *FValue = SI.getFalseValue(); - bool TDerefable = TValue->isDereferenceablePointer(); - bool FDerefable = FValue->isDereferenceablePointer(); - - for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); - UI != UE; ++UI) { - LoadInst *LI = dyn_cast(*UI); - if (LI == 0 || !LI->isSimple()) return false; - - // Both operands to the select need to be dereferencable, either - // absolutely (e.g. allocas) or at this point because we can see other - // accesses to it. - if (!TDerefable && !isSafeToLoadUnconditionally(TValue, LI, - LI->getAlignment(), &TD)) - return false; - if (!FDerefable && !isSafeToLoadUnconditionally(FValue, LI, - LI->getAlignment(), &TD)) - return false; - Loads.push_back(LI); - } + // If this pointer is always safe to load, or if we can prove that there + // is already a load in the block, then we can move the load to the pred + // block. + if (isDereferenceablePointer(InVal, DL) || + isSafeToLoadUnconditionally(InVal, TI, MaxAlign)) + continue; - return true; + return false; } - void visitSelectInst(SelectInst &SI) { - DEBUG(dbgs() << " original: " << SI << "\n"); - IRBuilder<> IRB(&SI); - - // If the select isn't safe to speculate, just use simple logic to emit it. - SmallVector Loads; - if (!isSafeSelectToSpeculate(SI, Loads)) - return; + return true; +} - Use *Ops[2] = { &SI.getOperandUse(1), &SI.getOperandUse(2) }; - AllocaPartitioning::iterator PIs[2]; - AllocaPartitioning::PartitionUse PUs[2]; - for (unsigned i = 0, e = 2; i != e; ++i) { - PIs[i] = P.findPartitionForPHIOrSelectOperand(Ops[i]); - if (PIs[i] != P.end()) { - // If the pointer is within the partitioning, remove the select from - // its uses. We'll add in the new loads below. - AllocaPartitioning::use_iterator UI - = P.findPartitionUseForPHIOrSelectOperand(Ops[i]); - PUs[i] = *UI; - // Clear out the use here so that the offsets into the use list remain - // stable but this use is ignored when rewriting. - UI->U = 0; - } - } +static void speculatePHINodeLoads(PHINode &PN) { + DEBUG(dbgs() << " original: " << PN << "\n"); - Value *TV = SI.getTrueValue(); - Value *FV = SI.getFalseValue(); - // Replace the loads of the select with a select of two loads. - while (!Loads.empty()) { - LoadInst *LI = Loads.pop_back_val(); + Type *LoadTy = cast(PN.getType())->getElementType(); + IRBuilderTy PHIBuilder(&PN); + PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(), + PN.getName() + ".sroa.speculated"); - IRB.SetInsertPoint(LI); - LoadInst *TL = - IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true"); - LoadInst *FL = - IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false"); - NumLoadsSpeculated += 2; - - // Transfer alignment and TBAA info if present. - TL->setAlignment(LI->getAlignment()); - FL->setAlignment(LI->getAlignment()); - if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) { - TL->setMetadata(LLVMContext::MD_tbaa, Tag); - FL->setMetadata(LLVMContext::MD_tbaa, Tag); - } + // Get the AA tags and alignment to use from one of the loads. 
It doesn't + // matter which one we get and if any differ. + LoadInst *SomeLoad = cast(PN.user_back()); - Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, - LI->getName() + ".sroa.speculated"); + AAMDNodes AATags; + SomeLoad->getAAMetadata(AATags); + unsigned Align = SomeLoad->getAlignment(); - LoadInst *Loads[2] = { TL, FL }; - for (unsigned i = 0, e = 2; i != e; ++i) { - if (PIs[i] != P.end()) { - Use *LoadUse = &Loads[i]->getOperandUse(0); - assert(PUs[i].U->get() == LoadUse->get()); - PUs[i].U = LoadUse; - P.use_push_back(PIs[i], PUs[i]); - } - } + // Rewrite all loads of the PN to use the new PHI. + while (!PN.use_empty()) { + LoadInst *LI = cast(PN.user_back()); + LI->replaceAllUsesWith(NewPN); + LI->eraseFromParent(); + } - DEBUG(dbgs() << " speculated to: " << *V << "\n"); - LI->replaceAllUsesWith(V); - Pass.DeadInsts.push_back(LI); - } + // Inject loads into all of the pred blocks. + for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { + BasicBlock *Pred = PN.getIncomingBlock(Idx); + TerminatorInst *TI = Pred->getTerminator(); + Value *InVal = PN.getIncomingValue(Idx); + IRBuilderTy PredBuilder(TI); + + LoadInst *Load = PredBuilder.CreateLoad( + InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName())); + ++NumLoadsSpeculated; + Load->setAlignment(Align); + if (AATags) + Load->setAAMetadata(AATags); + NewPN->addIncoming(Load, Pred); } -}; + + DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); + PN.eraseFromParent(); } -/// \brief Accumulate the constant offsets in a GEP into a single APInt offset. +/// Select instructions that use an alloca and are subsequently loaded can be +/// rewritten to load both input pointers and then select between the result, +/// allowing the load of the alloca to be promoted. +/// From this: +/// %P2 = select i1 %cond, i32* %Alloca, i32* %Other +/// %V = load i32* %P2 +/// to: +/// %V1 = load i32* %Alloca -> will be mem2reg'd +/// %V2 = load i32* %Other +/// %V = select i1 %cond, i32 %V1, i32 %V2 /// -/// If the provided GEP is all-constant, the total byte offset formed by the -/// GEP is computed and Offset is set to it. If the GEP has any non-constant -/// operands, the function returns false and the value of Offset is unmodified. -static bool accumulateGEPOffsets(const DataLayout &TD, GEPOperator &GEP, - APInt &Offset) { - APInt GEPOffset(Offset.getBitWidth(), 0); - for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); - GTI != GTE; ++GTI) { - ConstantInt *OpC = dyn_cast(GTI.getOperand()); - if (!OpC) +/// We can do this to a select if its only uses are loads and if the operand +/// to the select can be loaded unconditionally. +static bool isSafeSelectToSpeculate(SelectInst &SI) { + Value *TValue = SI.getTrueValue(); + Value *FValue = SI.getFalseValue(); + const DataLayout &DL = SI.getModule()->getDataLayout(); + bool TDerefable = isDereferenceablePointer(TValue, DL); + bool FDerefable = isDereferenceablePointer(FValue, DL); + + for (User *U : SI.users()) { + LoadInst *LI = dyn_cast(U); + if (!LI || !LI->isSimple()) return false; - if (OpC->isZero()) continue; - - // Handle a struct index, which adds its field offset to the pointer. 
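// Sketch (not part of this patch): the arithmetic behind folding an
// all-constant GEP into a single byte offset, which the removed
// accumulateGEPOffsets did and GEPOperator::accumulateConstantOffset now
// does. A struct index contributes the field's precomputed byte offset; an
// array/vector index contributes index * element size. The Step encoding is
// illustrative, not LLVM's representation.
#include <cassert>
#include <cstdint>
#include <vector>

struct Step {
  bool IsStructField;     // True: struct field step, false: array index step.
  uint64_t IndexOrOffset; // Field byte offset, or the array index.
  uint64_t ElementSize;   // Only meaningful for array steps.
};

static uint64_t accumulateConstantOffset(const std::vector<Step> &Steps) {
  uint64_t Offset = 0;
  for (const Step &S : Steps)
    Offset += S.IsStructField ? S.IndexOrOffset
                              : S.IndexOrOffset * S.ElementSize;
  return Offset;
}

int main() {
  // struct { i32 a; i32 b[4]; }: field b at offset 4, then b[2] of 4 bytes.
  assert(accumulateConstantOffset({{true, 4, 0}, {false, 2, 4}}) == 12);
}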
- if (StructType *STy = dyn_cast(*GTI)) { - unsigned ElementIdx = OpC->getZExtValue(); - const StructLayout *SL = TD.getStructLayout(STy); - GEPOffset += APInt(Offset.getBitWidth(), - SL->getElementOffset(ElementIdx)); - continue; - } - APInt TypeSize(Offset.getBitWidth(), - TD.getTypeAllocSize(GTI.getIndexedType())); - if (VectorType *VTy = dyn_cast(*GTI)) { - assert((VTy->getScalarSizeInBits() % 8) == 0 && - "vector element size is not a multiple of 8, cannot GEP over it"); - TypeSize = VTy->getScalarSizeInBits() / 8; + // Both operands to the select need to be dereferencable, either + // absolutely (e.g. allocas) or at this point because we can see other + // accesses to it. + if (!TDerefable && + !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment())) + return false; + if (!FDerefable && + !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment())) + return false; + } + + return true; +} + +static void speculateSelectInstLoads(SelectInst &SI) { + DEBUG(dbgs() << " original: " << SI << "\n"); + + IRBuilderTy IRB(&SI); + Value *TV = SI.getTrueValue(); + Value *FV = SI.getFalseValue(); + // Replace the loads of the select with a select of two loads. + while (!SI.use_empty()) { + LoadInst *LI = cast(SI.user_back()); + assert(LI->isSimple() && "We only speculate simple loads"); + + IRB.SetInsertPoint(LI); + LoadInst *TL = + IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true"); + LoadInst *FL = + IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false"); + NumLoadsSpeculated += 2; + + // Transfer alignment and AA info if present. + TL->setAlignment(LI->getAlignment()); + FL->setAlignment(LI->getAlignment()); + + AAMDNodes Tags; + LI->getAAMetadata(Tags); + if (Tags) { + TL->setAAMetadata(Tags); + FL->setAAMetadata(Tags); } - GEPOffset += OpC->getValue().sextOrTrunc(Offset.getBitWidth()) * TypeSize; + Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, + LI->getName() + ".sroa.speculated"); + + DEBUG(dbgs() << " speculated to: " << *V << "\n"); + LI->replaceAllUsesWith(V); + LI->eraseFromParent(); } - Offset = GEPOffset; - return true; + SI.eraseFromParent(); } /// \brief Build a GEP out of a base pointer and indices. /// /// This will return the BasePtr if that is valid, or build a new GEP /// instruction using the IRBuilder if GEP-ing is needed. -static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr, - SmallVectorImpl &Indices, - const Twine &Prefix) { +static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, + SmallVectorImpl &Indices, Twine NamePrefix) { if (Indices.empty()) return BasePtr; @@ -1754,7 +1440,8 @@ static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr, if (Indices.size() == 1 && cast(Indices.back())->isZero()) return BasePtr; - return IRB.CreateInBoundsGEP(BasePtr, Indices, Prefix + ".idx"); + return IRB.CreateInBoundsGEP(nullptr, BasePtr, Indices, + NamePrefix + "sroa_idx"); } /// \brief Get a natural GEP off of the BasePtr walking through Ty toward @@ -1766,12 +1453,15 @@ static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr, /// TargetTy. If we can't find one with the same type, we at least try to use /// one with the same size. If none of that works, we just produce the GEP as /// indicated by Indices to have the correct offset. 
-static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD, +static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, Value *BasePtr, Type *Ty, Type *TargetTy, SmallVectorImpl &Indices, - const Twine &Prefix) { + Twine NamePrefix) { if (Ty == TargetTy) - return buildGEP(IRB, BasePtr, Indices, Prefix); + return buildGEP(IRB, BasePtr, Indices, NamePrefix); + + // Pointer size to use for the indices. + unsigned PtrSize = DL.getPointerTypeSizeInBits(BasePtr->getType()); // See if we can descend into a struct and locate a field with the correct // type. @@ -1780,11 +1470,13 @@ static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD, do { if (ElementTy->isPointerTy()) break; - if (SequentialType *SeqTy = dyn_cast(ElementTy)) { - ElementTy = SeqTy->getElementType(); - // Note that we use the default address space as this index is over an - // array or a vector, not a pointer. - Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(0), 0))); + + if (ArrayType *ArrayTy = dyn_cast(ElementTy)) { + ElementTy = ArrayTy->getElementType(); + Indices.push_back(IRB.getIntN(PtrSize, 0)); + } else if (VectorType *VectorTy = dyn_cast(ElementTy)) { + ElementTy = VectorTy->getElementType(); + Indices.push_back(IRB.getInt32(0)); } else if (StructType *STy = dyn_cast(ElementTy)) { if (STy->element_begin() == STy->element_end()) break; // Nothing left to descend into. @@ -1798,72 +1490,75 @@ static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD, if (ElementTy != TargetTy) Indices.erase(Indices.end() - NumLayers, Indices.end()); - return buildGEP(IRB, BasePtr, Indices, Prefix); + return buildGEP(IRB, BasePtr, Indices, NamePrefix); } /// \brief Recursively compute indices for a natural GEP. /// /// This is the recursive step for getNaturalGEPWithOffset that walks down the /// element types adding appropriate indices for the GEP. -static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD, +static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, Type *Ty, APInt &Offset, Type *TargetTy, SmallVectorImpl &Indices, - const Twine &Prefix) { + Twine NamePrefix) { if (Offset == 0) - return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices, Prefix); + return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices, + NamePrefix); // We can't recurse through pointer types. if (Ty->isPointerTy()) - return 0; + return nullptr; // We try to analyze GEPs over vectors here, but note that these GEPs are // extremely poorly defined currently. The long-term goal is to remove GEPing // over a vector from the IR completely. if (VectorType *VecTy = dyn_cast(Ty)) { - unsigned ElementSizeInBits = VecTy->getScalarSizeInBits(); - if (ElementSizeInBits % 8) - return 0; // GEPs over non-multiple of 8 size vector elements are invalid. + unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType()); + if (ElementSizeInBits % 8 != 0) { + // GEPs over non-multiple of 8 size vector elements are invalid. 
+ return nullptr; + } APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8); APInt NumSkippedElements = Offset.sdiv(ElementSize); if (NumSkippedElements.ugt(VecTy->getNumElements())) - return 0; + return nullptr; Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); - return getNaturalGEPRecursively(IRB, TD, Ptr, VecTy->getElementType(), - Offset, TargetTy, Indices, Prefix); + return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(), + Offset, TargetTy, Indices, NamePrefix); } if (ArrayType *ArrTy = dyn_cast(Ty)) { Type *ElementTy = ArrTy->getElementType(); - APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy)); + APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy)); APInt NumSkippedElements = Offset.sdiv(ElementSize); if (NumSkippedElements.ugt(ArrTy->getNumElements())) - return 0; + return nullptr; Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); - return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, - Indices, Prefix); + return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, + Indices, NamePrefix); } StructType *STy = dyn_cast(Ty); if (!STy) - return 0; + return nullptr; - const StructLayout *SL = TD.getStructLayout(STy); + const StructLayout *SL = DL.getStructLayout(STy); uint64_t StructOffset = Offset.getZExtValue(); if (StructOffset >= SL->getSizeInBytes()) - return 0; + return nullptr; unsigned Index = SL->getElementContainingOffset(StructOffset); Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index)); Type *ElementTy = STy->getElementType(Index); - if (Offset.uge(TD.getTypeAllocSize(ElementTy))) - return 0; // The offset points into alignment padding. + if (Offset.uge(DL.getTypeAllocSize(ElementTy))) + return nullptr; // The offset points into alignment padding. Indices.push_back(IRB.getInt32(Index)); - return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, - Indices, Prefix); + return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, + Indices, NamePrefix); } /// \brief Get a natural GEP from a base pointer to a particular offset and @@ -1876,29 +1571,29 @@ static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD, /// Indices, and setting Ty to the result subtype. /// /// If no natural GEP can be constructed, this function returns null. -static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD, +static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, APInt Offset, Type *TargetTy, SmallVectorImpl &Indices, - const Twine &Prefix) { + Twine NamePrefix) { PointerType *Ty = cast(Ptr->getType()); // Don't consider any GEPs through an i8* as natural unless the TargetTy is // an i8. - if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8)) - return 0; + if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8)) + return nullptr; Type *ElementTy = Ty->getElementType(); if (!ElementTy->isSized()) - return 0; // We can't GEP through an unsized element. - APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy)); + return nullptr; // We can't GEP through an unsized element. + APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy)); if (ElementSize == 0) - return 0; // Zero-length arrays can't help us build a natural GEP. + return nullptr; // Zero-length arrays can't help us build a natural GEP. 
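// Sketch (not part of this patch): the struct step of the natural-GEP search
// above, inverted from offset accumulation. Given sorted field byte offsets,
// locate the field containing Offset (what StructLayout's
// getElementContainingOffset provides) and return the remaining offset within
// that field for the recursive step. The layout vector is illustrative.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

// Returns {field index, offset remaining inside that field}.
static std::pair<unsigned, uint64_t>
elementContainingOffset(const std::vector<uint64_t> &FieldOffsets,
                        uint64_t Offset) {
  auto It = std::upper_bound(FieldOffsets.begin(), FieldOffsets.end(), Offset);
  assert(It != FieldOffsets.begin() && "Offset before the first field?");
  unsigned Index = static_cast<unsigned>(It - FieldOffsets.begin()) - 1;
  return {Index, Offset - FieldOffsets[Index]};
}

int main() {
  // struct { i32; i32; [8 x i8]; } -> field offsets 0, 4, 8.
  std::vector<uint64_t> Offsets{0, 4, 8};
  auto R = elementContainingOffset(Offsets, 6);
  assert(R.first == 1 && R.second == 2); // 2 bytes into the second field.
  R = elementContainingOffset(Offsets, 8);
  assert(R.first == 2 && R.second == 0); // Start of the array field.
}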
APInt NumSkippedElements = Offset.sdiv(ElementSize); Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); - return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, - Indices, Prefix); + return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, + Indices, NamePrefix); } /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the @@ -1913,12 +1608,11 @@ static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD, /// The strategy for finding the more natural GEPs is to peel off layers of the /// pointer, walking back through bit casts and GEPs, searching for a base /// pointer from which we can compute a natural GEP with the desired -/// properities. The algorithm tries to fold as many constant indices into +/// properties. The algorithm tries to fold as many constant indices into /// a single GEP as possible, thus making each GEP more independent of the /// surrounding code. -static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, - Value *Ptr, APInt Offset, Type *PointerTy, - const Twine &Prefix) { +static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, + APInt Offset, Type *PointerTy, Twine NamePrefix) { // Even though we don't look through PHI nodes, we could be called on an // instruction in an unreachable block, which may be on a cycle. SmallPtrSet Visited; @@ -1927,12 +1621,13 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, // We may end up computing an offset pointer that has the wrong type. If we // never are able to compute one directly that has the correct type, we'll - // fall back to it, so keep it around here. - Value *OffsetPtr = 0; + // fall back to it, so keep it and the base it was computed from around here. + Value *OffsetPtr = nullptr; + Value *OffsetBasePtr; // Remember any i8 pointer we come across to re-use if we need to do a raw // byte offset. - Value *Int8Ptr = 0; + Value *Int8Ptr = nullptr; APInt Int8PtrOffset(Offset.getBitWidth(), 0); Type *TargetTy = PointerTy->getPointerElementType(); @@ -1941,28 +1636,31 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, // First fold any existing GEPs into the offset. while (GEPOperator *GEP = dyn_cast(Ptr)) { APInt GEPOffset(Offset.getBitWidth(), 0); - if (!accumulateGEPOffsets(TD, *GEP, GEPOffset)) + if (!GEP->accumulateConstantOffset(DL, GEPOffset)) break; Offset += GEPOffset; Ptr = GEP->getPointerOperand(); - if (!Visited.insert(Ptr)) + if (!Visited.insert(Ptr).second) break; } // See if we can perform a natural GEP here. Indices.clear(); - if (Value *P = getNaturalGEPWithOffset(IRB, TD, Ptr, Offset, TargetTy, - Indices, Prefix)) { - if (P->getType() == PointerTy) { - // Zap any offset pointer that we ended up computing in previous rounds. - if (OffsetPtr && OffsetPtr->use_empty()) - if (Instruction *I = dyn_cast(OffsetPtr)) - I->eraseFromParent(); + if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy, + Indices, NamePrefix)) { + // If we have a new natural pointer at the offset, clear out any old + // offset pointer we computed. Unless it is the base pointer or + // a non-instruction, we built a GEP we don't need. Zap it. + if (OffsetPtr && OffsetPtr != OffsetBasePtr) + if (Instruction *I = dyn_cast(OffsetPtr)) { + assert(I->use_empty() && "Built a GEP with uses some how!"); + I->eraseFromParent(); + } + OffsetPtr = P; + OffsetBasePtr = Ptr; + // If we also found a pointer of the right type, we're done. 
+ if (P->getType() == PointerTy) return P; - } - if (!OffsetPtr) { - OffsetPtr = P; - } } // Stash this pointer if we've found an i8*. @@ -1982,28 +1680,52 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, break; } assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!"); - } while (Visited.insert(Ptr)); + } while (Visited.insert(Ptr).second); if (!OffsetPtr) { if (!Int8Ptr) { - Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(), - Prefix + ".raw_cast"); + Int8Ptr = IRB.CreateBitCast( + Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()), + NamePrefix + "sroa_raw_cast"); Int8PtrOffset = Offset; } - OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr : - IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset), - Prefix + ".raw_idx"); + OffsetPtr = Int8PtrOffset == 0 + ? Int8Ptr + : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr, + IRB.getInt(Int8PtrOffset), + NamePrefix + "sroa_raw_idx"); } Ptr = OffsetPtr; // On the off chance we were targeting i8*, guard the bitcast here. if (Ptr->getType() != PointerTy) - Ptr = IRB.CreateBitCast(Ptr, PointerTy, Prefix + ".cast"); + Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast"); return Ptr; } +/// \brief Compute the adjusted alignment for a load or store from an offset. +static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset, + const DataLayout &DL) { + unsigned Alignment; + Type *Ty; + if (auto *LI = dyn_cast(I)) { + Alignment = LI->getAlignment(); + Ty = LI->getType(); + } else if (auto *SI = dyn_cast(I)) { + Alignment = SI->getAlignment(); + Ty = SI->getValueOperand()->getType(); + } else { + llvm_unreachable("Only loads and stores are allowed!"); + } + + if (!Alignment) + Alignment = DL.getABITypeAlignment(Ty); + + return MinAlign(Alignment, Offset); +} + /// \brief Test whether we can convert a value from the old to the new type. /// /// This predicate should be used to guard calls to convertValue in order to @@ -2013,11 +1735,26 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { if (OldTy == NewTy) return true; + + // For integer types, we can't handle any bit-width differences. This would + // break both vector conversions with extension and introduce endianness + // issues when in conjunction with loads and stores. + if (isa(OldTy) && isa(NewTy)) { + assert(cast(OldTy)->getBitWidth() != + cast(NewTy)->getBitWidth() && + "We can't have the same bitwidth for different int types"); + return false; + } + if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy)) return false; if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) return false; + // We can convert pointers to integers and vice-versa. Same for vectors + // of pointers and integers. + OldTy = OldTy->getScalarType(); + NewTy = NewTy->getScalarType(); if (NewTy->isPointerTy() || OldTy->isPointerTy()) { if (NewTy->isPointerTy() && OldTy->isPointerTy()) return true; @@ -2035,21 +1772,127 @@ static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { /// This will try various different casting techniques, such as bitcasts, /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test /// two types for viability with this routine. 
-static Value *convertValue(const DataLayout &DL, IRBuilder<> &IRB, Value *V, - Type *Ty) { - assert(canConvertValue(DL, V->getType(), Ty) && - "Value not convertable to type"); - if (V->getType() == Ty) +static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V, + Type *NewTy) { + Type *OldTy = V->getType(); + assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type"); + + if (OldTy == NewTy) return V; - if (V->getType()->isIntegerTy() && Ty->isPointerTy()) - return IRB.CreateIntToPtr(V, Ty); - if (V->getType()->isPointerTy() && Ty->isIntegerTy()) - return IRB.CreatePtrToInt(V, Ty); - return IRB.CreateBitCast(V, Ty); + assert(!(isa(OldTy) && isa(NewTy)) && + "Integer types must be the exact same to convert."); + + // See if we need inttoptr for this type pair. A cast involving both scalars + // and vectors requires and additional bitcast. + if (OldTy->getScalarType()->isIntegerTy() && + NewTy->getScalarType()->isPointerTy()) { + // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8* + if (OldTy->isVectorTy() && !NewTy->isVectorTy()) + return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), + NewTy); + + // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*> + if (!OldTy->isVectorTy() && NewTy->isVectorTy()) + return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), + NewTy); + + return IRB.CreateIntToPtr(V, NewTy); + } + + // See if we need ptrtoint for this type pair. A cast involving both scalars + // and vectors requires and additional bitcast. + if (OldTy->getScalarType()->isPointerTy() && + NewTy->getScalarType()->isIntegerTy()) { + // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128 + if (OldTy->isVectorTy() && !NewTy->isVectorTy()) + return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), + NewTy); + + // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32> + if (!OldTy->isVectorTy() && NewTy->isVectorTy()) + return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), + NewTy); + + return IRB.CreatePtrToInt(V, NewTy); + } + + return IRB.CreateBitCast(V, NewTy); +} + +/// \brief Test whether the given slice use can be promoted to a vector. +/// +/// This function is called to test each entry in a partition which is slated +/// for a single slice. +static bool isVectorPromotionViableForSlice(AllocaSlices::Partition &P, + const Slice &S, VectorType *Ty, + uint64_t ElementSize, + const DataLayout &DL) { + // First validate the slice offsets. + uint64_t BeginOffset = + std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset(); + uint64_t BeginIndex = BeginOffset / ElementSize; + if (BeginIndex * ElementSize != BeginOffset || + BeginIndex >= Ty->getNumElements()) + return false; + uint64_t EndOffset = + std::min(S.endOffset(), P.endOffset()) - P.beginOffset(); + uint64_t EndIndex = EndOffset / ElementSize; + if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements()) + return false; + + assert(EndIndex > BeginIndex && "Empty vector!"); + uint64_t NumElements = EndIndex - BeginIndex; + Type *SliceTy = (NumElements == 1) + ? Ty->getElementType() + : VectorType::get(Ty->getElementType(), NumElements); + + Type *SplitIntTy = + Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8); + + Use *U = S.getUse(); + + if (MemIntrinsic *MI = dyn_cast(U->getUser())) { + if (MI->isVolatile()) + return false; + if (!S.isSplittable()) + return false; // Skip any unsplittable intrinsics. 
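// Sketch (not part of this patch): the arithmetic behind getAdjustedAlignment
// a little earlier in this hunk. An access Offset bytes past a pointer
// aligned to Align is aligned to the largest power of two dividing both,
// which is the lowest set bit of (Align | Offset). Assumes Align is a
// nonzero power of two, as it is after the ABI-alignment fallback.
#include <cassert>
#include <cstdint>

static uint64_t minAlign(uint64_t Align, uint64_t Offset) {
  uint64_t Bits = Align | Offset;
  return Bits & (~Bits + 1); // Isolate the lowest set bit.
}

int main() {
  assert(minAlign(16, 0) == 16); // Offset 0 keeps the full alignment.
  assert(minAlign(16, 4) == 4);  // A 4-byte offset drops it to 4.
  assert(minAlign(8, 12) == 4);  // gcd-of-powers-of-two behaviour.
}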
+ } else if (IntrinsicInst *II = dyn_cast(U->getUser())) { + if (II->getIntrinsicID() != Intrinsic::lifetime_start && + II->getIntrinsicID() != Intrinsic::lifetime_end) + return false; + } else if (U->get()->getType()->getPointerElementType()->isStructTy()) { + // Disable vector promotion when there are loads or stores of an FCA. + return false; + } else if (LoadInst *LI = dyn_cast(U->getUser())) { + if (LI->isVolatile()) + return false; + Type *LTy = LI->getType(); + if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { + assert(LTy->isIntegerTy()); + LTy = SplitIntTy; + } + if (!canConvertValue(DL, SliceTy, LTy)) + return false; + } else if (StoreInst *SI = dyn_cast(U->getUser())) { + if (SI->isVolatile()) + return false; + Type *STy = SI->getValueOperand()->getType(); + if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { + assert(STy->isIntegerTy()); + STy = SplitIntTy; + } + if (!canConvertValue(DL, STy, SliceTy)) + return false; + } else { + return false; + } + + return true; } -/// \brief Test whether the given alloca partition can be promoted to a vector. +/// \brief Test whether the given alloca partitioning and range of slices can be +/// promoted to a vector. /// /// This is a quick test to check whether we can rewrite a particular alloca /// partition (and its newly formed alloca) into a vector alloca with only @@ -2057,66 +1900,184 @@ static Value *convertValue(const DataLayout &DL, IRBuilder<> &IRB, Value *V, /// SSA value. We only can ensure this for a limited set of operations, and we /// don't want to do the rewrites unless we are confident that the result will /// be promotable, so we have an early test here. -static bool isVectorPromotionViable(const DataLayout &TD, - Type *AllocaTy, - AllocaPartitioning &P, - uint64_t PartitionBeginOffset, - uint64_t PartitionEndOffset, - AllocaPartitioning::const_use_iterator I, - AllocaPartitioning::const_use_iterator E) { - VectorType *Ty = dyn_cast(AllocaTy); - if (!Ty) - return false; +static VectorType *isVectorPromotionViable(AllocaSlices::Partition &P, + const DataLayout &DL) { + // Collect the candidate types for vector-based promotion. Also track whether + // we have different element types. + SmallVector CandidateTys; + Type *CommonEltTy = nullptr; + bool HaveCommonEltTy = true; + auto CheckCandidateType = [&](Type *Ty) { + if (auto *VTy = dyn_cast(Ty)) { + CandidateTys.push_back(VTy); + if (!CommonEltTy) + CommonEltTy = VTy->getElementType(); + else if (CommonEltTy != VTy->getElementType()) + HaveCommonEltTy = false; + } + }; + // Consider any loads or stores that are the exact size of the slice. + for (const Slice &S : P) + if (S.beginOffset() == P.beginOffset() && + S.endOffset() == P.endOffset()) { + if (auto *LI = dyn_cast(S.getUse()->getUser())) + CheckCandidateType(LI->getType()); + else if (auto *SI = dyn_cast(S.getUse()->getUser())) + CheckCandidateType(SI->getValueOperand()->getType()); + } + + // If we didn't find a vector type, nothing to do here. + if (CandidateTys.empty()) + return nullptr; + + // Remove non-integer vector types if we had multiple common element types. + // FIXME: It'd be nice to replace them with integer vector types, but we can't + // do that until all the backends are known to produce good code for all + // integer vector types. 
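
The candidate-collection step records every vector type seen at a covering load or store and, at the same time, tracks whether all of them agree on one element type. The same collect-and-compare shape, sketched with a small stand-in struct instead of llvm::VectorType (the names and data here are illustrative, not LLVM API):

#include <iostream>
#include <string>
#include <vector>

struct VecTy { std::string Elt; unsigned NumElts; };  // stand-in for a vector type

int main() {
  std::vector<VecTy> CandidateTys;
  std::string CommonEltTy;        // empty means "none seen yet"
  bool HaveCommonEltTy = true;

  auto CheckCandidateType = [&](const VecTy &Ty) {
    CandidateTys.push_back(Ty);
    if (CommonEltTy.empty())
      CommonEltTy = Ty.Elt;       // first candidate fixes the element type
    else if (CommonEltTy != Ty.Elt)
      HaveCommonEltTy = false;    // disagreement; remembered for later filtering
  };

  // Pretend these came from loads/stores that exactly cover the partition.
  CheckCandidateType({"i32", 4});
  CheckCandidateType({"float", 4});

  std::cout << "candidates: " << CandidateTys.size()
            << ", common element type: " << (HaveCommonEltTy ? "yes" : "no") << "\n";
}
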
+ if (!HaveCommonEltTy) { + CandidateTys.erase(std::remove_if(CandidateTys.begin(), CandidateTys.end(), + [](VectorType *VTy) { + return !VTy->getElementType()->isIntegerTy(); + }), + CandidateTys.end()); + + // If there were no integer vector types, give up. + if (CandidateTys.empty()) + return nullptr; + + // Rank the remaining candidate vector types. This is easy because we know + // they're all integer vectors. We sort by ascending number of elements. + auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) { + assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) && + "Cannot have vector types of different sizes!"); + assert(RHSTy->getElementType()->isIntegerTy() && + "All non-integer types eliminated!"); + assert(LHSTy->getElementType()->isIntegerTy() && + "All non-integer types eliminated!"); + return RHSTy->getNumElements() < LHSTy->getNumElements(); + }; + std::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes); + CandidateTys.erase( + std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes), + CandidateTys.end()); + } else { +// The only way to have the same element type in every vector type is to +// have the same vector type. Check that and remove all but one. +#ifndef NDEBUG + for (VectorType *VTy : CandidateTys) { + assert(VTy->getElementType() == CommonEltTy && + "Unaccounted for element type!"); + assert(VTy == CandidateTys[0] && + "Different vector types with the same element type!"); + } +#endif + CandidateTys.resize(1); + } - uint64_t VecSize = TD.getTypeSizeInBits(Ty); - uint64_t ElementSize = Ty->getScalarSizeInBits(); + // Try each vector type, and return the one which works. + auto CheckVectorTypeForPromotion = [&](VectorType *VTy) { + uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType()); - // While the definition of LLVM vectors is bitpacked, we don't support sizes - // that aren't byte sized. - if (ElementSize % 8) - return false; - assert((VecSize % 8) == 0 && "vector size not a multiple of element size?"); - VecSize /= 8; - ElementSize /= 8; - - for (; I != E; ++I) { - if (!I->U) - continue; // Skip dead use. - - uint64_t BeginOffset = I->BeginOffset - PartitionBeginOffset; - uint64_t BeginIndex = BeginOffset / ElementSize; - if (BeginIndex * ElementSize != BeginOffset || - BeginIndex >= Ty->getNumElements()) - return false; - uint64_t EndOffset = I->EndOffset - PartitionBeginOffset; - uint64_t EndIndex = EndOffset / ElementSize; - if (EndIndex * ElementSize != EndOffset || - EndIndex > Ty->getNumElements()) + // While the definition of LLVM vectors is bitpacked, we don't support sizes + // that aren't byte sized. + if (ElementSize % 8) return false; + assert((DL.getTypeSizeInBits(VTy) % 8) == 0 && + "vector size not a multiple of element size?"); + ElementSize /= 8; - // FIXME: We should build shuffle vector instructions to handle - // non-element-sized accesses. 
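
The ranking of the remaining candidates is the standard sort-then-dedup idiom. A sketch of the same mechanics with element counts standing in for the vector types, and an explicit equivalence predicate for the unique step (hypothetical data, not the pass's types):

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  // Element counts of candidate integer vector types, e.g. <2 x i64>, <4 x i32>, ...
  std::vector<unsigned> Candidates = {4, 2, 8, 4, 2};

  auto Rank = [](unsigned LHS, unsigned RHS) { return LHS < RHS; };

  std::sort(Candidates.begin(), Candidates.end(), Rank);
  // After sorting, equivalent candidates are adjacent, so std::unique can
  // collapse them; "equivalent" here means neither ranks below the other.
  Candidates.erase(
      std::unique(Candidates.begin(), Candidates.end(),
                  [&](unsigned A, unsigned B) { return !Rank(A, B) && !Rank(B, A); }),
      Candidates.end());

  for (unsigned N : Candidates)
    std::cout << N << " ";   // prints: 2 4 8
  std::cout << "\n";
}
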
- if ((EndOffset - BeginOffset) != ElementSize && - (EndOffset - BeginOffset) != VecSize) - return false; + for (const Slice &S : P) + if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL)) + return false; - if (MemIntrinsic *MI = dyn_cast(I->U->getUser())) { - if (MI->isVolatile()) + for (const Slice *S : P.splitSliceTails()) + if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL)) return false; - if (MemTransferInst *MTI = dyn_cast(I->U->getUser())) { - const AllocaPartitioning::MemTransferOffsets &MTO - = P.getMemTransferOffsets(*MTI); - if (!MTO.IsSplittable) - return false; - } - } else if (I->U->get()->getType()->getPointerElementType()->isStructTy()) { - // Disable vector promotion when there are loads or stores of an FCA. + + return true; + }; + for (VectorType *VTy : CandidateTys) + if (CheckVectorTypeForPromotion(VTy)) + return VTy; + + return nullptr; +} + +/// \brief Test whether a slice of an alloca is valid for integer widening. +/// +/// This implements the necessary checking for the \c isIntegerWideningViable +/// test below on a single slice of the alloca. +static bool isIntegerWideningViableForSlice(const Slice &S, + uint64_t AllocBeginOffset, + Type *AllocaTy, + const DataLayout &DL, + bool &WholeAllocaOp) { + uint64_t Size = DL.getTypeStoreSize(AllocaTy); + + uint64_t RelBegin = S.beginOffset() - AllocBeginOffset; + uint64_t RelEnd = S.endOffset() - AllocBeginOffset; + + // We can't reasonably handle cases where the load or store extends past + // the end of the alloca's type and into its padding. + if (RelEnd > Size) + return false; + + Use *U = S.getUse(); + + if (LoadInst *LI = dyn_cast(U->getUser())) { + if (LI->isVolatile()) + return false; + // We can't handle loads that extend past the allocated memory. + if (DL.getTypeStoreSize(LI->getType()) > Size) + return false; + // Note that we don't count vector loads or stores as whole-alloca + // operations which enable integer widening because we would prefer to use + // vector widening instead. + if (!isa(LI->getType()) && RelBegin == 0 && RelEnd == Size) + WholeAllocaOp = true; + if (IntegerType *ITy = dyn_cast(LI->getType())) { + if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) + return false; + } else if (RelBegin != 0 || RelEnd != Size || + !canConvertValue(DL, AllocaTy, LI->getType())) { + // Non-integer loads need to be convertible from the alloca type so that + // they are promotable. return false; - } else if (!isa(I->U->getUser()) && - !isa(I->U->getUser())) { + } + } else if (StoreInst *SI = dyn_cast(U->getUser())) { + Type *ValueTy = SI->getValueOperand()->getType(); + if (SI->isVolatile()) + return false; + // We can't handle stores that extend past the allocated memory. + if (DL.getTypeStoreSize(ValueTy) > Size) + return false; + // Note that we don't count vector loads or stores as whole-alloca + // operations which enable integer widening because we would prefer to use + // vector widening instead. + if (!isa(ValueTy) && RelBegin == 0 && RelEnd == Size) + WholeAllocaOp = true; + if (IntegerType *ITy = dyn_cast(ValueTy)) { + if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) + return false; + } else if (RelBegin != 0 || RelEnd != Size || + !canConvertValue(DL, ValueTy, AllocaTy)) { + // Non-integer stores need to be convertible to the alloca type so that + // they are promotable. 
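
The per-slice widening check boils down to byte arithmetic relative to the alloca: rebase the slice onto the alloca, refuse anything that reaches into tail padding, and remember whether some plain access covers the whole thing. A standalone sketch of just that arithmetic, with invented sizes and a made-up function name:

#include <cstdint>
#include <iostream>

// StoreSize is the number of bytes actually stored for the alloca's type,
// i.e. excluding any tail padding implied by the allocation size.
static bool sliceOkForWidening(uint64_t SliceBegin, uint64_t SliceEnd,
                               uint64_t AllocBegin, uint64_t StoreSize,
                               bool &WholeAllocaOp) {
  uint64_t RelBegin = SliceBegin - AllocBegin;
  uint64_t RelEnd = SliceEnd - AllocBegin;

  if (RelEnd > StoreSize)
    return false;                 // extends past the type and into padding

  if (RelBegin == 0 && RelEnd == StoreSize)
    WholeAllocaOp = true;         // a covering access: widening can be anchored on it

  return true;
}

int main() {
  bool Whole = false;
  std::cout << sliceOkForWidening(8, 16, 8, 8, Whole) << " " << Whole << "\n";  // 1 1
  std::cout << sliceOkForWidening(12, 20, 8, 8, Whole) << "\n";                 // 0
}
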
return false; } + } else if (MemIntrinsic *MI = dyn_cast(U->getUser())) { + if (MI->isVolatile() || !isa(MI->getLength())) + return false; + if (!S.isSplittable()) + return false; // Skip any unsplittable intrinsics. + } else if (IntrinsicInst *II = dyn_cast(U->getUser())) { + if (II->getIntrinsicID() != Intrinsic::lifetime_start && + II->getIntrinsicID() != Intrinsic::lifetime_end) + return false; + } else { + return false; } + return true; } @@ -2126,152 +2087,203 @@ static bool isVectorPromotionViable(const DataLayout &TD, /// This is a quick test to check whether we can rewrite the integer loads and /// stores to a particular alloca into wider loads and stores and be able to /// promote the resulting alloca. -static bool isIntegerWideningViable(const DataLayout &TD, - Type *AllocaTy, - uint64_t AllocBeginOffset, - AllocaPartitioning &P, - AllocaPartitioning::const_use_iterator I, - AllocaPartitioning::const_use_iterator E) { - uint64_t SizeInBits = TD.getTypeSizeInBits(AllocaTy); +static bool isIntegerWideningViable(AllocaSlices::Partition &P, Type *AllocaTy, + const DataLayout &DL) { + uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy); + // Don't create integer types larger than the maximum bitwidth. + if (SizeInBits > IntegerType::MAX_INT_BITS) + return false; // Don't try to handle allocas with bit-padding. - if (SizeInBits != TD.getTypeStoreSizeInBits(AllocaTy)) + if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy)) return false; - uint64_t Size = TD.getTypeStoreSize(AllocaTy); - - // Check the uses to ensure the uses are (likely) promoteable integer uses. - // Also ensure that the alloca has a covering load or store. We don't want - // to widen the integer operotains only to fail to promote due to some other - // unsplittable entry (which we may make splittable later). - bool WholeAllocaOp = false; - for (; I != E; ++I) { - if (!I->U) - continue; // Skip dead use. - - uint64_t RelBegin = I->BeginOffset - AllocBeginOffset; - uint64_t RelEnd = I->EndOffset - AllocBeginOffset; + // We need to ensure that an integer type with the appropriate bitwidth can + // be converted to the alloca type, whatever that is. We don't want to force + // the alloca itself to have an integer type if there is a more suitable one. + Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits); + if (!canConvertValue(DL, AllocaTy, IntTy) || + !canConvertValue(DL, IntTy, AllocaTy)) + return false; - // We can't reasonably handle cases where the load or store extends past - // the end of the aloca's type and into its padding. - if (RelEnd > Size) + // While examining uses, we ensure that the alloca has a covering load or + // store. We don't want to widen the integer operations only to fail to + // promote due to some other unsplittable entry (which we may make splittable + // later). However, if there are only splittable uses, go ahead and assume + // that we cover the alloca. + // FIXME: We shouldn't consider split slices that happen to start in the + // partition here... + bool WholeAllocaOp = + P.begin() != P.end() ? 
false : DL.isLegalInteger(SizeInBits); + + for (const Slice &S : P) + if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL, + WholeAllocaOp)) return false; - if (LoadInst *LI = dyn_cast(I->U->getUser())) { - if (LI->isVolatile()) - return false; - if (RelBegin == 0 && RelEnd == Size) - WholeAllocaOp = true; - if (IntegerType *ITy = dyn_cast(LI->getType())) { - if (ITy->getBitWidth() < TD.getTypeStoreSize(ITy)) - return false; - continue; - } - // Non-integer loads need to be convertible from the alloca type so that - // they are promotable. - if (RelBegin != 0 || RelEnd != Size || - !canConvertValue(TD, AllocaTy, LI->getType())) - return false; - } else if (StoreInst *SI = dyn_cast(I->U->getUser())) { - Type *ValueTy = SI->getValueOperand()->getType(); - if (SI->isVolatile()) - return false; - if (RelBegin == 0 && RelEnd == Size) - WholeAllocaOp = true; - if (IntegerType *ITy = dyn_cast(ValueTy)) { - if (ITy->getBitWidth() < TD.getTypeStoreSize(ITy)) - return false; - continue; - } - // Non-integer stores need to be convertible to the alloca type so that - // they are promotable. - if (RelBegin != 0 || RelEnd != Size || - !canConvertValue(TD, ValueTy, AllocaTy)) - return false; - } else if (MemIntrinsic *MI = dyn_cast(I->U->getUser())) { - if (MI->isVolatile()) - return false; - if (MemTransferInst *MTI = dyn_cast(I->U->getUser())) { - const AllocaPartitioning::MemTransferOffsets &MTO - = P.getMemTransferOffsets(*MTI); - if (!MTO.IsSplittable) - return false; - } - } else if (IntrinsicInst *II = dyn_cast(I->U->getUser())) { - if (II->getIntrinsicID() != Intrinsic::lifetime_start && - II->getIntrinsicID() != Intrinsic::lifetime_end) - return false; - } else { + for (const Slice *S : P.splitSliceTails()) + if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL, + WholeAllocaOp)) return false; - } - } + return WholeAllocaOp; } -static Value *extractInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *V, +static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, IntegerType *Ty, uint64_t Offset, const Twine &Name) { + DEBUG(dbgs() << " start: " << *V << "\n"); IntegerType *IntTy = cast(V->getType()); assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && "Element extends past full value"); - uint64_t ShAmt = 8*Offset; + uint64_t ShAmt = 8 * Offset; if (DL.isBigEndian()) - ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); - if (ShAmt) + ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); + if (ShAmt) { V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); + DEBUG(dbgs() << " shifted: " << *V << "\n"); + } assert(Ty->getBitWidth() <= IntTy->getBitWidth() && "Cannot extract to a larger integer!"); - if (Ty != IntTy) + if (Ty != IntTy) { V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); + DEBUG(dbgs() << " trunced: " << *V << "\n"); + } return V; } -static Value *insertInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *Old, +static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, Value *V, uint64_t Offset, const Twine &Name) { IntegerType *IntTy = cast(Old->getType()); IntegerType *Ty = cast(V->getType()); assert(Ty->getBitWidth() <= IntTy->getBitWidth() && "Cannot insert a larger integer!"); - if (Ty != IntTy) + DEBUG(dbgs() << " start: " << *V << "\n"); + if (Ty != IntTy) { V = IRB.CreateZExt(V, IntTy, Name + ".ext"); + DEBUG(dbgs() << " extended: " << *V << "\n"); + } assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && "Element store 
outside of alloca store"); - uint64_t ShAmt = 8*Offset; + uint64_t ShAmt = 8 * Offset; if (DL.isBigEndian()) - ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); - if (ShAmt) + ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); + if (ShAmt) { V = IRB.CreateShl(V, ShAmt, Name + ".shift"); + DEBUG(dbgs() << " shifted: " << *V << "\n"); + } if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); Old = IRB.CreateAnd(Old, Mask, Name + ".mask"); + DEBUG(dbgs() << " masked: " << *Old << "\n"); V = IRB.CreateOr(Old, V, Name + ".insert"); + DEBUG(dbgs() << " inserted: " << *V << "\n"); + } + return V; +} + +static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex, + unsigned EndIndex, const Twine &Name) { + VectorType *VecTy = cast(V->getType()); + unsigned NumElements = EndIndex - BeginIndex; + assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); + + if (NumElements == VecTy->getNumElements()) + return V; + + if (NumElements == 1) { + V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex), + Name + ".extract"); + DEBUG(dbgs() << " extract: " << *V << "\n"); + return V; + } + + SmallVector Mask; + Mask.reserve(NumElements); + for (unsigned i = BeginIndex; i != EndIndex; ++i) + Mask.push_back(IRB.getInt32(i)); + V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), + ConstantVector::get(Mask), Name + ".extract"); + DEBUG(dbgs() << " shuffle: " << *V << "\n"); + return V; +} + +static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V, + unsigned BeginIndex, const Twine &Name) { + VectorType *VecTy = cast(Old->getType()); + assert(VecTy && "Can only insert a vector into a vector"); + + VectorType *Ty = dyn_cast(V->getType()); + if (!Ty) { + // Single element to insert. + V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex), + Name + ".insert"); + DEBUG(dbgs() << " insert: " << *V << "\n"); + return V; } + + assert(Ty->getNumElements() <= VecTy->getNumElements() && + "Too many elements!"); + if (Ty->getNumElements() == VecTy->getNumElements()) { + assert(V->getType() == VecTy && "Vector type mismatch"); + return V; + } + unsigned EndIndex = BeginIndex + Ty->getNumElements(); + + // When inserting a smaller vector into the larger to store, we first + // use a shuffle vector to widen it with undef elements, and then + // a second shuffle vector to select between the loaded vector and the + // incoming vector. + SmallVector Mask; + Mask.reserve(VecTy->getNumElements()); + for (unsigned i = 0; i != VecTy->getNumElements(); ++i) + if (i >= BeginIndex && i < EndIndex) + Mask.push_back(IRB.getInt32(i - BeginIndex)); + else + Mask.push_back(UndefValue::get(IRB.getInt32Ty())); + V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), + ConstantVector::get(Mask), Name + ".expand"); + DEBUG(dbgs() << " shuffle: " << *V << "\n"); + + Mask.clear(); + for (unsigned i = 0; i != VecTy->getNumElements(); ++i) + Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex)); + + V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend"); + + DEBUG(dbgs() << " blend: " << *V << "\n"); return V; } namespace { -/// \brief Visitor to rewrite instructions using a partition of an alloca to -/// use a new alloca. +/// \brief Visitor to rewrite instructions using p particular slice of an alloca +/// to use a new alloca. 
/// /// Also implements the rewriting to vector-based accesses when the partition /// passes the isVectorPromotionViable predicate. Most of the rewriting logic /// lives here. -class AllocaPartitionRewriter : public InstVisitor { +class AllocaSliceRewriter : public InstVisitor { // Befriend the base class so it can delegate to private visit methods. - friend class llvm::InstVisitor; + friend class llvm::InstVisitor; + typedef llvm::InstVisitor Base; - const DataLayout &TD; - AllocaPartitioning &P; + const DataLayout &DL; + AllocaSlices &AS; SROA &Pass; AllocaInst &OldAI, &NewAI; const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset; Type *NewAllocaTy; + // This is a convenience and flag variable that will be null unless the new + // alloca's integer operations should be widened to this integer type due to + // passing isIntegerWideningViable above. If it is non-null, the desired + // integer type will be stored here for easy access during rewriting. + IntegerType *IntTy; + // If we are rewriting an alloca partition which can be written as pure // vector operations, we stash extra information here. When VecTy is - // non-null, we have some strict guarantees about the rewriten alloca: + // non-null, we have some strict guarantees about the rewritten alloca: // - The new alloca is exactly the size of the vector type here. // - The accesses all either map to the entire vector or to a single // element. @@ -2282,249 +2294,309 @@ class AllocaPartitionRewriter : public InstVisitor &PHIUsers; + SmallPtrSetImpl &SelectUsers; + + // Utility IR builder, whose name prefix is setup for each visited use, and + // the insertion point is set to point to the user. + IRBuilderTy IRB; public: - AllocaPartitionRewriter(const DataLayout &TD, AllocaPartitioning &P, - AllocaPartitioning::iterator PI, - SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI, - uint64_t NewBeginOffset, uint64_t NewEndOffset) - : TD(TD), P(P), Pass(Pass), - OldAI(OldAI), NewAI(NewAI), - NewAllocaBeginOffset(NewBeginOffset), - NewAllocaEndOffset(NewEndOffset), - NewAllocaTy(NewAI.getAllocatedType()), - VecTy(), ElementTy(), ElementSize(), IntTy(), - BeginOffset(), EndOffset() { - } - - /// \brief Visit the users of the alloca partition and rewrite them. - bool visitUsers(AllocaPartitioning::const_use_iterator I, - AllocaPartitioning::const_use_iterator E) { - if (isVectorPromotionViable(TD, NewAI.getAllocatedType(), P, - NewAllocaBeginOffset, NewAllocaEndOffset, - I, E)) { - ++NumVectorized; - VecTy = cast(NewAI.getAllocatedType()); - ElementTy = VecTy->getElementType(); - assert((VecTy->getScalarSizeInBits() % 8) == 0 && + AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass, + AllocaInst &OldAI, AllocaInst &NewAI, + uint64_t NewAllocaBeginOffset, + uint64_t NewAllocaEndOffset, bool IsIntegerPromotable, + VectorType *PromotableVecTy, + SmallPtrSetImpl &PHIUsers, + SmallPtrSetImpl &SelectUsers) + : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI), + NewAllocaBeginOffset(NewAllocaBeginOffset), + NewAllocaEndOffset(NewAllocaEndOffset), + NewAllocaTy(NewAI.getAllocatedType()), + IntTy(IsIntegerPromotable + ? Type::getIntNTy( + NewAI.getContext(), + DL.getTypeSizeInBits(NewAI.getAllocatedType())) + : nullptr), + VecTy(PromotableVecTy), + ElementTy(VecTy ? VecTy->getElementType() : nullptr), + ElementSize(VecTy ? 
DL.getTypeSizeInBits(ElementTy) / 8 : 0), + BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(), + OldPtr(), PHIUsers(PHIUsers), SelectUsers(SelectUsers), + IRB(NewAI.getContext(), ConstantFolder()) { + if (VecTy) { + assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 && "Only multiple-of-8 sized vector elements are viable"); - ElementSize = VecTy->getScalarSizeInBits() / 8; - } else if (isIntegerWideningViable(TD, NewAI.getAllocatedType(), - NewAllocaBeginOffset, P, I, E)) { - IntTy = Type::getIntNTy(NewAI.getContext(), - TD.getTypeSizeInBits(NewAI.getAllocatedType())); + ++NumVectorized; } + assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy)); + } + + bool visit(AllocaSlices::const_iterator I) { bool CanSROA = true; - for (; I != E; ++I) { - if (!I->U) - continue; // Skip dead uses. - BeginOffset = I->BeginOffset; - EndOffset = I->EndOffset; - OldUse = I->U; - OldPtr = cast(I->U->get()); - NamePrefix = (Twine(NewAI.getName()) + "." + Twine(BeginOffset)).str(); - CanSROA &= visit(cast(I->U->getUser())); - } - if (VecTy) { + BeginOffset = I->beginOffset(); + EndOffset = I->endOffset(); + IsSplittable = I->isSplittable(); + IsSplit = + BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; + DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : "")); + DEBUG(AS.printSlice(dbgs(), I, "")); + DEBUG(dbgs() << "\n"); + + // Compute the intersecting offset range. + assert(BeginOffset < NewAllocaEndOffset); + assert(EndOffset > NewAllocaBeginOffset); + NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); + NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); + + SliceSize = NewEndOffset - NewBeginOffset; + + OldUse = I->getUse(); + OldPtr = cast(OldUse->get()); + + Instruction *OldUserI = cast(OldUse->getUser()); + IRB.SetInsertPoint(OldUserI); + IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc()); + IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + "."); + + CanSROA &= visit(cast(OldUse->getUser())); + if (VecTy || IntTy) assert(CanSROA); - VecTy = 0; - ElementTy = 0; - ElementSize = 0; - } - if (IntTy) { - assert(CanSROA); - IntTy = 0; - } return CanSROA; } private: + // Make sure the other visit overloads are visible. + using Base::visit; + // Every instruction which can end up as a user must have a rewrite rule. bool visitInstruction(Instruction &I) { DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n"); llvm_unreachable("No rewrite rule for this instruction!"); } - Twine getName(const Twine &Suffix) { - return NamePrefix + Suffix; - } + Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) { + // Note that the offset computation can use BeginOffset or NewBeginOffset + // interchangeably for unsplit slices. + assert(IsSplit || BeginOffset == NewBeginOffset); + uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; - Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) { - assert(BeginOffset >= NewAllocaBeginOffset); - unsigned AS = cast(PointerTy)->getAddressSpace(); - APInt Offset(TD.getPointerSizeInBits(AS), BeginOffset - NewAllocaBeginOffset); - return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName("")); - } +#ifndef NDEBUG + StringRef OldName = OldPtr->getName(); + // Skip through the last '.sroa.' component of the name. + size_t LastSROAPrefix = OldName.rfind(".sroa."); + if (LastSROAPrefix != StringRef::npos) { + OldName = OldName.substr(LastSROAPrefix + strlen(".sroa.")); + // Look for an SROA slice index. 
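
Each use is rewritten against the intersection of its slice with the new alloca's range, computed in visit() above as a max/min pair; a slice is "split" exactly when it sticks out of that range on either side. Sketched with bare integers and invented offsets:

#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
  // Invented numbers: a slice [4, 24) of the original alloca being rewritten
  // against a new partition alloca covering [8, 16).
  uint64_t BeginOffset = 4, EndOffset = 24;
  uint64_t NewAllocaBeginOffset = 8, NewAllocaEndOffset = 16;

  bool IsSplit =
      BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;

  uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
  uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
  uint64_t SliceSize = NewEndOffset - NewBeginOffset;

  std::cout << "split=" << IsSplit << " range=[" << NewBeginOffset << ", "
            << NewEndOffset << ") size=" << SliceSize << "\n";
  // Prints: split=1 range=[8, 16) size=8
}
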
+ size_t IndexEnd = OldName.find_first_not_of("0123456789"); + if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') { + // Strip the index and look for the offset. + OldName = OldName.substr(IndexEnd + 1); + size_t OffsetEnd = OldName.find_first_not_of("0123456789"); + if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.') + // Strip the offset. + OldName = OldName.substr(OffsetEnd + 1); + } + } + // Strip any SROA suffixes as well. + OldName = OldName.substr(0, OldName.find(".sroa_")); +#endif - /// \brief Compute suitable alignment to access an offset into the new alloca. - unsigned getOffsetAlign(uint64_t Offset) { - unsigned NewAIAlign = NewAI.getAlignment(); - if (!NewAIAlign) - NewAIAlign = TD.getABITypeAlignment(NewAI.getAllocatedType()); - return MinAlign(NewAIAlign, Offset); + return getAdjustedPtr(IRB, DL, &NewAI, + APInt(DL.getPointerSizeInBits(), Offset), PointerTy, +#ifndef NDEBUG + Twine(OldName) + "." +#else + Twine() +#endif + ); } - /// \brief Compute suitable alignment to access this partition of the new + /// \brief Compute suitable alignment to access this slice of the *new* /// alloca. - unsigned getPartitionAlign() { - return getOffsetAlign(BeginOffset - NewAllocaBeginOffset); - } - - /// \brief Compute suitable alignment to access a type at an offset of the - /// new alloca. /// - /// \returns zero if the type's ABI alignment is a suitable alignment, - /// otherwise returns the maximal suitable alignment. - unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) { - unsigned Align = getOffsetAlign(Offset); - return Align == TD.getABITypeAlignment(Ty) ? 0 : Align; - } - - /// \brief Compute suitable alignment to access a type at the beginning of - /// this partition of the new alloca. - /// - /// See \c getOffsetTypeAlign for details; this routine delegates to it. - unsigned getPartitionTypeAlign(Type *Ty) { - return getOffsetTypeAlign(Ty, BeginOffset - NewAllocaBeginOffset); + /// You can optionally pass a type to this routine and if that type's ABI + /// alignment is itself suitable, this will return zero. + unsigned getSliceAlign(Type *Ty = nullptr) { + unsigned NewAIAlign = NewAI.getAlignment(); + if (!NewAIAlign) + NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType()); + unsigned Align = + MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset); + return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 
0 : Align; } - ConstantInt *getIndex(IRBuilder<> &IRB, uint64_t Offset) { + unsigned getIndex(uint64_t Offset) { assert(VecTy && "Can only call getIndex when rewriting a vector"); uint64_t RelOffset = Offset - NewAllocaBeginOffset; assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds"); uint32_t Index = RelOffset / ElementSize; assert(Index * ElementSize == RelOffset); - return IRB.getInt32(Index); + return Index; } void deleteIfTriviallyDead(Value *V) { Instruction *I = cast(V); if (isInstructionTriviallyDead(I)) - Pass.DeadInsts.push_back(I); + Pass.DeadInsts.insert(I); } - bool rewriteVectorizedLoadInst(IRBuilder<> &IRB, LoadInst &LI, Value *OldOp) { - Value *Result; - if (LI.getType() == VecTy->getElementType() || - BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) { - Result = IRB.CreateExtractElement( - IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")), - getIndex(IRB, BeginOffset), getName(".extract")); - } else { - Result = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); - } - if (Result->getType() != LI.getType()) - Result = convertValue(TD, IRB, Result, LI.getType()); - LI.replaceAllUsesWith(Result); - Pass.DeadInsts.push_back(&LI); + Value *rewriteVectorizedLoadInst() { + unsigned BeginIndex = getIndex(NewBeginOffset); + unsigned EndIndex = getIndex(NewEndOffset); + assert(EndIndex > BeginIndex && "Empty vector!"); - DEBUG(dbgs() << " to: " << *Result << "\n"); - return true; + Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); + return extractVector(IRB, V, BeginIndex, EndIndex, "vec"); } - bool rewriteIntegerLoad(IRBuilder<> &IRB, LoadInst &LI) { + Value *rewriteIntegerLoad(LoadInst &LI) { assert(IntTy && "We cannot insert an integer to the alloca"); assert(!LI.isVolatile()); - Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); - V = convertValue(TD, IRB, V, IntTy); - assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); - uint64_t Offset = BeginOffset - NewAllocaBeginOffset; - V = extractInteger(TD, IRB, V, cast(LI.getType()), Offset, - getName(".extract")); - LI.replaceAllUsesWith(V); - Pass.DeadInsts.push_back(&LI); - DEBUG(dbgs() << " to: " << *V << "\n"); - return true; + Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); + V = convertValue(DL, IRB, V, IntTy); + assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); + uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; + if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) + V = extractInteger(DL, IRB, V, cast(LI.getType()), Offset, + "extract"); + return V; } bool visitLoadInst(LoadInst &LI) { DEBUG(dbgs() << " original: " << LI << "\n"); Value *OldOp = LI.getOperand(0); assert(OldOp == OldPtr); - IRBuilder<> IRB(&LI); - if (VecTy) - return rewriteVectorizedLoadInst(IRB, LI, OldOp); - if (IntTy && LI.getType()->isIntegerTy()) - return rewriteIntegerLoad(IRB, LI); - - if (BeginOffset == NewAllocaBeginOffset && - canConvertValue(TD, NewAllocaTy, LI.getType())) { - Value *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - LI.isVolatile(), getName(".load")); - Value *NewV = convertValue(TD, IRB, NewLI, LI.getType()); - LI.replaceAllUsesWith(NewV); - Pass.DeadInsts.push_back(&LI); - - DEBUG(dbgs() << " to: " << *NewLI << "\n"); - return !LI.isVolatile(); + Type *TargetTy = IsSplit ? 
Type::getIntNTy(LI.getContext(), SliceSize * 8) + : LI.getType(); + const bool IsLoadPastEnd = DL.getTypeStoreSize(TargetTy) > SliceSize; + bool IsPtrAdjusted = false; + Value *V; + if (VecTy) { + V = rewriteVectorizedLoadInst(); + } else if (IntTy && LI.getType()->isIntegerTy()) { + V = rewriteIntegerLoad(LI); + } else if (NewBeginOffset == NewAllocaBeginOffset && + NewEndOffset == NewAllocaEndOffset && + (canConvertValue(DL, NewAllocaTy, TargetTy) || + (IsLoadPastEnd && NewAllocaTy->isIntegerTy() && + TargetTy->isIntegerTy()))) { + LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + LI.isVolatile(), LI.getName()); + if (LI.isVolatile()) + NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope()); + V = NewLI; + + // If this is an integer load past the end of the slice (which means the + // bytes outside the slice are undef or this load is dead) just forcibly + // fix the integer size with correct handling of endianness. + if (auto *AITy = dyn_cast(NewAllocaTy)) + if (auto *TITy = dyn_cast(TargetTy)) + if (AITy->getBitWidth() < TITy->getBitWidth()) { + V = IRB.CreateZExt(V, TITy, "load.ext"); + if (DL.isBigEndian()) + V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(), + "endian_shift"); + } + } else { + Type *LTy = TargetTy->getPointerTo(); + LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy), + getSliceAlign(TargetTy), + LI.isVolatile(), LI.getName()); + if (LI.isVolatile()) + NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope()); + + V = NewLI; + IsPtrAdjusted = true; + } + V = convertValue(DL, IRB, V, TargetTy); + + if (IsSplit) { + assert(!LI.isVolatile()); + assert(LI.getType()->isIntegerTy() && + "Only integer type loads and stores are split"); + assert(SliceSize < DL.getTypeStoreSize(LI.getType()) && + "Split load isn't smaller than original load"); + assert(LI.getType()->getIntegerBitWidth() == + DL.getTypeStoreSizeInBits(LI.getType()) && + "Non-byte-multiple bit width"); + // Move the insertion point just past the load so that we can refer to it. + IRB.SetInsertPoint(std::next(BasicBlock::iterator(&LI))); + // Create a placeholder value with the same type as LI to use as the + // basis for the new value. This allows us to replace the uses of LI with + // the computed value, and then replace the placeholder with LI, leaving + // LI only used for this computation. 
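
When an integer load reads past the end of the new alloca, the bytes beyond it are undefined anyway, so the rewriter simply widens the loaded value; on a big-endian target the defined bytes must additionally be shifted up so they keep the positions that correspond to the lower memory addresses. The same fix-up expressed on ordinary host integers, with assumed widths and no IR:

#include <cstdint>
#include <cstdio>

// Widen a value loaded with AllocaBits significant bits to TargetBits,
// matching how the bytes would line up in memory on each endianness.
static uint64_t widenPastEndLoad(uint64_t Loaded, unsigned AllocaBits,
                                 unsigned TargetBits, bool BigEndian) {
  uint64_t V = Loaded;                       // zext: high bits are already zero
  if (BigEndian)
    V <<= (TargetBits - AllocaBits);         // defined bytes move to the
                                             // most significant end
  return V;
}

int main() {
  // A 16-bit alloca value 0xBEEF read back by a 32-bit load.
  std::printf("%#llx\n", (unsigned long long)widenPastEndLoad(0xBEEF, 16, 32, false)); // 0xbeef
  std::printf("%#llx\n", (unsigned long long)widenPastEndLoad(0xBEEF, 16, 32, true));  // 0xbeef0000
}
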
+ Value *Placeholder = + new LoadInst(UndefValue::get(LI.getType()->getPointerTo())); + V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset, + "insert"); + LI.replaceAllUsesWith(V); + Placeholder->replaceAllUsesWith(&LI); + delete Placeholder; + } else { + LI.replaceAllUsesWith(V); } - assert(!IntTy && "Invalid load found with int-op widening enabled"); - - Value *NewPtr = getAdjustedAllocaPtr(IRB, - LI.getPointerOperand()->getType()); - LI.setOperand(0, NewPtr); - LI.setAlignment(getPartitionTypeAlign(LI.getType())); - DEBUG(dbgs() << " to: " << LI << "\n"); - + Pass.DeadInsts.insert(&LI); deleteIfTriviallyDead(OldOp); - return NewPtr == &NewAI && !LI.isVolatile(); + DEBUG(dbgs() << " to: " << *V << "\n"); + return !LI.isVolatile() && !IsPtrAdjusted; } - bool rewriteVectorizedStoreInst(IRBuilder<> &IRB, StoreInst &SI, - Value *OldOp) { - Value *V = SI.getValueOperand(); - if (V->getType() == ElementTy || - BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) { - if (V->getType() != ElementTy) - V = convertValue(TD, IRB, V, ElementTy); - LoadInst *LI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); - V = IRB.CreateInsertElement(LI, V, getIndex(IRB, BeginOffset), - getName(".insert")); - } else if (V->getType() != VecTy) { - V = convertValue(TD, IRB, V, VecTy); + bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) { + if (V->getType() != VecTy) { + unsigned BeginIndex = getIndex(NewBeginOffset); + unsigned EndIndex = getIndex(NewEndOffset); + assert(EndIndex > BeginIndex && "Empty vector!"); + unsigned NumElements = EndIndex - BeginIndex; + assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); + Type *SliceTy = (NumElements == 1) + ? ElementTy + : VectorType::get(ElementTy, NumElements); + if (V->getType() != SliceTy) + V = convertValue(DL, IRB, V, SliceTy); + + // Mix in the existing elements. 
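
Storing a narrow vector into a sub-range of the wider alloca vector goes through insertVector: one shuffle widens the incoming value with undef lanes, then a lane-wise select blends it over the old value. A sketch of the two masks built for inserting a 2-element value at lane 1 of a 4-element vector (plain integers; -1 stands in for an undef lane):

#include <iostream>
#include <vector>

int main() {
  unsigned VecElts = 4, BeginIndex = 1, NewElts = 2;
  unsigned EndIndex = BeginIndex + NewElts;

  // First shuffle: place incoming lanes 0..NewElts-1 at BeginIndex..EndIndex-1,
  // leave every other lane undef (-1 here).
  std::vector<int> ExpandMask;
  for (unsigned i = 0; i != VecElts; ++i)
    ExpandMask.push_back(i >= BeginIndex && i < EndIndex ? int(i - BeginIndex) : -1);

  // Second step: a lane-wise select mask choosing the widened value inside the
  // inserted range and the old vector outside it.
  std::vector<bool> BlendMask;
  for (unsigned i = 0; i != VecElts; ++i)
    BlendMask.push_back(i >= BeginIndex && i < EndIndex);

  std::cout << "expand:";
  for (int m : ExpandMask) std::cout << ' ' << m;   // -1 0 1 -1
  std::cout << "\nblend:";
  for (bool b : BlendMask) std::cout << ' ' << b;   //  0 1 1 0
  std::cout << "\n";
}
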
+ Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); + V = insertVector(IRB, Old, V, BeginIndex, "vec"); } StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); - Pass.DeadInsts.push_back(&SI); + Pass.DeadInsts.insert(&SI); (void)Store; DEBUG(dbgs() << " to: " << *Store << "\n"); return true; } - bool rewriteIntegerStore(IRBuilder<> &IRB, StoreInst &SI) { + bool rewriteIntegerStore(Value *V, StoreInst &SI) { assert(IntTy && "We cannot extract an integer from the alloca"); assert(!SI.isVolatile()); - Value *V = SI.getValueOperand(); - if (TD.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) { - Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".oldload")); - Old = convertValue(TD, IRB, Old, IntTy); + if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) { + Value *Old = + IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); + Old = convertValue(DL, IRB, Old, IntTy); assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); uint64_t Offset = BeginOffset - NewAllocaBeginOffset; - V = insertInteger(TD, IRB, Old, SI.getValueOperand(), Offset, - getName(".insert")); + V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert"); } - V = convertValue(TD, IRB, V, NewAllocaTy); + V = convertValue(DL, IRB, V, NewAllocaTy); StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); - Pass.DeadInsts.push_back(&SI); + Pass.DeadInsts.insert(&SI); (void)Store; DEBUG(dbgs() << " to: " << *Store << "\n"); return true; @@ -2534,64 +2606,121 @@ private: DEBUG(dbgs() << " original: " << SI << "\n"); Value *OldOp = SI.getOperand(1); assert(OldOp == OldPtr); - IRBuilder<> IRB(&SI); - if (VecTy) - return rewriteVectorizedStoreInst(IRB, SI, OldOp); - Type *ValueTy = SI.getValueOperand()->getType(); - if (IntTy && ValueTy->isIntegerTy()) - return rewriteIntegerStore(IRB, SI); + Value *V = SI.getValueOperand(); // Strip all inbounds GEPs and pointer casts to try to dig out any root // alloca that should be re-examined after promoting this alloca. 
- if (ValueTy->isPointerTy()) - if (AllocaInst *AI = dyn_cast(SI.getValueOperand() - ->stripInBoundsOffsets())) + if (V->getType()->isPointerTy()) + if (AllocaInst *AI = dyn_cast(V->stripInBoundsOffsets())) Pass.PostPromotionWorklist.insert(AI); - if (BeginOffset == NewAllocaBeginOffset && - canConvertValue(TD, ValueTy, NewAllocaTy)) { - Value *NewV = convertValue(TD, IRB, SI.getValueOperand(), NewAllocaTy); - StoreInst *NewSI = IRB.CreateAlignedStore(NewV, &NewAI, NewAI.getAlignment(), - SI.isVolatile()); - (void)NewSI; - Pass.DeadInsts.push_back(&SI); + if (SliceSize < DL.getTypeStoreSize(V->getType())) { + assert(!SI.isVolatile()); + assert(V->getType()->isIntegerTy() && + "Only integer type loads and stores are split"); + assert(V->getType()->getIntegerBitWidth() == + DL.getTypeStoreSizeInBits(V->getType()) && + "Non-byte-multiple bit width"); + IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8); + V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset, + "extract"); + } - DEBUG(dbgs() << " to: " << *NewSI << "\n"); - return !SI.isVolatile(); + if (VecTy) + return rewriteVectorizedStoreInst(V, SI, OldOp); + if (IntTy && V->getType()->isIntegerTy()) + return rewriteIntegerStore(V, SI); + + const bool IsStorePastEnd = DL.getTypeStoreSize(V->getType()) > SliceSize; + StoreInst *NewSI; + if (NewBeginOffset == NewAllocaBeginOffset && + NewEndOffset == NewAllocaEndOffset && + (canConvertValue(DL, V->getType(), NewAllocaTy) || + (IsStorePastEnd && NewAllocaTy->isIntegerTy() && + V->getType()->isIntegerTy()))) { + // If this is an integer store past the end of slice (and thus the bytes + // past that point are irrelevant or this is unreachable), truncate the + // value prior to storing. + if (auto *VITy = dyn_cast(V->getType())) + if (auto *AITy = dyn_cast(NewAllocaTy)) + if (VITy->getBitWidth() > AITy->getBitWidth()) { + if (DL.isBigEndian()) + V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(), + "endian_shift"); + V = IRB.CreateTrunc(V, AITy, "load.trunc"); + } + + V = convertValue(DL, IRB, V, NewAllocaTy); + NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), + SI.isVolatile()); + } else { + Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo()); + NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()), + SI.isVolatile()); } + if (SI.isVolatile()) + NewSI->setAtomic(SI.getOrdering(), SI.getSynchScope()); + Pass.DeadInsts.insert(&SI); + deleteIfTriviallyDead(OldOp); - assert(!IntTy && "Invalid store found with int-op widening enabled"); + DEBUG(dbgs() << " to: " << *NewSI << "\n"); + return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile(); + } - Value *NewPtr = getAdjustedAllocaPtr(IRB, - SI.getPointerOperand()->getType()); - SI.setOperand(1, NewPtr); - SI.setAlignment(getPartitionTypeAlign(SI.getValueOperand()->getType())); - DEBUG(dbgs() << " to: " << SI << "\n"); + /// \brief Compute an integer value from splatting an i8 across the given + /// number of bytes. + /// + /// Note that this routine assumes an i8 is a byte. If that isn't true, don't + /// call this routine. + /// FIXME: Heed the advice above. + /// + /// \param V The i8 value to splat. 
+ /// \param Size The number of bytes in the output (assuming i8 is one byte) + Value *getIntegerSplat(Value *V, unsigned Size) { + assert(Size > 0 && "Expected a positive number of bytes."); + IntegerType *VTy = cast(V->getType()); + assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); + if (Size == 1) + return V; + + Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8); + V = IRB.CreateMul( + IRB.CreateZExt(V, SplatIntTy, "zext"), + ConstantExpr::getUDiv( + Constant::getAllOnesValue(SplatIntTy), + ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()), + SplatIntTy)), + "isplat"); + return V; + } - deleteIfTriviallyDead(OldOp); - return NewPtr == &NewAI && !SI.isVolatile(); + /// \brief Compute a vector splat for a given element value. + Value *getVectorSplat(Value *V, unsigned NumElements) { + V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); + DEBUG(dbgs() << " splat: " << *V << "\n"); + return V; } bool visitMemSetInst(MemSetInst &II) { DEBUG(dbgs() << " original: " << II << "\n"); - IRBuilder<> IRB(&II); assert(II.getRawDest() == OldPtr); // If the memset has a variable size, it cannot be split, just adjust the // pointer to the new alloca. if (!isa(II.getLength())) { - II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType())); + assert(!IsSplit); + assert(NewBeginOffset == BeginOffset); + II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType())); Type *CstTy = II.getAlignmentCst()->getType(); - II.setAlignment(ConstantInt::get(CstTy, getPartitionAlign())); + II.setAlignment(ConstantInt::get(CstTy, getSliceAlign())); deleteIfTriviallyDead(OldPtr); return false; } // Record this instruction for deletion. - if (Pass.DeadSplitInsts.insert(&II)) - Pass.DeadInsts.push_back(&II); + Pass.DeadInsts.insert(&II); Type *AllocaTy = NewAI.getAllocatedType(); Type *ScalarTy = AllocaTy->getScalarType(); @@ -2599,17 +2728,16 @@ private: // If this doesn't map cleanly onto the alloca type, and that type isn't // a single value type, just emit a memset. if (!VecTy && !IntTy && - (BeginOffset != NewAllocaBeginOffset || - EndOffset != NewAllocaEndOffset || + (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || + SliceSize != DL.getTypeStoreSize(AllocaTy) || !AllocaTy->isSingleValueType() || - !TD.isLegalInteger(TD.getTypeSizeInBits(ScalarTy)))) { + !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) || + DL.getTypeSizeInBits(ScalarTy) % 8 != 0)) { Type *SizeTy = II.getLength()->getType(); - Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset); - CallInst *New - = IRB.CreateMemSet(getAdjustedAllocaPtr(IRB, - II.getRawDest()->getType()), - II.getValue(), Size, getPartitionAlign(), - II.isVolatile()); + Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); + CallInst *New = IRB.CreateMemSet( + getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size, + getSliceAlign(), II.isVolatile()); (void)New; DEBUG(dbgs() << " to: " << *New << "\n"); return false; @@ -2618,53 +2746,60 @@ private: // If we can represent this as a simple value, we have to build the actual // value to store, which requires expanding the byte present in memset to // a sensible representation for the alloca type. This is essentially - // splatting the byte to a sufficiently wide integer, bitcasting to the - // desired scalar type, and splatting it across any desired vector type. 
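
getIntegerSplat widens the i8 and multiplies by the 0x0101...01 pattern, which the code obtains as AllOnes(wide) / AllOnes(i8); for example 0xFFFFFFFF / 0xFF is 0x01010101, so a single multiply replicates the byte into every position. The same trick on host integers, with an assumed maximum width of four bytes for the sketch:

#include <cstdint>
#include <cstdio>

static uint32_t splatByte(uint8_t Byte, unsigned SizeInBytes) {
  // 0xFFFFFFFF / 0xFF == 0x01010101; multiplying by it copies the byte into
  // every byte position of the wider integer.
  uint32_t AllOnesWide =
      SizeInBytes == 4 ? 0xFFFFFFFFu : (1u << (8 * SizeInBytes)) - 1;
  uint32_t Ones = AllOnesWide / 0xFFu;
  return uint32_t(Byte) * Ones;
}

int main() {
  std::printf("%#x\n", splatByte(0xAB, 4)); // 0xabababab
  std::printf("%#x\n", splatByte(0x7F, 2)); // 0x7f7f
}
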
- uint64_t Size = EndOffset - BeginOffset; - Value *V = II.getValue(); - IntegerType *VTy = cast(V->getType()); - Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8); - if (Size*8 > VTy->getBitWidth()) - V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, getName(".zext")), - ConstantExpr::getUDiv( - Constant::getAllOnesValue(SplatIntTy), - ConstantExpr::getZExt( - Constant::getAllOnesValue(V->getType()), - SplatIntTy)), - getName(".isplat")); - - // If this is an element-wide memset of a vectorizable alloca, insert it. - if (VecTy && (BeginOffset > NewAllocaBeginOffset || - EndOffset < NewAllocaEndOffset)) { - if (V->getType() != ScalarTy) - V = convertValue(TD, IRB, V, ScalarTy); - StoreInst *Store = IRB.CreateAlignedStore( - IRB.CreateInsertElement(IRB.CreateAlignedLoad(&NewAI, - NewAI.getAlignment(), - getName(".load")), - V, getIndex(IRB, BeginOffset), - getName(".insert")), - &NewAI, NewAI.getAlignment()); - (void)Store; - DEBUG(dbgs() << " to: " << *Store << "\n"); - return true; - } + // splatting the byte to a sufficiently wide integer, splatting it across + // any desired vector width, and bitcasting to the final type. + Value *V; - // If this is a memset on an alloca where we can widen stores, insert the - // set integer. - if (IntTy && (BeginOffset > NewAllocaBeginOffset || - EndOffset < NewAllocaEndOffset)) { + if (VecTy) { + // If this is a memset of a vectorized alloca, insert it. + assert(ElementTy == ScalarTy); + + unsigned BeginIndex = getIndex(NewBeginOffset); + unsigned EndIndex = getIndex(NewEndOffset); + assert(EndIndex > BeginIndex && "Empty vector!"); + unsigned NumElements = EndIndex - BeginIndex; + assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); + + Value *Splat = + getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8); + Splat = convertValue(DL, IRB, Splat, ElementTy); + if (NumElements > 1) + Splat = getVectorSplat(Splat, NumElements); + + Value *Old = + IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); + V = insertVector(IRB, Old, Splat, BeginIndex, "vec"); + } else if (IntTy) { + // If this is a memset on an alloca where we can widen stores, insert the + // set integer. assert(!II.isVolatile()); - Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".oldload")); - Old = convertValue(TD, IRB, Old, IntTy); - assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); - uint64_t Offset = BeginOffset - NewAllocaBeginOffset; - V = insertInteger(TD, IRB, Old, V, Offset, getName(".insert")); - } - if (V->getType() != AllocaTy) - V = convertValue(TD, IRB, V, AllocaTy); + uint64_t Size = NewEndOffset - NewBeginOffset; + V = getIntegerSplat(II.getValue(), Size); + + if (IntTy && (BeginOffset != NewAllocaBeginOffset || + EndOffset != NewAllocaBeginOffset)) { + Value *Old = + IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); + Old = convertValue(DL, IRB, Old, IntTy); + uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; + V = insertInteger(DL, IRB, Old, V, Offset, "insert"); + } else { + assert(V->getType() == IntTy && + "Wrong type for an alloca wide integer!"); + } + V = convertValue(DL, IRB, V, AllocaTy); + } else { + // Established these invariants above. 
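
For a memset that covers only some lanes of a vectorized alloca, the rewrite computes a per-element splat and blends it over the covered lanes, leaving the rest of the vector untouched. The intended effect, modelled on a plain array of 32-bit lanes with invented values; this shows the semantics of the rewritten access, not the IR the pass emits:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t Lanes[4] = {0x11111111, 0x22222222, 0x33333333, 0x44444444};

  uint8_t Byte = 0xAB;
  unsigned BeginIndex = 1, EndIndex = 3;           // memset covers lanes [1, 3)

  uint32_t Splat = uint32_t(Byte) * 0x01010101u;   // byte splatted to element width
  for (unsigned i = BeginIndex; i != EndIndex; ++i)
    Lanes[i] = Splat;                              // blended over the old lanes

  for (uint32_t L : Lanes)
    std::printf("%#010x ", (unsigned)L);  // 0x11111111 0xabababab 0xabababab 0x44444444
  std::printf("\n");
}
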
+ assert(NewBeginOffset == NewAllocaBeginOffset); + assert(NewEndOffset == NewAllocaEndOffset); + + V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8); + if (VectorType *AllocaVecTy = dyn_cast(AllocaTy)) + V = getVectorSplat(V, AllocaVecTy->getNumElements()); + + V = convertValue(DL, IRB, V, AllocaTy); + } Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), II.isVolatile()); @@ -2678,25 +2813,12 @@ private: // them into two categories: split intrinsics and unsplit intrinsics. DEBUG(dbgs() << " original: " << II << "\n"); - IRBuilder<> IRB(&II); - - assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr); - bool IsDest = II.getRawDest() == OldPtr; - const AllocaPartitioning::MemTransferOffsets &MTO - = P.getMemTransferOffsets(II); + bool IsDest = &II.getRawDestUse() == OldUse; + assert((IsDest && II.getRawDest() == OldPtr) || + (!IsDest && II.getRawSource() == OldPtr)); - assert(OldPtr->getType()->isPointerTy() && "Must be a pointer type!"); - unsigned AS = cast(OldPtr->getType())->getAddressSpace(); - // Compute the relative offset within the transfer. - unsigned IntPtrWidth = TD.getPointerSizeInBits(AS); - APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin - : MTO.SourceBegin)); - - unsigned Align = II.getAlignment(); - if (Align > 1) - Align = MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(), - MinAlign(II.getAlignment(), getPartitionAlign())); + unsigned SliceAlign = getSliceAlign(); // For unsplit intrinsics, we simply modify the source and destination // pointers in place. This isn't just an optimization, it is a matter of @@ -2705,18 +2827,21 @@ private: // a variable length. We may also be dealing with memmove instead of // memcpy, and so simply updating the pointers is the necessary for us to // update both source and dest of a single call. - if (!MTO.IsSplittable) { - Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource(); + if (!IsSplittable) { + Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); if (IsDest) - II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType())); + II.setDest(AdjustedPtr); else - II.setSource(getAdjustedAllocaPtr(IRB, II.getRawSource()->getType())); + II.setSource(AdjustedPtr); - Type *CstTy = II.getAlignmentCst()->getType(); - II.setAlignment(ConstantInt::get(CstTy, Align)); + if (II.getAlignment() > SliceAlign) { + Type *CstTy = II.getAlignmentCst()->getType(); + II.setAlignment( + ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign))); + } DEBUG(dbgs() << " to: " << II << "\n"); - deleteIfTriviallyDead(OldOp); + deleteIfTriviallyDead(OldPtr); return false; } // For split transfer intrinsics we have an incredibly useful assurance: @@ -2727,126 +2852,128 @@ private: // If this doesn't map cleanly onto the alloca type, and that type isn't // a single value type, just emit a memcpy. - bool EmitMemCpy - = !VecTy && !IntTy && (BeginOffset != NewAllocaBeginOffset || - EndOffset != NewAllocaEndOffset || - !NewAI.getAllocatedType()->isSingleValueType()); + bool EmitMemCpy = + !VecTy && !IntTy && + (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || + SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) || + !NewAI.getAllocatedType()->isSingleValueType()); // If we're just going to emit a memcpy, the alloca hasn't changed, and the // size hasn't been shrunk based on analysis of the viable range, this is // a no-op. if (EmitMemCpy && &OldAI == &NewAI) { - uint64_t OrigBegin = IsDest ? 
MTO.DestBegin : MTO.SourceBegin; - uint64_t OrigEnd = IsDest ? MTO.DestEnd : MTO.SourceEnd; // Ensure the start lines up. - assert(BeginOffset == OrigBegin); - (void)OrigBegin; + assert(NewBeginOffset == BeginOffset); // Rewrite the size as needed. - if (EndOffset != OrigEnd) + if (NewEndOffset != EndOffset) II.setLength(ConstantInt::get(II.getLength()->getType(), - EndOffset - BeginOffset)); + NewEndOffset - NewBeginOffset)); return false; } // Record this instruction for deletion. - if (Pass.DeadSplitInsts.insert(&II)) - Pass.DeadInsts.push_back(&II); - - bool IsWholeAlloca = BeginOffset == NewAllocaBeginOffset && - EndOffset == NewAllocaEndOffset; - bool IsVectorElement = VecTy && !IsWholeAlloca; - uint64_t Size = EndOffset - BeginOffset; - IntegerType *SubIntTy - = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0; - - Type *OtherPtrTy = IsDest ? II.getRawSource()->getType() - : II.getRawDest()->getType(); - if (!EmitMemCpy) { - if (IsVectorElement) - OtherPtrTy = VecTy->getElementType()->getPointerTo(OtherPtrTy); - else if (IntTy && !IsWholeAlloca) - OtherPtrTy = SubIntTy->getPointerTo(OtherPtrTy); - else - OtherPtrTy = NewAI.getType(); - } - - // Compute the other pointer, folding as much as possible to produce - // a single, simple GEP in most cases. - Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); - OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy, - getName("." + OtherPtr->getName())); + Pass.DeadInsts.insert(&II); // Strip all inbounds GEPs and pointer casts to try to dig out any root // alloca that should be re-examined after rewriting this instruction. - if (AllocaInst *AI - = dyn_cast(OtherPtr->stripInBoundsOffsets())) + Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); + if (AllocaInst *AI = + dyn_cast(OtherPtr->stripInBoundsOffsets())) { + assert(AI != &OldAI && AI != &NewAI && + "Splittable transfers cannot reach the same alloca on both ends."); Pass.Worklist.insert(AI); + } + + Type *OtherPtrTy = OtherPtr->getType(); + unsigned OtherAS = OtherPtrTy->getPointerAddressSpace(); + + // Compute the relative offset for the other pointer within the transfer. + unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS); + APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset); + unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1, + OtherOffset.zextOrTrunc(64).getZExtValue()); if (EmitMemCpy) { - Value *OurPtr - = getAdjustedAllocaPtr(IRB, IsDest ? II.getRawDest()->getType() - : II.getRawSource()->getType()); + // Compute the other pointer, folding as much as possible to produce + // a single, simple GEP in most cases. + OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, + OtherPtr->getName() + "."); + + Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); Type *SizeTy = II.getLength()->getType(); - Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset); + Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); - CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr, - IsDest ? OtherPtr : OurPtr, - Size, Align, II.isVolatile()); + CallInst *New = IRB.CreateMemCpy( + IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size, + MinAlign(SliceAlign, OtherAlign), II.isVolatile()); (void)New; DEBUG(dbgs() << " to: " << *New << "\n"); return false; } - // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy - // is equivalent to 1, but that isn't true if we end up rewriting this as - // a load or store. 
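
The alignment handling in the transfer rewrite is all MinAlign: the alignment still guaranteed at a pointer plus an offset is the largest power of two dividing both the base alignment and the offset, and the rewritten memcpy gets the minimum of what is known for its two sides. A standalone sketch of that computation with invented numbers; minAlign here is written out independently rather than taken from LLVM's support headers:

#include <cstdint>
#include <cstdio>

// Largest power of two dividing both A and B.
static uint64_t minAlign(uint64_t A, uint64_t B) {
  uint64_t X = A | B;
  return X & (~X + 1);          // lowest set bit of A|B
}

int main() {
  uint64_t SliceAlign = 16;                    // alignment of the new alloca slice
  uint64_t OtherAlign = minAlign(8, 12);       // 8-byte-aligned pointer, +12 bytes -> 4
  uint64_t MemCpyAlign = minAlign(SliceAlign, OtherAlign);

  std::printf("other=%llu memcpy=%llu\n",
              (unsigned long long)OtherAlign, (unsigned long long)MemCpyAlign);
  // other=4 memcpy=4
}
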
- if (!Align) - Align = 1; + bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && + NewEndOffset == NewAllocaEndOffset; + uint64_t Size = NewEndOffset - NewBeginOffset; + unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; + unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; + unsigned NumElements = EndIndex - BeginIndex; + IntegerType *SubIntTy = + IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr; + + // Reset the other pointer type to match the register type we're going to + // use, but using the address space of the original other pointer. + if (VecTy && !IsWholeAlloca) { + if (NumElements == 1) + OtherPtrTy = VecTy->getElementType(); + else + OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements); + + OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS); + } else if (IntTy && !IsWholeAlloca) { + OtherPtrTy = SubIntTy->getPointerTo(OtherAS); + } else { + OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS); + } - Value *SrcPtr = OtherPtr; + Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, + OtherPtr->getName() + "."); + unsigned SrcAlign = OtherAlign; Value *DstPtr = &NewAI; - if (!IsDest) + unsigned DstAlign = SliceAlign; + if (!IsDest) { std::swap(SrcPtr, DstPtr); + std::swap(SrcAlign, DstAlign); + } Value *Src; - if (IsVectorElement && !IsDest) { - // We have to extract rather than load. - Src = IRB.CreateExtractElement( - IRB.CreateAlignedLoad(SrcPtr, Align, getName(".copyload")), - getIndex(IRB, BeginOffset), - getName(".copyextract")); + if (VecTy && !IsWholeAlloca && !IsDest) { + Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); + Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); } else if (IntTy && !IsWholeAlloca && !IsDest) { - Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); - Src = convertValue(TD, IRB, Src, IntTy); - assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); - uint64_t Offset = BeginOffset - NewAllocaBeginOffset; - Src = extractInteger(TD, IRB, Src, SubIntTy, Offset, getName(".extract")); + Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); + Src = convertValue(DL, IRB, Src, IntTy); + uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; + Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); } else { - Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(), - getName(".copyload")); - } - - if (IntTy && !IsWholeAlloca && IsDest) { - Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".oldload")); - Old = convertValue(TD, IRB, Old, IntTy); - assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); - uint64_t Offset = BeginOffset - NewAllocaBeginOffset; - Src = insertInteger(TD, IRB, Old, Src, Offset, getName(".insert")); - Src = convertValue(TD, IRB, Src, NewAllocaTy); + Src = + IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(), "copyload"); } - if (IsVectorElement && IsDest) { - // We have to insert into a loaded copy before storing. 
- Src = IRB.CreateInsertElement( - IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")), - Src, getIndex(IRB, BeginOffset), - getName(".insert")); + if (VecTy && !IsWholeAlloca && IsDest) { + Value *Old = + IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); + Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); + } else if (IntTy && !IsWholeAlloca && IsDest) { + Value *Old = + IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); + Old = convertValue(DL, IRB, Old, IntTy); + uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; + Src = insertInteger(DL, IRB, Old, Src, Offset, "insert"); + Src = convertValue(DL, IRB, Src, NewAllocaTy); } StoreInst *Store = cast( - IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile())); + IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); (void)Store; DEBUG(dbgs() << " to: " << *Store << "\n"); return !II.isVolatile(); @@ -2856,63 +2983,79 @@ private: assert(II.getIntrinsicID() == Intrinsic::lifetime_start || II.getIntrinsicID() == Intrinsic::lifetime_end); DEBUG(dbgs() << " original: " << II << "\n"); - IRBuilder<> IRB(&II); assert(II.getArgOperand(1) == OldPtr); // Record this instruction for deletion. - if (Pass.DeadSplitInsts.insert(&II)) - Pass.DeadInsts.push_back(&II); + Pass.DeadInsts.insert(&II); - ConstantInt *Size - = ConstantInt::get(cast(II.getArgOperand(0)->getType()), - EndOffset - BeginOffset); - Value *Ptr = getAdjustedAllocaPtr(IRB, II.getArgOperand(1)->getType()); + ConstantInt *Size = + ConstantInt::get(cast(II.getArgOperand(0)->getType()), + NewEndOffset - NewBeginOffset); + Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); Value *New; if (II.getIntrinsicID() == Intrinsic::lifetime_start) New = IRB.CreateLifetimeStart(Ptr, Size); else New = IRB.CreateLifetimeEnd(Ptr, Size); + (void)New; DEBUG(dbgs() << " to: " << *New << "\n"); return true; } bool visitPHINode(PHINode &PN) { DEBUG(dbgs() << " original: " << PN << "\n"); + assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); + assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); // We would like to compute a new pointer in only one place, but have it be // as local as possible to the PHI. To do that, we re-use the location of // the old pointer, which necessarily must be in the right position to // dominate the PHI. - IRBuilder<> PtrBuilder(cast(OldPtr)); + IRBuilderTy PtrBuilder(IRB); + if (isa(OldPtr)) + PtrBuilder.SetInsertPoint(OldPtr->getParent()->getFirstInsertionPt()); + else + PtrBuilder.SetInsertPoint(OldPtr); + PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc()); - Value *NewPtr = getAdjustedAllocaPtr(PtrBuilder, OldPtr->getType()); + Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType()); // Replace the operands which were using the old pointer. std::replace(PN.op_begin(), PN.op_end(), cast(OldPtr), NewPtr); DEBUG(dbgs() << " to: " << PN << "\n"); deleteIfTriviallyDead(OldPtr); - return false; + + // PHIs can't be promoted on their own, but often can be speculated. We + // check the speculation outside of the rewriter so that we see the + // fully-rewritten alloca. 
+ PHIUsers.insert(&PN); + return true; } bool visitSelectInst(SelectInst &SI) { DEBUG(dbgs() << " original: " << SI << "\n"); - IRBuilder<> IRB(&SI); + assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && + "Pointer isn't an operand!"); + assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); + assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); - // Find the operand we need to rewrite here. - bool IsTrueVal = SI.getTrueValue() == OldPtr; - if (IsTrueVal) - assert(SI.getFalseValue() != OldPtr && "Pointer is both operands!"); - else - assert(SI.getFalseValue() == OldPtr && "Pointer isn't an operand!"); + Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); + // Replace the operands which were using the old pointer. + if (SI.getOperand(1) == OldPtr) + SI.setOperand(1, NewPtr); + if (SI.getOperand(2) == OldPtr) + SI.setOperand(2, NewPtr); - Value *NewPtr = getAdjustedAllocaPtr(IRB, OldPtr->getType()); - SI.setOperand(IsTrueVal ? 1 : 2, NewPtr); DEBUG(dbgs() << " to: " << SI << "\n"); deleteIfTriviallyDead(OldPtr); - return false; - } + // Selects can't be promoted on their own, but often can be speculated. We + // check the speculation outside of the rewriter so that we see the + // fully-rewritten alloca. + SelectUsers.insert(&SI); + return true; + } }; } @@ -2926,7 +3069,7 @@ class AggLoadStoreRewriter : public InstVisitor { // Befriend the base class so it can delegate to private visit methods. friend class llvm::InstVisitor; - const DataLayout &TD; + const DataLayout &DL; /// Queue of pointer uses to analyze and potentially rewrite. SmallVector Queue; @@ -2939,7 +3082,7 @@ class AggLoadStoreRewriter : public InstVisitor { Use *U; public: - AggLoadStoreRewriter(const DataLayout &TD) : TD(TD) {} + AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {} /// Rewrite loads and stores through a pointer and all pointers derived from /// it. @@ -2958,21 +3101,19 @@ private: /// Enqueue all the users of the given instruction for further processing. /// This uses a set to de-duplicate users. void enqueueUsers(Instruction &I) { - for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE; - ++UI) - if (Visited.insert(*UI)) - Queue.push_back(&UI.getUse()); + for (Use &U : I.uses()) + if (Visited.insert(U.getUser()).second) + Queue.push_back(&U); } // Conservative default is to not rewrite anything. bool visitInstruction(Instruction &I) { return false; } /// \brief Generic recursive split emission class. - template - class OpSplitter { + template class OpSplitter { protected: /// The builder used to form new instructions. - IRBuilder<> IRB; + IRBuilderTy IRB; /// The indices which to be used with insert- or extractvalue to select the /// appropriate value within the aggregate. SmallVector Indices; @@ -2986,7 +3127,7 @@ private: /// Initialize the splitter with an insertion point, Ptr and start with a /// single zero GEP index. OpSplitter(Instruction *InsertionPoint, Value *Ptr) - : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {} + : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {} public: /// \brief Generic recursive split emission routine. @@ -3042,16 +3183,16 @@ private: struct LoadOpSplitter : public OpSplitter { LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr) - : OpSplitter(InsertionPoint, Ptr) {} + : OpSplitter(InsertionPoint, Ptr) {} /// Emit a leaf load of a single value. This is called at the leaves of the /// recursive emission to actually load values. 
void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { assert(Ty->isSingleValueType()); // Load the single value and insert it using the indices. - Value *Load = IRB.CreateLoad(IRB.CreateInBoundsGEP(Ptr, GEPIndices, - Name + ".gep"), - Name + ".load"); + Value *GEP = + IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep"); + Value *Load = IRB.CreateLoad(GEP, Name + ".load"); Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); DEBUG(dbgs() << " to: " << *Load << "\n"); } @@ -3074,7 +3215,7 @@ private: struct StoreOpSplitter : public OpSplitter { StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr) - : OpSplitter(InsertionPoint, Ptr) {} + : OpSplitter(InsertionPoint, Ptr) {} /// Emit a leaf store of a single value. This is called at the leaves of the /// recursive emission to actually produce stores. @@ -3082,8 +3223,8 @@ private: assert(Ty->isSingleValueType()); // Extract the single value and store it using the indices. Value *Store = IRB.CreateStore( - IRB.CreateExtractValue(Agg, Indices, Name + ".extract"), - IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep")); + IRB.CreateExtractValue(Agg, Indices, Name + ".extract"), + IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep")); (void)Store; DEBUG(dbgs() << " to: " << *Store << "\n"); } @@ -3169,104 +3310,597 @@ static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { /// when the size or offset cause either end of type-based partition to be off. /// Also, this is a best-effort routine. It is reasonable to give up and not /// return a type if necessary. -static Type *getTypePartition(const DataLayout &TD, Type *Ty, - uint64_t Offset, uint64_t Size) { - if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size) - return stripAggregateTypeWrapping(TD, Ty); +static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, + uint64_t Size) { + if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size) + return stripAggregateTypeWrapping(DL, Ty); + if (Offset > DL.getTypeAllocSize(Ty) || + (DL.getTypeAllocSize(Ty) - Offset) < Size) + return nullptr; if (SequentialType *SeqTy = dyn_cast(Ty)) { // We can't partition pointers... if (SeqTy->isPointerTy()) - return 0; + return nullptr; Type *ElementTy = SeqTy->getElementType(); - uint64_t ElementSize = TD.getTypeAllocSize(ElementTy); + uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); uint64_t NumSkippedElements = Offset / ElementSize; - if (ArrayType *ArrTy = dyn_cast(SeqTy)) + if (ArrayType *ArrTy = dyn_cast(SeqTy)) { if (NumSkippedElements >= ArrTy->getNumElements()) - return 0; - if (VectorType *VecTy = dyn_cast(SeqTy)) + return nullptr; + } else if (VectorType *VecTy = dyn_cast(SeqTy)) { if (NumSkippedElements >= VecTy->getNumElements()) - return 0; + return nullptr; + } Offset -= NumSkippedElements * ElementSize; // First check if we need to recurse. if (Offset > 0 || Size < ElementSize) { // Bail if the partition ends in a different array element. if ((Offset + Size) > ElementSize) - return 0; + return nullptr; // Recurse through the element type trying to peel off offset bytes. 
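The arithmetic above reduces an (Offset, Size) byte range over a sequential type to a range relative to a single element before the recursion that follows. A small standalone worked example of that reduction, using plain integers as stand-ins for the DataLayout queries (an assumption made purely for illustration):

#include <cassert>
#include <cstdint>

int main() {
  // Worked example: getTypePartition on [8 x i32] for the byte range
  // [20, 28), i.e. Offset = 20 and Size = 8 with ElementSize = 4.
  uint64_t ElementSize = 4, Offset = 20, Size = 8;
  uint64_t NumSkippedElements = Offset / ElementSize; // 5 whole i32s skipped
  Offset -= NumSkippedElements * ElementSize;         // lands on an element start
  assert(NumSkippedElements == 5 && Offset == 0);
  // With Offset == 0 and Size an exact multiple of ElementSize, no recursion
  // is needed and the partition gets the "natural" type [2 x i32].
  assert(Size >= ElementSize && Size % ElementSize == 0);
  assert(Size / ElementSize == 2);
  return 0;
}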
- return getTypePartition(TD, ElementTy, Offset, Size); + return getTypePartition(DL, ElementTy, Offset, Size); } assert(Offset == 0); if (Size == ElementSize) - return stripAggregateTypeWrapping(TD, ElementTy); + return stripAggregateTypeWrapping(DL, ElementTy); assert(Size > ElementSize); uint64_t NumElements = Size / ElementSize; if (NumElements * ElementSize != Size) - return 0; + return nullptr; return ArrayType::get(ElementTy, NumElements); } StructType *STy = dyn_cast(Ty); if (!STy) - return 0; + return nullptr; - const StructLayout *SL = TD.getStructLayout(STy); + const StructLayout *SL = DL.getStructLayout(STy); if (Offset >= SL->getSizeInBytes()) - return 0; + return nullptr; uint64_t EndOffset = Offset + Size; if (EndOffset > SL->getSizeInBytes()) - return 0; + return nullptr; unsigned Index = SL->getElementContainingOffset(Offset); Offset -= SL->getElementOffset(Index); Type *ElementTy = STy->getElementType(Index); - uint64_t ElementSize = TD.getTypeAllocSize(ElementTy); + uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); if (Offset >= ElementSize) - return 0; // The offset points into alignment padding. + return nullptr; // The offset points into alignment padding. // See if any partition must be contained by the element. if (Offset > 0 || Size < ElementSize) { if ((Offset + Size) > ElementSize) - return 0; - return getTypePartition(TD, ElementTy, Offset, Size); + return nullptr; + return getTypePartition(DL, ElementTy, Offset, Size); } assert(Offset == 0); if (Size == ElementSize) - return stripAggregateTypeWrapping(TD, ElementTy); + return stripAggregateTypeWrapping(DL, ElementTy); StructType::element_iterator EI = STy->element_begin() + Index, EE = STy->element_end(); if (EndOffset < SL->getSizeInBytes()) { unsigned EndIndex = SL->getElementContainingOffset(EndOffset); if (Index == EndIndex) - return 0; // Within a single element and its padding. + return nullptr; // Within a single element and its padding. // Don't try to form "natural" types if the elements don't line up with the // expected size. // FIXME: We could potentially recurse down through the last element in the // sub-struct to find a natural end point. if (SL->getElementOffset(EndIndex) != EndOffset) - return 0; + return nullptr; assert(Index < EndIndex); EE = STy->element_begin() + EndIndex; } // Try to build up a sub-structure. - StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE), - STy->isPacked()); - const StructLayout *SubSL = TD.getStructLayout(SubTy); + StructType *SubTy = + StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked()); + const StructLayout *SubSL = DL.getStructLayout(SubTy); if (Size != SubSL->getSizeInBytes()) - return 0; // The sub-struct doesn't have quite the size needed. + return nullptr; // The sub-struct doesn't have quite the size needed. return SubTy; } +/// \brief Pre-split loads and stores to simplify rewriting. +/// +/// We want to break up the splittable load+store pairs as much as +/// possible. This is important to do as a preprocessing step, as once we +/// start rewriting the accesses to partitions of the alloca we lose the +/// necessary information to correctly split apart paired loads and stores +/// which both point into this alloca. 
The case to consider is something like
+/// the following:
+///
+///   %a = alloca [12 x i8]
+///   %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
+///   %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
+///   %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
+///   %iptr1 = bitcast i8* %gep1 to i64*
+///   %iptr2 = bitcast i8* %gep2 to i64*
+///   %fptr1 = bitcast i8* %gep1 to float*
+///   %fptr2 = bitcast i8* %gep2 to float*
+///   %fptr3 = bitcast i8* %gep3 to float*
+///   store float 0.0, float* %fptr1
+///   store float 1.0, float* %fptr2
+///   %v = load i64* %iptr1
+///   store i64 %v, i64* %iptr2
+///   %f1 = load float* %fptr2
+///   %f2 = load float* %fptr3
+///
+/// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
+/// promote everything so we recover the 2 SSA values that should have been
+/// there all along.
+///
+/// \returns true if any changes are made.
+bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
+  DEBUG(dbgs() << "Pre-splitting loads and stores\n");
+
+  // Track the loads and stores which are candidates for pre-splitting here, in
+  // the order they first appear during the partition scan. These give stable
+  // iteration order and a basis for tracking which loads and stores we
+  // actually split.
+  SmallVector<LoadInst *, 4> Loads;
+  SmallVector<StoreInst *, 4> Stores;
+
+  // We need to accumulate the splits required of each load or store where we
+  // can find them via a direct lookup. This is important to cross-check loads
+  // and stores against each other. We also track the slice so that we can kill
+  // all the slices that end up split.
+  struct SplitOffsets {
+    Slice *S;
+    std::vector<uint64_t> Splits;
+  };
+  SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;
+
+  // Track loads out of this alloca which cannot, for any reason, be pre-split.
+  // This is important as we also cannot pre-split stores of those loads!
+  // FIXME: This is all pretty gross. It means that we can be more aggressive
+  // in pre-splitting when the load feeding the store happens to come from
+  // a separate alloca. Put another way, the effectiveness of SROA would be
+  // decreased by a frontend which just concatenated all of its local allocas
+  // into one big flat alloca. But defeating such patterns is exactly the job
+  // SROA is tasked with! Sadly, to not have this discrepancy we would have to
+  // change store pre-splitting to actually force pre-splitting of the load
+  // that feeds it *and all stores*. That makes pre-splitting much harder, but
+  // maybe it would make it more principled?
+  SmallPtrSet<LoadInst *, 8> UnsplittableLoads;
+
+  DEBUG(dbgs() << "  Searching for candidate loads and stores\n");
+  for (auto &P : AS.partitions()) {
+    for (Slice &S : P) {
+      Instruction *I = cast<Instruction>(S.getUse()->getUser());
+      if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
+        // If this was a load we have to track that it can't participate in any
+        // pre-splitting!
+        if (auto *LI = dyn_cast<LoadInst>(I))
+          UnsplittableLoads.insert(LI);
+        continue;
+      }
+      assert(P.endOffset() > S.beginOffset() &&
+             "Empty or backwards partition!");
+
+      // Determine if this is a pre-splittable slice.
+      if (auto *LI = dyn_cast<LoadInst>(I)) {
+        assert(!LI->isVolatile() && "Cannot split volatile loads!");
+
+        // The load must be used exclusively to store into other pointers for
+        // us to be able to arbitrarily pre-split it. The stores must also be
+        // simple to avoid changing semantics.
+ auto IsLoadSimplyStored = [](LoadInst *LI) { + for (User *LU : LI->users()) { + auto *SI = dyn_cast(LU); + if (!SI || !SI->isSimple()) + return false; + } + return true; + }; + if (!IsLoadSimplyStored(LI)) { + UnsplittableLoads.insert(LI); + continue; + } + + Loads.push_back(LI); + } else if (auto *SI = dyn_cast(S.getUse()->getUser())) { + if (!SI || + S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex())) + continue; + auto *StoredLoad = dyn_cast(SI->getValueOperand()); + if (!StoredLoad || !StoredLoad->isSimple()) + continue; + assert(!SI->isVolatile() && "Cannot split volatile stores!"); + + Stores.push_back(SI); + } else { + // Other uses cannot be pre-split. + continue; + } + + // Record the initial split. + DEBUG(dbgs() << " Candidate: " << *I << "\n"); + auto &Offsets = SplitOffsetsMap[I]; + assert(Offsets.Splits.empty() && + "Should not have splits the first time we see an instruction!"); + Offsets.S = &S; + Offsets.Splits.push_back(P.endOffset() - S.beginOffset()); + } + + // Now scan the already split slices, and add a split for any of them which + // we're going to pre-split. + for (Slice *S : P.splitSliceTails()) { + auto SplitOffsetsMapI = + SplitOffsetsMap.find(cast(S->getUse()->getUser())); + if (SplitOffsetsMapI == SplitOffsetsMap.end()) + continue; + auto &Offsets = SplitOffsetsMapI->second; + + assert(Offsets.S == S && "Found a mismatched slice!"); + assert(!Offsets.Splits.empty() && + "Cannot have an empty set of splits on the second partition!"); + assert(Offsets.Splits.back() == + P.beginOffset() - Offsets.S->beginOffset() && + "Previous split does not end where this one begins!"); + + // Record each split. The last partition's end isn't needed as the size + // of the slice dictates that. + if (S->endOffset() > P.endOffset()) + Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset()); + } + } + + // We may have split loads where some of their stores are split stores. For + // such loads and stores, we can only pre-split them if their splits exactly + // match relative to their starting offset. We have to verify this prior to + // any rewriting. + Stores.erase( + std::remove_if(Stores.begin(), Stores.end(), + [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) { + // Lookup the load we are storing in our map of split + // offsets. + auto *LI = cast(SI->getValueOperand()); + // If it was completely unsplittable, then we're done, + // and this store can't be pre-split. + if (UnsplittableLoads.count(LI)) + return true; + + auto LoadOffsetsI = SplitOffsetsMap.find(LI); + if (LoadOffsetsI == SplitOffsetsMap.end()) + return false; // Unrelated loads are definitely safe. + auto &LoadOffsets = LoadOffsetsI->second; + + // Now lookup the store's offsets. + auto &StoreOffsets = SplitOffsetsMap[SI]; + + // If the relative offsets of each split in the load and + // store match exactly, then we can split them and we + // don't need to remove them here. + if (LoadOffsets.Splits == StoreOffsets.Splits) + return false; + + DEBUG(dbgs() + << " Mismatched splits for load and store:\n" + << " " << *LI << "\n" + << " " << *SI << "\n"); + + // We've found a store and load that we need to split + // with mismatched relative splits. Just give up on them + // and remove both instructions from our list of + // candidates. 
+ UnsplittableLoads.insert(LI); + return true; + }), + Stores.end()); + // Now we have to go *back* through all the stores, because a later store may + // have caused an earlier store's load to become unsplittable and if it is + // unsplittable for the later store, then we can't rely on it being split in + // the earlier store either. + Stores.erase(std::remove_if(Stores.begin(), Stores.end(), + [&UnsplittableLoads](StoreInst *SI) { + auto *LI = + cast(SI->getValueOperand()); + return UnsplittableLoads.count(LI); + }), + Stores.end()); + // Once we've established all the loads that can't be split for some reason, + // filter any that made it into our list out. + Loads.erase(std::remove_if(Loads.begin(), Loads.end(), + [&UnsplittableLoads](LoadInst *LI) { + return UnsplittableLoads.count(LI); + }), + Loads.end()); + + + // If no loads or stores are left, there is no pre-splitting to be done for + // this alloca. + if (Loads.empty() && Stores.empty()) + return false; + + // From here on, we can't fail and will be building new accesses, so rig up + // an IR builder. + IRBuilderTy IRB(&AI); + + // Collect the new slices which we will merge into the alloca slices. + SmallVector NewSlices; + + // Track any allocas we end up splitting loads and stores for so we iterate + // on them. + SmallPtrSet ResplitPromotableAllocas; + + // At this point, we have collected all of the loads and stores we can + // pre-split, and the specific splits needed for them. We actually do the + // splitting in a specific order in order to handle when one of the loads in + // the value operand to one of the stores. + // + // First, we rewrite all of the split loads, and just accumulate each split + // load in a parallel structure. We also build the slices for them and append + // them to the alloca slices. + SmallDenseMap, 1> SplitLoadsMap; + std::vector SplitLoads; + const DataLayout &DL = AI.getModule()->getDataLayout(); + for (LoadInst *LI : Loads) { + SplitLoads.clear(); + + IntegerType *Ty = cast(LI->getType()); + uint64_t LoadSize = Ty->getBitWidth() / 8; + assert(LoadSize > 0 && "Cannot have a zero-sized integer load!"); + + auto &Offsets = SplitOffsetsMap[LI]; + assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && + "Slice size should always match load size exactly!"); + uint64_t BaseOffset = Offsets.S->beginOffset(); + assert(BaseOffset + LoadSize > BaseOffset && + "Cannot represent alloca access size using 64-bit integers!"); + + Instruction *BasePtr = cast(LI->getPointerOperand()); + IRB.SetInsertPoint(BasicBlock::iterator(LI)); + + DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); + + uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); + int Idx = 0, Size = Offsets.Splits.size(); + for (;;) { + auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); + auto *PartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace()); + LoadInst *PLoad = IRB.CreateAlignedLoad( + getAdjustedPtr(IRB, DL, BasePtr, + APInt(DL.getPointerSizeInBits(), PartOffset), + PartPtrTy, BasePtr->getName() + "."), + getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false, + LI->getName()); + + // Append this load onto the list of split loads so we can find it later + // to rewrite the stores. + SplitLoads.push_back(PLoad); + + // Now build a new slice for the alloca. 
+ NewSlices.push_back( + Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, + &PLoad->getOperandUse(PLoad->getPointerOperandIndex()), + /*IsSplittable*/ false)); + DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() + << ", " << NewSlices.back().endOffset() << "): " << *PLoad + << "\n"); + + // See if we've handled all the splits. + if (Idx >= Size) + break; + + // Setup the next partition. + PartOffset = Offsets.Splits[Idx]; + ++Idx; + PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset; + } + + // Now that we have the split loads, do the slow walk over all uses of the + // load and rewrite them as split stores, or save the split loads to use + // below if the store is going to be split there anyways. + bool DeferredStores = false; + for (User *LU : LI->users()) { + StoreInst *SI = cast(LU); + if (!Stores.empty() && SplitOffsetsMap.count(SI)) { + DeferredStores = true; + DEBUG(dbgs() << " Deferred splitting of store: " << *SI << "\n"); + continue; + } + + Value *StoreBasePtr = SI->getPointerOperand(); + IRB.SetInsertPoint(BasicBlock::iterator(SI)); + + DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); + + for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) { + LoadInst *PLoad = SplitLoads[Idx]; + uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1]; + auto *PartPtrTy = + PLoad->getType()->getPointerTo(SI->getPointerAddressSpace()); + + StoreInst *PStore = IRB.CreateAlignedStore( + PLoad, getAdjustedPtr(IRB, DL, StoreBasePtr, + APInt(DL.getPointerSizeInBits(), PartOffset), + PartPtrTy, StoreBasePtr->getName() + "."), + getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); + (void)PStore; + DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n"); + } + + // We want to immediately iterate on any allocas impacted by splitting + // this store, and we have to track any promotable alloca (indicated by + // a direct store) as needing to be resplit because it is no longer + // promotable. + if (AllocaInst *OtherAI = dyn_cast(StoreBasePtr)) { + ResplitPromotableAllocas.insert(OtherAI); + Worklist.insert(OtherAI); + } else if (AllocaInst *OtherAI = dyn_cast( + StoreBasePtr->stripInBoundsOffsets())) { + Worklist.insert(OtherAI); + } + + // Mark the original store as dead. + DeadInsts.insert(SI); + } + + // Save the split loads if there are deferred stores among the users. + if (DeferredStores) + SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads))); + + // Mark the original load as dead and kill the original slice. + DeadInsts.insert(LI); + Offsets.S->kill(); + } + + // Second, we rewrite all of the split stores. At this point, we know that + // all loads from this alloca have been split already. For stores of such + // loads, we can simply look up the pre-existing split loads. For stores of + // other loads, we split those loads first and then write split stores of + // them. 
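The loop above (and the store-splitting loop later in this function) walks Offsets.Splits as interior cut points measured from the access's begin offset: the first part covers [0, Splits[0]), each later part spans two consecutive splits, and the final part ends at the access size. A standalone sketch of that decomposition; the helper name splitParts is illustrative and not part of the patch.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Decompose an access of TotalSize bytes at the interior cut points in Splits
// (offsets relative to the access's begin), mirroring how the pre-splitting
// loops walk PartOffset/PartSize.
static std::vector<std::pair<uint64_t, uint64_t>>
splitParts(const std::vector<uint64_t> &Splits, uint64_t TotalSize) {
  assert(!Splits.empty() && "pre-split candidates always have at least one cut");
  std::vector<std::pair<uint64_t, uint64_t>> Parts;
  uint64_t PartOffset = 0, PartSize = Splits.front();
  std::size_t Idx = 0, Size = Splits.size();
  for (;;) {
    Parts.emplace_back(PartOffset, PartSize);
    if (Idx >= Size)
      break;
    PartOffset = Splits[Idx];
    ++Idx;
    PartSize = (Idx < Size ? Splits[Idx] : TotalSize) - PartOffset;
  }
  return Parts;
}

int main() {
  // The i64 load in the comment at the top of presplitLoadsAndStores covers
  // bytes [0, 8) of the alloca and is cut at the partition boundary 4, so it
  // becomes two 4-byte parts.
  auto Parts = splitParts({4}, 8);
  assert(Parts.size() == 2);
  assert(Parts[0].first == 0 && Parts[0].second == 4);
  assert(Parts[1].first == 4 && Parts[1].second == 4);
  return 0;
}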
+ for (StoreInst *SI : Stores) { + auto *LI = cast(SI->getValueOperand()); + IntegerType *Ty = cast(LI->getType()); + uint64_t StoreSize = Ty->getBitWidth() / 8; + assert(StoreSize > 0 && "Cannot have a zero-sized integer store!"); + + auto &Offsets = SplitOffsetsMap[SI]; + assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && + "Slice size should always match load size exactly!"); + uint64_t BaseOffset = Offsets.S->beginOffset(); + assert(BaseOffset + StoreSize > BaseOffset && + "Cannot represent alloca access size using 64-bit integers!"); + + Value *LoadBasePtr = LI->getPointerOperand(); + Instruction *StoreBasePtr = cast(SI->getPointerOperand()); + + DEBUG(dbgs() << " Splitting store: " << *SI << "\n"); + + // Check whether we have an already split load. + auto SplitLoadsMapI = SplitLoadsMap.find(LI); + std::vector *SplitLoads = nullptr; + if (SplitLoadsMapI != SplitLoadsMap.end()) { + SplitLoads = &SplitLoadsMapI->second; + assert(SplitLoads->size() == Offsets.Splits.size() + 1 && + "Too few split loads for the number of splits in the store!"); + } else { + DEBUG(dbgs() << " of load: " << *LI << "\n"); + } + + uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); + int Idx = 0, Size = Offsets.Splits.size(); + for (;;) { + auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); + auto *PartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace()); + + // Either lookup a split load or create one. + LoadInst *PLoad; + if (SplitLoads) { + PLoad = (*SplitLoads)[Idx]; + } else { + IRB.SetInsertPoint(BasicBlock::iterator(LI)); + PLoad = IRB.CreateAlignedLoad( + getAdjustedPtr(IRB, DL, LoadBasePtr, + APInt(DL.getPointerSizeInBits(), PartOffset), + PartPtrTy, LoadBasePtr->getName() + "."), + getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false, + LI->getName()); + } + + // And store this partition. + IRB.SetInsertPoint(BasicBlock::iterator(SI)); + StoreInst *PStore = IRB.CreateAlignedStore( + PLoad, getAdjustedPtr(IRB, DL, StoreBasePtr, + APInt(DL.getPointerSizeInBits(), PartOffset), + PartPtrTy, StoreBasePtr->getName() + "."), + getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); + + // Now build a new slice for the alloca. + NewSlices.push_back( + Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, + &PStore->getOperandUse(PStore->getPointerOperandIndex()), + /*IsSplittable*/ false)); + DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() + << ", " << NewSlices.back().endOffset() << "): " << *PStore + << "\n"); + if (!SplitLoads) { + DEBUG(dbgs() << " of split load: " << *PLoad << "\n"); + } + + // See if we've finished all the splits. + if (Idx >= Size) + break; + + // Setup the next partition. + PartOffset = Offsets.Splits[Idx]; + ++Idx; + PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset; + } + + // We want to immediately iterate on any allocas impacted by splitting + // this load, which is only relevant if it isn't a load of this alloca and + // thus we didn't already split the loads above. We also have to keep track + // of any promotable allocas we split loads on as they can no longer be + // promoted. 
+    if (!SplitLoads) {
+      if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) {
+        assert(OtherAI != &AI && "We can't re-split our own alloca!");
+        ResplitPromotableAllocas.insert(OtherAI);
+        Worklist.insert(OtherAI);
+      } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
+                     LoadBasePtr->stripInBoundsOffsets())) {
+        assert(OtherAI != &AI && "We can't re-split our own alloca!");
+        Worklist.insert(OtherAI);
+      }
+    }
+
+    // Mark the original store as dead now that we've split it up and kill its
+    // slice. Note that we leave the original load in place unless this store
+    // was its only use. It may in turn be split up if it is an alloca load
+    // for some other alloca, but it may be a normal load. This may introduce
+    // redundant loads, but where those can be merged the rest of the optimizer
+    // should handle the merging, and this uncovers SSA splits which is more
+    // important. In practice, the original loads will almost always be fully
+    // split and removed eventually, and the splits will be merged by any
+    // trivial CSE, including instcombine.
+    if (LI->hasOneUse()) {
+      assert(*LI->user_begin() == SI && "Single use isn't this store!");
+      DeadInsts.insert(LI);
+    }
+    DeadInsts.insert(SI);
+    Offsets.S->kill();
+  }
+
+  // Remove the killed slices that have been pre-split.
+  AS.erase(std::remove_if(AS.begin(), AS.end(), [](const Slice &S) {
+    return S.isDead();
+  }), AS.end());
+
+  // Insert our new slices. This will sort and merge them into the sorted
+  // sequence.
+  AS.insert(NewSlices);
+
+  DEBUG(dbgs() << "  Pre-split slices:\n");
+#ifndef NDEBUG
+  for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
+    DEBUG(AS.print(dbgs(), I, "    "));
+#endif
+
+  // Finally, don't try to promote any allocas that now require re-splitting.
+  // They have already been added to the worklist above.
+  PromotableAllocas.erase(
+      std::remove_if(
+          PromotableAllocas.begin(), PromotableAllocas.end(),
+          [&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }),
+      PromotableAllocas.end());
+
+  return true;
+}
+
 /// \brief Rewrite an alloca partition's users.
 ///
 /// This routine drives both of the rewriting goals of the SROA pass. It tries
@@ -3277,121 +3911,270 @@ static Type *getTypePartition(const DataLayout &TD, Type *Ty,
 /// appropriate new offsets. It also evaluates how successful the rewrite was
 /// at enabling promotion and if it was successful queues the alloca to be
 /// promoted.
-bool SROA::rewriteAllocaPartition(AllocaInst &AI,
-                                  AllocaPartitioning &P,
-                                  AllocaPartitioning::iterator PI) {
-  uint64_t AllocaSize = PI->EndOffset - PI->BeginOffset;
-  bool IsLive = false;
-  for (AllocaPartitioning::use_iterator UI = P.use_begin(PI),
-                                        UE = P.use_end(PI);
-       UI != UE && !IsLive; ++UI)
-    if (UI->U)
-      IsLive = true;
-  if (!IsLive)
-    return false; // No live uses left of this partition.
-
-  DEBUG(dbgs() << "Speculating PHIs and selects in partition "
-               << "[" << PI->BeginOffset << "," << PI->EndOffset << ")\n");
-
-  PHIOrSelectSpeculator Speculator(*TD, P, *this);
-  DEBUG(dbgs() << "  speculating ");
-  DEBUG(P.print(dbgs(), PI, ""));
-  Speculator.visitUsers(PI);
-
+AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
+                                   AllocaSlices::Partition &P) {
   // Try to compute a friendly type for this partition of the alloca. This
   // won't always succeed, in which case we fall back to a legal integer type
   // or an i8 array of an appropriate size.
- Type *AllocaTy = 0; - if (Type *PartitionTy = P.getCommonType(PI)) - if (TD->getTypeAllocSize(PartitionTy) >= AllocaSize) - AllocaTy = PartitionTy; - if (!AllocaTy) - if (Type *PartitionTy = getTypePartition(*TD, AI.getAllocatedType(), - PI->BeginOffset, AllocaSize)) - AllocaTy = PartitionTy; - if ((!AllocaTy || - (AllocaTy->isArrayTy() && - AllocaTy->getArrayElementType()->isIntegerTy())) && - TD->isLegalInteger(AllocaSize * 8)) - AllocaTy = Type::getIntNTy(*C, AllocaSize * 8); - if (!AllocaTy) - AllocaTy = ArrayType::get(Type::getInt8Ty(*C), AllocaSize); - assert(TD->getTypeAllocSize(AllocaTy) >= AllocaSize); + Type *SliceTy = nullptr; + const DataLayout &DL = AI.getModule()->getDataLayout(); + if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset())) + if (DL.getTypeAllocSize(CommonUseTy) >= P.size()) + SliceTy = CommonUseTy; + if (!SliceTy) + if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(), + P.beginOffset(), P.size())) + SliceTy = TypePartitionTy; + if ((!SliceTy || (SliceTy->isArrayTy() && + SliceTy->getArrayElementType()->isIntegerTy())) && + DL.isLegalInteger(P.size() * 8)) + SliceTy = Type::getIntNTy(*C, P.size() * 8); + if (!SliceTy) + SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size()); + assert(DL.getTypeAllocSize(SliceTy) >= P.size()); + + bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL); + + VectorType *VecTy = + IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL); + if (VecTy) + SliceTy = VecTy; // Check for the case where we're going to rewrite to a new alloca of the // exact same type as the original, and with the same access offsets. In that // case, re-use the existing alloca, but still run through the rewriter to - // performe phi and select speculation. + // perform phi and select speculation. AllocaInst *NewAI; - if (AllocaTy == AI.getAllocatedType()) { - assert(PI->BeginOffset == 0 && + if (SliceTy == AI.getAllocatedType()) { + assert(P.beginOffset() == 0 && "Non-zero begin offset but same alloca type"); - assert(PI == P.begin() && "Begin offset is zero on later partition"); NewAI = &AI; + // FIXME: We should be able to bail at this point with "nothing changed". + // FIXME: We might want to defer PHI speculation until after here. + // FIXME: return nullptr; } else { unsigned Alignment = AI.getAlignment(); if (!Alignment) { // The minimum alignment which users can rely on when the explicit // alignment is omitted or zero is that required by the ABI for this // type. - Alignment = TD->getABITypeAlignment(AI.getAllocatedType()); + Alignment = DL.getABITypeAlignment(AI.getAllocatedType()); } - Alignment = MinAlign(Alignment, PI->BeginOffset); + Alignment = MinAlign(Alignment, P.beginOffset()); // If we will get at least this much alignment from the type alone, leave // the alloca's alignment unconstrained. - if (Alignment <= TD->getABITypeAlignment(AllocaTy)) + if (Alignment <= DL.getABITypeAlignment(SliceTy)) Alignment = 0; - NewAI = new AllocaInst(AllocaTy, 0, Alignment, - AI.getName() + ".sroa." + Twine(PI - P.begin()), - &AI); + NewAI = new AllocaInst( + SliceTy, nullptr, Alignment, + AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI); ++NumNewAllocas; } DEBUG(dbgs() << "Rewriting alloca partition " - << "[" << PI->BeginOffset << "," << PI->EndOffset << ") to: " - << *NewAI << "\n"); + << "[" << P.beginOffset() << "," << P.endOffset() + << ") to: " << *NewAI << "\n"); - // Track the high watermark of the post-promotion worklist. 
We will reset it
-  // to this point if the alloca is not in fact scheduled for promotion.
+  // Track the high watermark on the worklist as it is only relevant for
+  // promoted allocas. We will reset it to this point if the alloca is not in
+  // fact scheduled for promotion.
   unsigned PPWOldSize = PostPromotionWorklist.size();
+  unsigned NumUses = 0;
+  SmallPtrSet<PHINode *, 8> PHIUsers;
+  SmallPtrSet<SelectInst *, 8> SelectUsers;
+
+  AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
+                               P.endOffset(), IsIntegerPromotable, VecTy,
+                               PHIUsers, SelectUsers);
+  bool Promotable = true;
+  for (Slice *S : P.splitSliceTails()) {
+    Promotable &= Rewriter.visit(S);
+    ++NumUses;
+  }
+  for (Slice &S : P) {
+    Promotable &= Rewriter.visit(&S);
+    ++NumUses;
+  }
+
+  NumAllocaPartitionUses += NumUses;
+  MaxUsesPerAllocaPartition =
+      std::max<unsigned>(NumUses, MaxUsesPerAllocaPartition);
+
+  // Now that we've processed all the slices in the new partition, check if any
+  // PHIs or Selects would block promotion.
+  for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(),
+                                            E = PHIUsers.end();
+       I != E; ++I)
+    if (!isSafePHIToSpeculate(**I)) {
+      Promotable = false;
+      PHIUsers.clear();
+      SelectUsers.clear();
+      break;
+    }
+  for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(),
+                                               E = SelectUsers.end();
+       I != E; ++I)
+    if (!isSafeSelectToSpeculate(**I)) {
+      Promotable = false;
+      PHIUsers.clear();
+      SelectUsers.clear();
+      break;
+    }

-  AllocaPartitionRewriter Rewriter(*TD, P, PI, *this, AI, *NewAI,
-                                   PI->BeginOffset, PI->EndOffset);
-  DEBUG(dbgs() << "  rewriting ");
-  DEBUG(P.print(dbgs(), PI, ""));
-  bool Promotable = Rewriter.visitUsers(P.use_begin(PI), P.use_end(PI));
   if (Promotable) {
-    DEBUG(dbgs() << "  and queuing for promotion\n");
-    PromotableAllocas.push_back(NewAI);
-  } else if (NewAI != &AI) {
+    if (PHIUsers.empty() && SelectUsers.empty()) {
+      // Promote the alloca.
+      PromotableAllocas.push_back(NewAI);
+    } else {
+      // If we have either PHIs or Selects to speculate, add them to those
+      // worklists and re-queue the new alloca so that we promote it on the
+      // next iteration.
+      for (PHINode *PHIUser : PHIUsers)
+        SpeculatablePHIs.insert(PHIUser);
+      for (SelectInst *SelectUser : SelectUsers)
+        SpeculatableSelects.insert(SelectUser);
+      Worklist.insert(NewAI);
+    }
+  } else {
     // If we can't promote the alloca, iterate on it to check for new
     // refinements exposed by splitting the current alloca. Don't iterate on an
     // alloca which didn't actually change and didn't get promoted.
-    Worklist.insert(NewAI);
-  }
+    if (NewAI != &AI)
+      Worklist.insert(NewAI);

-  // Drop any post-promotion work items if promotion didn't happen.
-  if (!Promotable)
+    // Drop any post-promotion work items if promotion didn't happen.
     while (PostPromotionWorklist.size() > PPWOldSize)
       PostPromotionWorklist.pop_back();
+  }

-  return true;
+  return NewAI;
 }

-/// \brief Walks the partitioning of an alloca rewriting uses of each partition.
-bool SROA::splitAlloca(AllocaInst &AI, AllocaPartitioning &P) {
+/// \brief Walks the slices of an alloca and forms partitions based on them,
+/// rewriting each of their uses.
+bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
+  if (AS.begin() == AS.end())
+    return false;
+
+  unsigned NumPartitions = 0;
   bool Changed = false;
-  for (AllocaPartitioning::iterator PI = P.begin(), PE = P.end(); PI != PE;
-       ++PI)
-    Changed |= rewriteAllocaPartition(AI, P, PI);
+  const DataLayout &DL = AI.getModule()->getDataLayout();
+
+  // First try to pre-split loads and stores.
+ Changed |= presplitLoadsAndStores(AI, AS); + + // Now that we have identified any pre-splitting opportunities, mark any + // splittable (non-whole-alloca) loads and stores as unsplittable. If we fail + // to split these during pre-splitting, we want to force them to be + // rewritten into a partition. + bool IsSorted = true; + for (Slice &S : AS) { + if (!S.isSplittable()) + continue; + // FIXME: We currently leave whole-alloca splittable loads and stores. This + // used to be the only splittable loads and stores and we need to be + // confident that the above handling of splittable loads and stores is + // completely sufficient before we forcibly disable the remaining handling. + if (S.beginOffset() == 0 && + S.endOffset() >= DL.getTypeAllocSize(AI.getAllocatedType())) + continue; + if (isa(S.getUse()->getUser()) || + isa(S.getUse()->getUser())) { + S.makeUnsplittable(); + IsSorted = false; + } + } + if (!IsSorted) + std::sort(AS.begin(), AS.end()); + + /// \brief Describes the allocas introduced by rewritePartition + /// in order to migrate the debug info. + struct Piece { + AllocaInst *Alloca; + uint64_t Offset; + uint64_t Size; + Piece(AllocaInst *AI, uint64_t O, uint64_t S) + : Alloca(AI), Offset(O), Size(S) {} + }; + SmallVector Pieces; + + // Rewrite each partition. + for (auto &P : AS.partitions()) { + if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) { + Changed = true; + if (NewAI != &AI) { + uint64_t SizeOfByte = 8; + uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType()); + // Don't include any padding. + uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte); + Pieces.push_back(Piece(NewAI, P.beginOffset() * SizeOfByte, Size)); + } + } + ++NumPartitions; + } + + NumAllocaPartitions += NumPartitions; + MaxPartitionsPerAlloca = + std::max(NumPartitions, MaxPartitionsPerAlloca); + + // Migrate debug information from the old alloca to the new alloca(s) + // and the individual partitions. + if (DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(&AI)) { + auto *Var = DbgDecl->getVariable(); + auto *Expr = DbgDecl->getExpression(); + DIBuilder DIB(*AI.getParent()->getParent()->getParent(), + /*AllowUnresolved*/ false); + bool IsSplit = Pieces.size() > 1; + for (auto Piece : Pieces) { + // Create a piece expression describing the new partition or reuse AI's + // expression if there is only one partition. + auto *PieceExpr = Expr; + if (IsSplit || Expr->isBitPiece()) { + // If this alloca is already a scalar replacement of a larger aggregate, + // Piece.Offset describes the offset inside the scalar. + uint64_t Offset = Expr->isBitPiece() ? Expr->getBitPieceOffset() : 0; + uint64_t Start = Offset + Piece.Offset; + uint64_t Size = Piece.Size; + if (Expr->isBitPiece()) { + uint64_t AbsEnd = Expr->getBitPieceOffset() + Expr->getBitPieceSize(); + if (Start >= AbsEnd) + // No need to describe a SROAed padding. + continue; + Size = std::min(Size, AbsEnd - Start); + } + PieceExpr = DIB.createBitPieceExpression(Start, Size); + } + // Remove any existing dbg.declare intrinsic describing the same alloca. + if (DbgDeclareInst *OldDDI = FindAllocaDbgDeclare(Piece.Alloca)) + OldDDI->eraseFromParent(); + + DIB.insertDeclare(Piece.Alloca, Var, PieceExpr, DbgDecl->getDebugLoc(), + &AI); + } + } return Changed; } +/// \brief Clobber a use with undef, deleting the used value if it becomes dead. +void SROA::clobberUse(Use &U) { + Value *OldV = U; + // Replace the use with an undef value. + U = UndefValue::get(OldV->getType()); + + // Check for this making an instruction dead. 
We have to garbage collect + // all the dead instructions to ensure the uses of any alloca end up being + // minimal. + if (Instruction *OldI = dyn_cast(OldV)) + if (isInstructionTriviallyDead(OldI)) { + DeadInsts.insert(OldI); + } +} + /// \brief Analyze an alloca for SROA. /// /// This analyzes the alloca to ensure we can reason about it, builds -/// a partitioning of the alloca, and then hands it off to be split and +/// the slices of the alloca, and then hands it off to be split and /// rewritten as needed. bool SROA::runOnAlloca(AllocaInst &AI) { DEBUG(dbgs() << "SROA alloca: " << AI << "\n"); @@ -3402,51 +4185,59 @@ bool SROA::runOnAlloca(AllocaInst &AI) { AI.eraseFromParent(); return true; } + const DataLayout &DL = AI.getModule()->getDataLayout(); // Skip alloca forms that this analysis can't handle. if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() || - TD->getTypeAllocSize(AI.getAllocatedType()) == 0) + DL.getTypeAllocSize(AI.getAllocatedType()) == 0) return false; bool Changed = false; // First, split any FCA loads and stores touching this alloca to promote // better splitting and promotion opportunities. - AggLoadStoreRewriter AggRewriter(*TD); + AggLoadStoreRewriter AggRewriter(DL); Changed |= AggRewriter.rewrite(AI); - // Build the partition set using a recursive instruction-visiting builder. - AllocaPartitioning P(*TD, AI); - DEBUG(P.print(dbgs())); - if (P.isEscaped()) + // Build the slices using a recursive instruction-visiting builder. + AllocaSlices AS(DL, AI); + DEBUG(AS.print(dbgs())); + if (AS.isEscaped()) return Changed; // Delete all the dead users of this alloca before splitting and rewriting it. - for (AllocaPartitioning::dead_user_iterator DI = P.dead_user_begin(), - DE = P.dead_user_end(); - DI != DE; ++DI) { + for (Instruction *DeadUser : AS.getDeadUsers()) { + // Free up everything used by this instruction. + for (Use &DeadOp : DeadUser->operands()) + clobberUse(DeadOp); + + // Now replace the uses of this instruction. + DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType())); + + // And mark it for deletion. + DeadInsts.insert(DeadUser); + Changed = true; + } + for (Use *DeadOp : AS.getDeadOperands()) { + clobberUse(*DeadOp); Changed = true; - (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType())); - DeadInsts.push_back(*DI); - } - for (AllocaPartitioning::dead_op_iterator DO = P.dead_op_begin(), - DE = P.dead_op_end(); - DO != DE; ++DO) { - Value *OldV = **DO; - // Clobber the use with an undef value. - **DO = UndefValue::get(OldV->getType()); - if (Instruction *OldI = dyn_cast(OldV)) - if (isInstructionTriviallyDead(OldI)) { - Changed = true; - DeadInsts.push_back(OldI); - } } - // No partitions to split. Leave the dead alloca for a later pass to clean up. - if (P.begin() == P.end()) + // No slices to split. Leave the dead alloca for a later pass to clean up. + if (AS.begin() == AS.end()) return Changed; - return splitAlloca(AI, P) || Changed; + Changed |= splitAlloca(AI, AS); + + DEBUG(dbgs() << " Speculating PHIs\n"); + while (!SpeculatablePHIs.empty()) + speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val()); + + DEBUG(dbgs() << " Speculating Selects\n"); + while (!SpeculatableSelects.empty()) + speculateSelectInstLoads(*SpeculatableSelects.pop_back_val()); + + return Changed; } /// \brief Delete the dead instructions accumulated in this run. 
@@ -3458,22 +4249,27 @@ bool SROA::runOnAlloca(AllocaInst &AI) { /// /// We also record the alloca instructions deleted here so that they aren't /// subsequently handed to mem2reg to promote. -void SROA::deleteDeadInstructions(SmallPtrSet &DeletedAllocas) { - DeadSplitInsts.clear(); +void SROA::deleteDeadInstructions( + SmallPtrSetImpl &DeletedAllocas) { while (!DeadInsts.empty()) { Instruction *I = DeadInsts.pop_back_val(); DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n"); - for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) - if (Instruction *U = dyn_cast(*OI)) { + I->replaceAllUsesWith(UndefValue::get(I->getType())); + + for (Use &Operand : I->operands()) + if (Instruction *U = dyn_cast(Operand)) { // Zero out the operand and see if it becomes trivially dead. - *OI = 0; + Operand = nullptr; if (isInstructionTriviallyDead(U)) - DeadInsts.push_back(U); + DeadInsts.insert(U); } - if (AllocaInst *AI = dyn_cast(I)) + if (AllocaInst *AI = dyn_cast(I)) { DeletedAllocas.insert(AI); + if (DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(AI)) + DbgDecl->eraseFromParent(); + } ++NumDeleted; I->eraseFromParent(); @@ -3484,91 +4280,34 @@ void SROA::deleteDeadInstructions(SmallPtrSet &DeletedAllocas) { /// /// This attempts to promote whatever allocas have been identified as viable in /// the PromotableAllocas list. If that list is empty, there is nothing to do. -/// If there is a domtree available, we attempt to promote using the full power -/// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is -/// based on the SSAUpdater utilities. This function returns whether any -/// promotion occured. +/// This function returns whether any promotion occurred. bool SROA::promoteAllocas(Function &F) { if (PromotableAllocas.empty()) return false; NumPromoted += PromotableAllocas.size(); - if (DT && !ForceSSAUpdater) { - DEBUG(dbgs() << "Promoting allocas with mem2reg...\n"); - PromoteMemToReg(PromotableAllocas, *DT); - PromotableAllocas.clear(); - return true; - } - - DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n"); - SSAUpdater SSA; - DIBuilder DIB(*F.getParent()); - SmallVector Insts; - - for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) { - AllocaInst *AI = PromotableAllocas[Idx]; - for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end(); - UI != UE;) { - Instruction *I = cast(*UI++); - // FIXME: Currently the SSAUpdater infrastructure doesn't reason about - // lifetime intrinsics and so we strip them (and the bitcasts+GEPs - // leading to them) here. Eventually it should use them to optimize the - // scalar values produced. - if (isa(I) || isa(I)) { - assert(onlyUsedByLifetimeMarkers(I) && - "Found a bitcast used outside of a lifetime marker."); - while (!I->use_empty()) - cast(*I->use_begin())->eraseFromParent(); - I->eraseFromParent(); - continue; - } - if (IntrinsicInst *II = dyn_cast(I)) { - assert(II->getIntrinsicID() == Intrinsic::lifetime_start || - II->getIntrinsicID() == Intrinsic::lifetime_end); - II->eraseFromParent(); - continue; - } - - Insts.push_back(I); - } - AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts); - Insts.clear(); - } - + DEBUG(dbgs() << "Promoting allocas with mem2reg...\n"); + PromoteMemToReg(PromotableAllocas, *DT, nullptr, AC); PromotableAllocas.clear(); return true; } -namespace { - /// \brief A predicate to test whether an alloca belongs to a set. 
- class IsAllocaInSet { - typedef SmallPtrSet SetType; - const SetType &Set; - - public: - typedef AllocaInst *argument_type; - - IsAllocaInSet(const SetType &Set) : Set(Set) {} - bool operator()(AllocaInst *AI) const { return Set.count(AI); } - }; -} - bool SROA::runOnFunction(Function &F) { + if (skipOptnoneFunction(F)) + return false; + DEBUG(dbgs() << "SROA function: " << F.getName() << "\n"); C = &F.getContext(); - TD = getAnalysisIfAvailable(); - if (!TD) { - DEBUG(dbgs() << " Skipping SROA -- no target data!\n"); - return false; - } - DT = getAnalysisIfAvailable(); + DT = &getAnalysis().getDomTree(); + AC = &getAnalysis().getAssumptionCache(F); BasicBlock &EntryBB = F.getEntryBlock(); - for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end()); - I != E; ++I) + for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end()); + I != E; ++I) { if (AllocaInst *AI = dyn_cast(I)) Worklist.insert(AI); + } bool Changed = false; // A set of deleted alloca instruction pointers which should be removed from @@ -3583,11 +4322,12 @@ bool SROA::runOnFunction(Function &F) { // Remove the deleted allocas from various lists so that we don't try to // continue processing them. if (!DeletedAllocas.empty()) { - Worklist.remove_if(IsAllocaInSet(DeletedAllocas)); - PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas)); + auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); }; + Worklist.remove_if(IsInSet); + PostPromotionWorklist.remove_if(IsInSet); PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(), PromotableAllocas.end(), - IsAllocaInSet(DeletedAllocas)), + IsInSet), PromotableAllocas.end()); DeletedAllocas.clear(); } @@ -3603,7 +4343,7 @@ bool SROA::runOnFunction(Function &F) { } void SROA::getAnalysisUsage(AnalysisUsage &AU) const { - if (RequiresDomTree) - AU.addRequired(); + AU.addRequired(); + AU.addRequired(); AU.setPreservesCFG(); }
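For readers skimming this diff without the rest of the file, the observable goal of the pass is unchanged by the rewrite: aggregates whose pieces are only ever loaded and stored are split into scalar allocas and handed to PromoteMemToReg above. A hypothetical source-level example of code that typically ends up fully promoted (the function below is illustrative, not taken from LLVM's test suite):

// The local 'p' usually lowers to an alloca of { i32, i32 } with field-wise
// stores and loads. SROA splits that alloca per field and mem2reg turns both
// fields into SSA values, leaving a single integer add and no stack traffic.
struct Pair {
  int a;
  int b;
};

int sumOfPair(int x, int y) {
  Pair p;
  p.a = x;
  p.b = y;
  return p.a + p.b;
}

int main() { return sumOfPair(2, 3) == 5 ? 0 : 1; }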