X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FSROA.cpp;h=1ab7715168213f245a5e5bf77c9172f736914deb;hp=cb6da3b95615415a169b9c122fd99295afe76c71;hb=56e1394c8861ecdc551815ae875d2c3db2fa9cdb;hpb=ac104272d9fc42af8dc6853853b96d489685e5a7 diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp index cb6da3b9561..1ab77151682 100644 --- a/lib/Transforms/Scalar/SROA.cpp +++ b/lib/Transforms/Scalar/SROA.cpp @@ -25,44 +25,48 @@ #define DEBUG_TYPE "sroa" #include "llvm/Transforms/Scalar.h" -#include "llvm/Constants.h" -#include "llvm/DIBuilder.h" -#include "llvm/DebugInfo.h" -#include "llvm/DerivedTypes.h" -#include "llvm/Function.h" -#include "llvm/IRBuilder.h" -#include "llvm/Instructions.h" -#include "llvm/IntrinsicInst.h" -#include "llvm/LLVMContext.h" -#include "llvm/Module.h" -#include "llvm/Operator.h" -#include "llvm/Pass.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" -#include "llvm/ADT/STLExtras.h" -#include "llvm/Analysis/Dominators.h" #include "llvm/Analysis/Loads.h" +#include "llvm/Analysis/PtrUseVisitor.h" #include "llvm/Analysis/ValueTracking.h" +#include "llvm/DIBuilder.h" +#include "llvm/DebugInfo.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Operator.h" +#include "llvm/InstVisitor.h" +#include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" +#include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/GetElementPtrTypeIterator.h" -#include "llvm/Support/InstVisitor.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/DataLayout.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" #include "llvm/Transforms/Utils/SSAUpdater.h" using namespace llvm; STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement"); -STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); -STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); +STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed"); +STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca"); +STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten"); +STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition"); +STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); +STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion"); -STATISTIC(NumDeleted, "Number of instructions deleted"); -STATISTIC(NumVectorized, "Number of vectorized aggregates"); +STATISTIC(NumDeleted, "Number of instructions deleted"); +STATISTIC(NumVectorized, "Number of vectorized aggregates"); /// Hidden option to force the pass to not use DomTree and mem2reg, instead /// forming SSA values through the SSAUpdater infrastructure. @@ -70,176 +74,144 @@ static cl::opt ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden); namespace { -/// \brief Alloca partitioning representation. 
-/// -/// This class represents a partitioning of an alloca into slices, and -/// information about the nature of uses of each slice of the alloca. The goal -/// is that this information is sufficient to decide if and how to split the -/// alloca apart and replace slices with scalars. It is also intended that this -/// structure can capture the relevant information needed both to decide about -/// and to enact these transformations. -class AllocaPartitioning { +/// \brief A custom IRBuilder inserter which prefixes all names if they are +/// preserved. +template +class IRBuilderPrefixedInserter : + public IRBuilderDefaultInserter { + std::string Prefix; + public: - /// \brief A common base class for representing a half-open byte range. - struct ByteRange { - /// \brief The beginning offset of the range. - uint64_t BeginOffset; + void SetNamePrefix(const Twine &P) { Prefix = P.str(); } - /// \brief The ending offset, not included in the range. - uint64_t EndOffset; +protected: + void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB, + BasicBlock::iterator InsertPt) const { + IRBuilderDefaultInserter::InsertHelper( + I, Name.isTriviallyEmpty() ? Name : Prefix + Name, BB, InsertPt); + } +}; - ByteRange() : BeginOffset(), EndOffset() {} - ByteRange(uint64_t BeginOffset, uint64_t EndOffset) - : BeginOffset(BeginOffset), EndOffset(EndOffset) {} +// Specialization for not preserving the name is trivial. +template <> +class IRBuilderPrefixedInserter : + public IRBuilderDefaultInserter { +public: + void SetNamePrefix(const Twine &P) {} +}; - /// \brief Support for ordering ranges. - /// - /// This provides an ordering over ranges such that start offsets are - /// always increasing, and within equal start offsets, the end offsets are - /// decreasing. Thus the spanning range comes first in a cluster with the - /// same start position. - bool operator<(const ByteRange &RHS) const { - if (BeginOffset < RHS.BeginOffset) return true; - if (BeginOffset > RHS.BeginOffset) return false; - if (EndOffset > RHS.EndOffset) return true; - return false; - } +/// \brief Provide a typedef for IRBuilder that drops names in release builds. +#ifndef NDEBUG +typedef llvm::IRBuilder > IRBuilderTy; +#else +typedef llvm::IRBuilder > IRBuilderTy; +#endif +} - /// \brief Support comparison with a single offset to allow binary searches. - friend bool operator<(const ByteRange &LHS, uint64_t RHSOffset) { - return LHS.BeginOffset < RHSOffset; - } +namespace { +/// \brief A used slice of an alloca. +/// +/// This structure represents a slice of an alloca used by some instruction. It +/// stores both the begin and end offsets of this use, a pointer to the use +/// itself, and a flag indicating whether we can classify the use as splittable +/// or not when forming partitions of the alloca. +class Slice { + /// \brief The beginning offset of the range. + uint64_t BeginOffset; - friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset, - const ByteRange &RHS) { - return LHSOffset < RHS.BeginOffset; - } + /// \brief The ending offset, not included in the range. + uint64_t EndOffset; - bool operator==(const ByteRange &RHS) const { - return BeginOffset == RHS.BeginOffset && EndOffset == RHS.EndOffset; - } - bool operator!=(const ByteRange &RHS) const { return !operator==(RHS); } - }; + /// \brief Storage for both the use of this slice and whether it can be + /// split. + PointerIntPair UseAndIsSplittable; - /// \brief A partition of an alloca. 
- /// - /// This structure represents a contiguous partition of the alloca. These are - /// formed by examining the uses of the alloca. During formation, they may - /// overlap but once an AllocaPartitioning is built, the Partitions within it - /// are all disjoint. - struct Partition : public ByteRange { - /// \brief Whether this partition is splittable into smaller partitions. - /// - /// We flag partitions as splittable when they are formed entirely due to - /// accesses by trivially splittable operations such as memset and memcpy. - /// - /// FIXME: At some point we should consider loads and stores of FCAs to be - /// splittable and eagerly split them into scalar values. - bool IsSplittable; - - /// \brief Test whether a partition has been marked as dead. - bool isDead() const { - if (BeginOffset == UINT64_MAX) { - assert(EndOffset == UINT64_MAX); - return true; - } - return false; - } +public: + Slice() : BeginOffset(), EndOffset() {} + Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable) + : BeginOffset(BeginOffset), EndOffset(EndOffset), + UseAndIsSplittable(U, IsSplittable) {} - /// \brief Kill a partition. - /// This is accomplished by setting both its beginning and end offset to - /// the maximum possible value. - void kill() { - assert(!isDead() && "He's Dead, Jim!"); - BeginOffset = EndOffset = UINT64_MAX; - } + uint64_t beginOffset() const { return BeginOffset; } + uint64_t endOffset() const { return EndOffset; } - Partition() : ByteRange(), IsSplittable() {} - Partition(uint64_t BeginOffset, uint64_t EndOffset, bool IsSplittable) - : ByteRange(BeginOffset, EndOffset), IsSplittable(IsSplittable) {} - }; + bool isSplittable() const { return UseAndIsSplittable.getInt(); } + void makeUnsplittable() { UseAndIsSplittable.setInt(false); } - /// \brief A particular use of a partition of the alloca. - /// - /// This structure is used to associate uses of a partition with it. They - /// mark the range of bytes which are referenced by a particular instruction, - /// and includes a handle to the user itself and the pointer value in use. - /// The bounds of these uses are determined by intersecting the bounds of the - /// memory use itself with a particular partition. As a consequence there is - /// intentionally overlap between various uses of the same partition. - struct PartitionUse : public ByteRange { - /// \brief The use in question. Provides access to both user and used value. - /// - /// Note that this may be null if the partition use is *dead*, that is, it - /// should be ignored. - Use *U; + Use *getUse() const { return UseAndIsSplittable.getPointer(); } - PartitionUse() : ByteRange(), U() {} - PartitionUse(uint64_t BeginOffset, uint64_t EndOffset, Use *U) - : ByteRange(BeginOffset, EndOffset), U(U) {} - }; + bool isDead() const { return getUse() == 0; } + void kill() { UseAndIsSplittable.setPointer(0); } - /// \brief Construct a partitioning of a particular alloca. + /// \brief Support for ordering ranges. /// - /// Construction does most of the work for partitioning the alloca. This - /// performs the necessary walks of users and builds a partitioning from it. - AllocaPartitioning(const DataLayout &TD, AllocaInst &AI); + /// This provides an ordering over ranges such that start offsets are + /// always increasing, and within equal start offsets, the end offsets are + /// decreasing. Thus the spanning range comes first in a cluster with the + /// same start position. 
+ bool operator<(const Slice &RHS) const { + if (beginOffset() < RHS.beginOffset()) return true; + if (beginOffset() > RHS.beginOffset()) return false; + if (isSplittable() != RHS.isSplittable()) return !isSplittable(); + if (endOffset() > RHS.endOffset()) return true; + return false; + } + + /// \brief Support comparison with a single offset to allow binary searches. + friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS, + uint64_t RHSOffset) { + return LHS.beginOffset() < RHSOffset; + } + friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset, + const Slice &RHS) { + return LHSOffset < RHS.beginOffset(); + } + + bool operator==(const Slice &RHS) const { + return isSplittable() == RHS.isSplittable() && + beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset(); + } + bool operator!=(const Slice &RHS) const { return !operator==(RHS); } +}; +} // end anonymous namespace + +namespace llvm { +template struct isPodLike; +template <> struct isPodLike { + static const bool value = true; +}; +} + +namespace { +/// \brief Representation of the alloca slices. +/// +/// This class represents the slices of an alloca which are formed by its +/// various uses. If a pointer escapes, we can't fully build a representation +/// for the slices used and we reflect that in this structure. The uses are +/// stored, sorted by increasing beginning offset and with unsplittable slices +/// starting at a particular offset before splittable slices. +class AllocaSlices { +public: + /// \brief Construct the slices of a particular alloca. + AllocaSlices(const DataLayout &DL, AllocaInst &AI); /// \brief Test whether a pointer to the allocation escapes our analysis. /// - /// If this is true, the partitioning is never fully built and should be + /// If this is true, the slices are never fully built and should be /// ignored. bool isEscaped() const { return PointerEscapingInstr; } - /// \brief Support for iterating over the partitions. - /// @{ - typedef SmallVectorImpl::iterator iterator; - iterator begin() { return Partitions.begin(); } - iterator end() { return Partitions.end(); } - - typedef SmallVectorImpl::const_iterator const_iterator; - const_iterator begin() const { return Partitions.begin(); } - const_iterator end() const { return Partitions.end(); } - /// @} - - /// \brief Support for iterating over and manipulating a particular - /// partition's uses. - /// - /// The iteration support provided for uses is more limited, but also - /// includes some manipulation routines to support rewriting the uses of - /// partitions during SROA. + /// \brief Support for iterating over the slices. 
/// @{ - typedef SmallVectorImpl::iterator use_iterator; - use_iterator use_begin(unsigned Idx) { return Uses[Idx].begin(); } - use_iterator use_begin(const_iterator I) { return Uses[I - begin()].begin(); } - use_iterator use_end(unsigned Idx) { return Uses[Idx].end(); } - use_iterator use_end(const_iterator I) { return Uses[I - begin()].end(); } + typedef SmallVectorImpl::iterator iterator; + iterator begin() { return Slices.begin(); } + iterator end() { return Slices.end(); } - typedef SmallVectorImpl::const_iterator const_use_iterator; - const_use_iterator use_begin(unsigned Idx) const { return Uses[Idx].begin(); } - const_use_iterator use_begin(const_iterator I) const { - return Uses[I - begin()].begin(); - } - const_use_iterator use_end(unsigned Idx) const { return Uses[Idx].end(); } - const_use_iterator use_end(const_iterator I) const { - return Uses[I - begin()].end(); - } - - unsigned use_size(unsigned Idx) const { return Uses[Idx].size(); } - unsigned use_size(const_iterator I) const { return Uses[I - begin()].size(); } - const PartitionUse &getUse(unsigned PIdx, unsigned UIdx) const { - return Uses[PIdx][UIdx]; - } - const PartitionUse &getUse(const_iterator I, unsigned UIdx) const { - return Uses[I - begin()][UIdx]; - } - - void use_push_back(unsigned Idx, const PartitionUse &PU) { - Uses[Idx].push_back(PU); - } - void use_push_back(const_iterator I, const PartitionUse &PU) { - Uses[I - begin()].push_back(PU); - } + typedef SmallVectorImpl::const_iterator const_iterator; + const_iterator begin() const { return Slices.begin(); } + const_iterator end() const { return Slices.end(); } /// @} /// \brief Allow iterating the dead users for this alloca. @@ -265,114 +237,49 @@ public: dead_op_iterator dead_op_end() const { return DeadOperands.end(); } /// @} - /// \brief MemTransferInst auxiliary data. - /// This struct provides some auxiliary data about memory transfer - /// intrinsics such as memcpy and memmove. These intrinsics can use two - /// different ranges within the same alloca, and provide other challenges to - /// correctly represent. We stash extra data to help us untangle this - /// after the partitioning is complete. - struct MemTransferOffsets { - /// The destination begin and end offsets when the destination is within - /// this alloca. If the end offset is zero the destination is not within - /// this alloca. - uint64_t DestBegin, DestEnd; - - /// The source begin and end offsets when the source is within this alloca. - /// If the end offset is zero, the source is not within this alloca. - uint64_t SourceBegin, SourceEnd; - - /// Flag for whether an alloca is splittable. - bool IsSplittable; - }; - MemTransferOffsets getMemTransferOffsets(MemTransferInst &II) const { - return MemTransferInstData.lookup(&II); - } - - /// \brief Map from a PHI or select operand back to a partition. - /// - /// When manipulating PHI nodes or selects, they can use more than one - /// partition of an alloca. We store a special mapping to allow finding the - /// partition referenced by each of these operands, if any. - iterator findPartitionForPHIOrSelectOperand(Use *U) { - SmallDenseMap >::const_iterator MapIt - = PHIOrSelectOpMap.find(U); - if (MapIt == PHIOrSelectOpMap.end()) - return end(); - - return begin() + MapIt->second.first; - } - - /// \brief Map from a PHI or select operand back to the specific use of - /// a partition. - /// - /// Similar to mapping these operands back to the partitions, this maps - /// directly to the use structure of that partition. 
- use_iterator findPartitionUseForPHIOrSelectOperand(Use *U) { - SmallDenseMap >::const_iterator MapIt - = PHIOrSelectOpMap.find(U); - assert(MapIt != PHIOrSelectOpMap.end()); - return Uses[MapIt->second.first].begin() + MapIt->second.second; - } - - /// \brief Compute a common type among the uses of a particular partition. - /// - /// This routines walks all of the uses of a particular partition and tries - /// to find a common type between them. Untyped operations such as memset and - /// memcpy are ignored. - Type *getCommonType(iterator I) const; - #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const; - void printUsers(raw_ostream &OS, const_iterator I, + void printSlice(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const; + void printUse(raw_ostream &OS, const_iterator I, + StringRef Indent = " ") const; void print(raw_ostream &OS) const; - void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const; - void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const; + void dump(const_iterator I) const; + void dump() const; #endif private: template class BuilderBase; - class PartitionBuilder; - friend class AllocaPartitioning::PartitionBuilder; - class UseBuilder; - friend class AllocaPartitioning::UseBuilder; + class SliceBuilder; + friend class AllocaSlices::SliceBuilder; -#ifndef NDEBUG +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) /// \brief Handle to alloca instruction to simplify method interfaces. AllocaInst &AI; #endif - /// \brief The instruction responsible for this alloca having no partitioning. + /// \brief The instruction responsible for this alloca not having a known set + /// of slices. /// /// When an instruction (potentially) escapes the pointer to the alloca, we - /// store a pointer to that here and abort trying to partition the alloca. - /// This will be null if the alloca is partitioned successfully. + /// store a pointer to that here and abort trying to form slices of the + /// alloca. This will be null if the alloca slices are analyzed successfully. Instruction *PointerEscapingInstr; - /// \brief The partitions of the alloca. - /// - /// We store a vector of the partitions over the alloca here. This vector is - /// sorted by increasing begin offset, and then by decreasing end offset. See - /// the Partition inner class for more details. Initially (during - /// construction) there are overlaps, but we form a disjoint sequence of - /// partitions while finishing construction and a fully constructed object is - /// expected to always have this as a disjoint space. - SmallVector Partitions; - - /// \brief The uses of the partitions. + /// \brief The slices of the alloca. /// - /// This is essentially a mapping from each partition to a list of uses of - /// that partition. The mapping is done with a Uses vector that has the exact - /// same number of entries as the partition vector. Each entry is itself - /// a vector of the uses. - SmallVector, 8> Uses; + /// We store a vector of the slices formed by uses of the alloca here. This + /// vector is sorted by increasing begin offset, and then the unsplittable + /// slices before the splittable ones. See the Slice inner class for more + /// details. + SmallVector Slices; /// \brief Instructions which will become dead if we rewrite the alloca. /// - /// Note that these are not separated by partition. This is because we expect - /// a partitioned alloca to be completely rewritten or not rewritten at all. 
- /// If rewritten, all these instructions can simply be removed and replaced - /// with undef as they come from outside of the allocated space. + /// Note that these are not separated by slice. This is because we expect an + /// alloca to be completely rewritten or not rewritten at all. If rewritten, + /// all these instructions can simply be removed and replaced with undef as + /// they come from outside of the allocated space. SmallVector DeadUsers; /// \brief Operands which will become dead if we rewrite the alloca. @@ -384,356 +291,250 @@ private: /// want to swap this particular input for undef to simplify the use lists of /// the alloca. SmallVector DeadOperands; - - /// \brief The underlying storage for auxiliary memcpy and memset info. - SmallDenseMap MemTransferInstData; - - /// \brief A side datastructure used when building up the partitions and uses. - /// - /// This mapping is only really used during the initial building of the - /// partitioning so that we can retain information about PHI and select nodes - /// processed. - SmallDenseMap > PHIOrSelectSizes; - - /// \brief Auxiliary information for particular PHI or select operands. - SmallDenseMap, 4> PHIOrSelectOpMap; - - /// \brief A utility routine called from the constructor. - /// - /// This does what it says on the tin. It is the key of the alloca partition - /// splitting and merging. After it is called we have the desired disjoint - /// collection of partitions. - void splitAndMergePartitions(); }; } -template -class AllocaPartitioning::BuilderBase - : public InstVisitor { -public: - BuilderBase(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P) - : TD(TD), - AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())), - P(P) { - enqueueUsers(AI, 0); - } - -protected: - const DataLayout &TD; - const uint64_t AllocSize; - AllocaPartitioning &P; - - SmallPtrSet VisitedUses; - - struct OffsetUse { - Use *U; - int64_t Offset; - }; - SmallVector Queue; - - // The active offset and use while visiting. - Use *U; - int64_t Offset; - - void enqueueUsers(Instruction &I, int64_t UserOffset) { - for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); - UI != UE; ++UI) { - if (VisitedUses.insert(&UI.getUse())) { - OffsetUse OU = { &UI.getUse(), UserOffset }; - Queue.push_back(OU); - } - } - } - - bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) { - GEPOffset = Offset; - for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI); - GTI != GTE; ++GTI) { - ConstantInt *OpC = dyn_cast(GTI.getOperand()); - if (!OpC) - return false; - if (OpC->isZero()) - continue; - - // Handle a struct index, which adds its field offset to the pointer. - if (StructType *STy = dyn_cast(*GTI)) { - unsigned ElementIdx = OpC->getZExtValue(); - const StructLayout *SL = TD.getStructLayout(STy); - uint64_t ElementOffset = SL->getElementOffset(ElementIdx); - // Check that we can continue to model this GEP in a signed 64-bit offset. - if (ElementOffset > INT64_MAX || - (GEPOffset >= 0 && - ((uint64_t)GEPOffset + ElementOffset) > INT64_MAX)) { - DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding " - << "what can be represented in an int64_t!\n" - << " alloca: " << P.AI << "\n"); - return false; - } - if (GEPOffset < 0) - GEPOffset = ElementOffset + (uint64_t)-GEPOffset; - else - GEPOffset += ElementOffset; - continue; - } +static Value *foldSelectInst(SelectInst &SI) { + // If the condition being selected on is a constant or the same value is + // being selected between, fold the select. 
Yes this does (rarely) happen + // early on. + if (ConstantInt *CI = dyn_cast(SI.getCondition())) + return SI.getOperand(1+CI->isZero()); + if (SI.getOperand(1) == SI.getOperand(2)) + return SI.getOperand(1); - APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits()); - Index *= APInt(Index.getBitWidth(), - TD.getTypeAllocSize(GTI.getIndexedType())); - Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset, - /*isSigned*/true); - // Check if the result can be stored in our int64_t offset. - if (!Index.isSignedIntN(sizeof(GEPOffset) * 8)) { - DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding " - << "what can be represented in an int64_t!\n" - << " alloca: " << P.AI << "\n"); - return false; - } + return 0; +} - GEPOffset = Index.getSExtValue(); - } - return true; - } +/// \brief Builder for the alloca slices. +/// +/// This class builds a set of alloca slices by recursively visiting the uses +/// of an alloca and making a slice for each load and store at each offset. +class AllocaSlices::SliceBuilder : public PtrUseVisitor { + friend class PtrUseVisitor; + friend class InstVisitor; + typedef PtrUseVisitor Base; - Value *foldSelectInst(SelectInst &SI) { - // If the condition being selected on is a constant or the same value is - // being selected between, fold the select. Yes this does (rarely) happen - // early on. - if (ConstantInt *CI = dyn_cast(SI.getCondition())) - return SI.getOperand(1+CI->isZero()); - if (SI.getOperand(1) == SI.getOperand(2)) { - assert(*U == SI.getOperand(1)); - return SI.getOperand(1); - } - return 0; - } -}; + const uint64_t AllocSize; + AllocaSlices &S; -/// \brief Builder for the alloca partitioning. -/// -/// This class builds an alloca partitioning by recursively visiting the uses -/// of an alloca and splitting the partitions for each load and store at each -/// offset. -class AllocaPartitioning::PartitionBuilder - : public BuilderBase { - friend class InstVisitor; + SmallDenseMap MemTransferSliceMap; + SmallDenseMap PHIOrSelectSizes; - SmallDenseMap MemTransferPartitionMap; + /// \brief Set to de-duplicate dead instructions found in the use walk. + SmallPtrSet VisitedDeadInsts; public: - PartitionBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P) - : BuilderBase(TD, AI, P) {} - - /// \brief Run the builder over the allocation. - bool operator()() { - // Note that we have to re-evaluate size on each trip through the loop as - // the queue grows at the tail. - for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) { - U = Queue[Idx].U; - Offset = Queue[Idx].Offset; - if (!visit(cast(U->getUser()))) - return false; - } - return true; - } + SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &S) + : PtrUseVisitor(DL), + AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), S(S) {} private: - bool markAsEscaping(Instruction &I) { - P.PointerEscapingInstr = &I; - return false; + void markAsDead(Instruction &I) { + if (VisitedDeadInsts.insert(&I)) + S.DeadUsers.push_back(&I); } - void insertUse(Instruction &I, int64_t Offset, uint64_t Size, + void insertUse(Instruction &I, const APInt &Offset, uint64_t Size, bool IsSplittable = false) { - // Completely skip uses which have a zero size or don't overlap the - // allocation. - if (Size == 0 || - (Offset >= 0 && (uint64_t)Offset >= AllocSize) || - (Offset < 0 && (uint64_t)-Offset >= Size)) { + // Completely skip uses which have a zero size or start either before or + // past the end of the allocation. 
+ if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize)) { DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset - << " which starts past the end of the " << AllocSize - << " byte alloca:\n" - << " alloca: " << P.AI << "\n" - << " use: " << I << "\n"); - return; - } - - // Clamp the start to the beginning of the allocation. - if (Offset < 0) { - DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset - << " to start at the beginning of the alloca:\n" - << " alloca: " << P.AI << "\n" + << " which has zero size or starts outside of the " + << AllocSize << " byte alloca:\n" + << " alloca: " << S.AI << "\n" << " use: " << I << "\n"); - Size -= (uint64_t)-Offset; - Offset = 0; + return markAsDead(I); } - uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size; + uint64_t BeginOffset = Offset.getZExtValue(); + uint64_t EndOffset = BeginOffset + Size; // Clamp the end offset to the end of the allocation. Note that this is // formulated to handle even the case where "BeginOffset + Size" overflows. + // This may appear superficially to be something we could ignore entirely, + // but that is not so! There may be widened loads or PHI-node uses where + // some instructions are dead but not others. We can't completely ignore + // them, and so have to record at least the information here. assert(AllocSize >= BeginOffset); // Established above. if (Size > AllocSize - BeginOffset) { DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset << " to remain within the " << AllocSize << " byte alloca:\n" - << " alloca: " << P.AI << "\n" + << " alloca: " << S.AI << "\n" << " use: " << I << "\n"); EndOffset = AllocSize; } - Partition New(BeginOffset, EndOffset, IsSplittable); - P.Partitions.push_back(New); + S.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable)); } - bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) { - uint64_t Size = TD.getTypeStoreSize(Ty); - - // If this memory access can be shown to *statically* extend outside the - // bounds of of the allocation, it's behavior is undefined, so simply - // ignore it. Note that this is more strict than the generic clamping - // behavior of insertUse. We also try to handle cases which might run the - // risk of overflow. - // FIXME: We should instead consider the pointer to have escaped if this - // function is being instrumented for addressing bugs or race conditions. - if (Offset < 0 || (uint64_t)Offset >= AllocSize || - Size > (AllocSize - (uint64_t)Offset)) { - DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte " - << (isa(I) ? 
"load" : "store") << " @" << Offset - << " which extends past the end of the " << AllocSize - << " byte alloca:\n" - << " alloca: " << P.AI << "\n" - << " use: " << I << "\n"); - return true; - } + void visitBitCastInst(BitCastInst &BC) { + if (BC.use_empty()) + return markAsDead(BC); - insertUse(I, Offset, Size); - return true; + return Base::visitBitCastInst(BC); } - bool visitBitCastInst(BitCastInst &BC) { - enqueueUsers(BC, Offset); - return true; + void visitGetElementPtrInst(GetElementPtrInst &GEPI) { + if (GEPI.use_empty()) + return markAsDead(GEPI); + + return Base::visitGetElementPtrInst(GEPI); } - bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { - int64_t GEPOffset; - if (!computeConstantGEPOffset(GEPI, GEPOffset)) - return markAsEscaping(GEPI); + void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset, + uint64_t Size, bool IsVolatile) { + // We allow splitting of loads and stores where the type is an integer type + // and cover the entire alloca. This prevents us from splitting over + // eagerly. + // FIXME: In the great blue eventually, we should eagerly split all integer + // loads and stores, and then have a separate step that merges adjacent + // alloca partitions into a single partition suitable for integer widening. + // Or we should skip the merge step and rely on GVN and other passes to + // merge adjacent loads and stores that survive mem2reg. + bool IsSplittable = + Ty->isIntegerTy() && !IsVolatile && Offset == 0 && Size >= AllocSize; - enqueueUsers(GEPI, GEPOffset); - return true; + insertUse(I, Offset, Size, IsSplittable); } - bool visitLoadInst(LoadInst &LI) { + void visitLoadInst(LoadInst &LI) { assert((!LI.isSimple() || LI.getType()->isSingleValueType()) && "All simple FCA loads should have been pre-split"); - return handleLoadOrStore(LI.getType(), LI, Offset); + + if (!IsOffsetKnown) + return PI.setAborted(&LI); + + uint64_t Size = DL.getTypeStoreSize(LI.getType()); + return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile()); } - bool visitStoreInst(StoreInst &SI) { + void visitStoreInst(StoreInst &SI) { Value *ValOp = SI.getValueOperand(); if (ValOp == *U) - return markAsEscaping(SI); + return PI.setEscapedAndAborted(&SI); + if (!IsOffsetKnown) + return PI.setAborted(&SI); + + uint64_t Size = DL.getTypeStoreSize(ValOp->getType()); + + // If this memory access can be shown to *statically* extend outside the + // bounds of of the allocation, it's behavior is undefined, so simply + // ignore it. Note that this is more strict than the generic clamping + // behavior of insertUse. We also try to handle cases which might run the + // risk of overflow. + // FIXME: We should instead consider the pointer to have escaped if this + // function is being instrumented for addressing bugs or race conditions. 
+ if (Offset.isNegative() || Size > AllocSize || + Offset.ugt(AllocSize - Size)) { + DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset + << " which extends past the end of the " << AllocSize + << " byte alloca:\n" + << " alloca: " << S.AI << "\n" + << " use: " << SI << "\n"); + return markAsDead(SI); + } assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) && "All simple FCA stores should have been pre-split"); - return handleLoadOrStore(ValOp->getType(), SI, Offset); + handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile()); } - bool visitMemSetInst(MemSetInst &II) { + void visitMemSetInst(MemSetInst &II) { assert(II.getRawDest() == *U && "Pointer use is not the destination?"); ConstantInt *Length = dyn_cast(II.getLength()); - uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset; - insertUse(II, Offset, Size, Length); - return true; + if ((Length && Length->getValue() == 0) || + (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize))) + // Zero-length mem transfer intrinsics can be ignored entirely. + return markAsDead(II); + + if (!IsOffsetKnown) + return PI.setAborted(&II); + + insertUse(II, Offset, + Length ? Length->getLimitedValue() + : AllocSize - Offset.getLimitedValue(), + (bool)Length); } - bool visitMemTransferInst(MemTransferInst &II) { + void visitMemTransferInst(MemTransferInst &II) { ConstantInt *Length = dyn_cast(II.getLength()); - uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset; - if (!Size) + if ((Length && Length->getValue() == 0) || + (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize))) // Zero-length mem transfer intrinsics can be ignored entirely. - return true; + return markAsDead(II); - MemTransferOffsets &Offsets = P.MemTransferInstData[&II]; + if (!IsOffsetKnown) + return PI.setAborted(&II); - // Only intrinsics with a constant length can be split. - Offsets.IsSplittable = Length; + uint64_t RawOffset = Offset.getLimitedValue(); + uint64_t Size = Length ? Length->getLimitedValue() + : AllocSize - RawOffset; - if (*U == II.getRawDest()) { - Offsets.DestBegin = Offset; - Offsets.DestEnd = Offset + Size; - } - if (*U == II.getRawSource()) { - Offsets.SourceBegin = Offset; - Offsets.SourceEnd = Offset + Size; + // Check for the special case where the same exact value is used for both + // source and dest. + if (*U == II.getRawDest() && *U == II.getRawSource()) { + // For non-volatile transfers this is a no-op. + if (!II.isVolatile()) + return markAsDead(II); + + return insertUse(II, Offset, Size, /*IsSplittable=*/false); } - // If we have set up end offsets for both the source and the destination, - // we have found both sides of this transfer pointing at the same alloca. - bool SeenBothEnds = Offsets.SourceEnd && Offsets.DestEnd; - if (SeenBothEnds && II.getRawDest() != II.getRawSource()) { - unsigned PrevIdx = MemTransferPartitionMap[&II]; + // If we have seen both source and destination for a mem transfer, then + // they both point to the same alloca. + bool Inserted; + SmallDenseMap::iterator MTPI; + llvm::tie(MTPI, Inserted) = + MemTransferSliceMap.insert(std::make_pair(&II, S.Slices.size())); + unsigned PrevIdx = MTPI->second; + if (!Inserted) { + Slice &PrevP = S.Slices[PrevIdx]; // Check if the begin offsets match and this is a non-volatile transfer. // In that case, we can completely elide the transfer. 
- if (!II.isVolatile() && Offsets.SourceBegin == Offsets.DestBegin) { - P.Partitions[PrevIdx].kill(); - return true; + if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) { + PrevP.kill(); + return markAsDead(II); } // Otherwise we have an offset transfer within the same alloca. We can't // split those. - P.Partitions[PrevIdx].IsSplittable = Offsets.IsSplittable = false; - } else if (SeenBothEnds) { - // Handle the case where this exact use provides both ends of the - // operation. - assert(II.getRawDest() == II.getRawSource()); - - // For non-volatile transfers this is a no-op. - if (!II.isVolatile()) - return true; - - // Otherwise just suppress splitting. - Offsets.IsSplittable = false; + PrevP.makeUnsplittable(); } - // Insert the use now that we've fixed up the splittable nature. - insertUse(II, Offset, Size, Offsets.IsSplittable); - - // Setup the mapping from intrinsic to partition of we've not seen both - // ends of this transfer. - if (!SeenBothEnds) { - unsigned NewIdx = P.Partitions.size() - 1; - bool Inserted - = MemTransferPartitionMap.insert(std::make_pair(&II, NewIdx)).second; - assert(Inserted && - "Already have intrinsic in map but haven't seen both ends"); - (void)Inserted; - } + insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length); - return true; + // Check that we ended up with a valid index in the map. + assert(S.Slices[PrevIdx].getUse()->getUser() == &II && + "Map index doesn't point back to a slice with this user."); } // Disable SRoA for any intrinsics except for lifetime invariants. - // FIXME: What about debug instrinsics? This matches old behavior, but + // FIXME: What about debug intrinsics? This matches old behavior, but // doesn't make sense. - bool visitIntrinsicInst(IntrinsicInst &II) { + void visitIntrinsicInst(IntrinsicInst &II) { + if (!IsOffsetKnown) + return PI.setAborted(&II); + if (II.getIntrinsicID() == Intrinsic::lifetime_start || II.getIntrinsicID() == Intrinsic::lifetime_end) { ConstantInt *Length = cast(II.getArgOperand(0)); - uint64_t Size = std::min(AllocSize - Offset, Length->getLimitedValue()); + uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(), + Length->getLimitedValue()); insertUse(II, Offset, Size, true); - return true; + return; } - return markAsEscaping(II); + Base::visitIntrinsicInst(II); } Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) { // We consider any PHI or select that results in a direct load or store of - // the same offset to be a viable use for partitioning purposes. These uses + // the same offset to be a viable use for slicing purposes. These uses // are considered unsplittable and the size is the maximum loaded or stored // size. 
SmallPtrSet Visited; @@ -748,498 +549,176 @@ private: llvm::tie(UsedI, I) = Uses.pop_back_val(); if (LoadInst *LI = dyn_cast(I)) { - Size = std::max(Size, TD.getTypeStoreSize(LI->getType())); + Size = std::max(Size, DL.getTypeStoreSize(LI->getType())); continue; } if (StoreInst *SI = dyn_cast(I)) { Value *Op = SI->getOperand(0); if (Op == UsedI) return SI; - Size = std::max(Size, TD.getTypeStoreSize(Op->getType())); + Size = std::max(Size, DL.getTypeStoreSize(Op->getType())); continue; } if (GetElementPtrInst *GEP = dyn_cast(I)) { if (!GEP->hasAllZeroIndices()) - return GEP; - } else if (!isa(I) && !isa(I) && - !isa(I)) { - return I; - } - - for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE; - ++UI) - if (Visited.insert(cast(*UI))) - Uses.push_back(std::make_pair(I, cast(*UI))); - } while (!Uses.empty()); - - return 0; - } - - bool visitPHINode(PHINode &PN) { - // See if we already have computed info on this node. - std::pair &PHIInfo = P.PHIOrSelectSizes[&PN]; - if (PHIInfo.first) { - PHIInfo.second = true; - insertUse(PN, Offset, PHIInfo.first); - return true; - } - - // Check for an unsafe use of the PHI node. - if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&PN, PHIInfo.first)) - return markAsEscaping(*EscapingI); - - insertUse(PN, Offset, PHIInfo.first); - return true; - } - - bool visitSelectInst(SelectInst &SI) { - if (Value *Result = foldSelectInst(SI)) { - if (Result == *U) - // If the result of the constant fold will be the pointer, recurse - // through the select as if we had RAUW'ed it. - enqueueUsers(SI, Offset); - - return true; - } - - // See if we already have computed info on this node. - std::pair &SelectInfo = P.PHIOrSelectSizes[&SI]; - if (SelectInfo.first) { - SelectInfo.second = true; - insertUse(SI, Offset, SelectInfo.first); - return true; - } - - // Check for an unsafe use of the PHI node. - if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&SI, SelectInfo.first)) - return markAsEscaping(*EscapingI); - - insertUse(SI, Offset, SelectInfo.first); - return true; - } - - /// \brief Disable SROA entirely if there are unhandled users of the alloca. - bool visitInstruction(Instruction &I) { return markAsEscaping(I); } -}; - - -/// \brief Use adder for the alloca partitioning. -/// -/// This class adds the uses of an alloca to all of the partitions which they -/// use. For splittable partitions, this can end up doing essentially a linear -/// walk of the partitions, but the number of steps remains bounded by the -/// total result instruction size: -/// - The number of partitions is a result of the number unsplittable -/// instructions using the alloca. -/// - The number of users of each partition is at worst the total number of -/// splittable instructions using the alloca. -/// Thus we will produce N * M instructions in the end, where N are the number -/// of unsplittable uses and M are the number of splittable. This visitor does -/// the exact same number of updates to the partitioning. -/// -/// In the more common case, this visitor will leverage the fact that the -/// partition space is pre-sorted, and do a logarithmic search for the -/// partition needed, making the total visit a classical ((N + M) * log(N)) -/// complexity operation. -class AllocaPartitioning::UseBuilder : public BuilderBase { - friend class InstVisitor; - - /// \brief Set to de-duplicate dead instructions found in the use walk. 
- SmallPtrSet VisitedDeadInsts; - -public: - UseBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P) - : BuilderBase(TD, AI, P) {} - - /// \brief Run the builder over the allocation. - void operator()() { - // Note that we have to re-evaluate size on each trip through the loop as - // the queue grows at the tail. - for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) { - U = Queue[Idx].U; - Offset = Queue[Idx].Offset; - this->visit(cast(U->getUser())); - } - } - -private: - void markAsDead(Instruction &I) { - if (VisitedDeadInsts.insert(&I)) - P.DeadUsers.push_back(&I); - } - - void insertUse(Instruction &User, int64_t Offset, uint64_t Size) { - // If the use has a zero size or extends outside of the allocation, record - // it as a dead use for elimination later. - if (Size == 0 || (uint64_t)Offset >= AllocSize || - (Offset < 0 && (uint64_t)-Offset >= Size)) - return markAsDead(User); - - // Clamp the start to the beginning of the allocation. - if (Offset < 0) { - Size -= (uint64_t)-Offset; - Offset = 0; - } - - uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size; - - // Clamp the end offset to the end of the allocation. Note that this is - // formulated to handle even the case where "BeginOffset + Size" overflows. - assert(AllocSize >= BeginOffset); // Established above. - if (Size > AllocSize - BeginOffset) - EndOffset = AllocSize; - - // NB: This only works if we have zero overlapping partitions. - iterator B = std::lower_bound(P.begin(), P.end(), BeginOffset); - if (B != P.begin() && llvm::prior(B)->EndOffset > BeginOffset) - B = llvm::prior(B); - for (iterator I = B, E = P.end(); I != E && I->BeginOffset < EndOffset; - ++I) { - PartitionUse NewPU(std::max(I->BeginOffset, BeginOffset), - std::min(I->EndOffset, EndOffset), U); - P.use_push_back(I, NewPU); - if (isa(U->getUser()) || isa(U->getUser())) - P.PHIOrSelectOpMap[U] - = std::make_pair(I - P.begin(), P.Uses[I - P.begin()].size() - 1); - } - } - - void handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) { - uint64_t Size = TD.getTypeStoreSize(Ty); - - // If this memory access can be shown to *statically* extend outside the - // bounds of of the allocation, it's behavior is undefined, so simply - // ignore it. Note that this is more strict than the generic clamping - // behavior of insertUse. - if (Offset < 0 || (uint64_t)Offset >= AllocSize || - Size > (AllocSize - (uint64_t)Offset)) - return markAsDead(I); - - insertUse(I, Offset, Size); - } - - void visitBitCastInst(BitCastInst &BC) { - if (BC.use_empty()) - return markAsDead(BC); - - enqueueUsers(BC, Offset); - } - - void visitGetElementPtrInst(GetElementPtrInst &GEPI) { - if (GEPI.use_empty()) - return markAsDead(GEPI); - - int64_t GEPOffset; - if (!computeConstantGEPOffset(GEPI, GEPOffset)) - llvm_unreachable("Unable to compute constant offset for use"); - - enqueueUsers(GEPI, GEPOffset); - } - - void visitLoadInst(LoadInst &LI) { - handleLoadOrStore(LI.getType(), LI, Offset); - } - - void visitStoreInst(StoreInst &SI) { - handleLoadOrStore(SI.getOperand(0)->getType(), SI, Offset); - } - - void visitMemSetInst(MemSetInst &II) { - ConstantInt *Length = dyn_cast(II.getLength()); - uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset; - insertUse(II, Offset, Size); - } - - void visitMemTransferInst(MemTransferInst &II) { - ConstantInt *Length = dyn_cast(II.getLength()); - uint64_t Size = Length ? 
Length->getZExtValue() : AllocSize - Offset; - if (!Size) - return markAsDead(II); + return GEP; + } else if (!isa(I) && !isa(I) && + !isa(I)) { + return I; + } - MemTransferOffsets &Offsets = P.MemTransferInstData[&II]; - if (!II.isVolatile() && Offsets.DestEnd && Offsets.SourceEnd && - Offsets.DestBegin == Offsets.SourceBegin) - return markAsDead(II); // Skip identity transfers without side-effects. + for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE; + ++UI) + if (Visited.insert(cast(*UI))) + Uses.push_back(std::make_pair(I, cast(*UI))); + } while (!Uses.empty()); - insertUse(II, Offset, Size); + return 0; } - void visitIntrinsicInst(IntrinsicInst &II) { - assert(II.getIntrinsicID() == Intrinsic::lifetime_start || - II.getIntrinsicID() == Intrinsic::lifetime_end); - - ConstantInt *Length = cast(II.getArgOperand(0)); - insertUse(II, Offset, - std::min(AllocSize - Offset, Length->getLimitedValue())); - } + void visitPHINode(PHINode &PN) { + if (PN.use_empty()) + return markAsDead(PN); + if (!IsOffsetKnown) + return PI.setAborted(&PN); - void insertPHIOrSelect(Instruction &User, uint64_t Offset) { - uint64_t Size = P.PHIOrSelectSizes.lookup(&User).first; + // See if we already have computed info on this node. + uint64_t &PHISize = PHIOrSelectSizes[&PN]; + if (!PHISize) { + // This is a new PHI node, check for an unsafe use of the PHI node. + if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&PN, PHISize)) + return PI.setAborted(UnsafeI); + } // For PHI and select operands outside the alloca, we can't nuke the entire // phi or select -- the other side might still be relevant, so we special // case them here and use a separate structure to track the operands // themselves which should be replaced with undef. - if (Offset >= AllocSize) { - P.DeadOperands.push_back(U); + // FIXME: This should instead be escaped in the event we're instrumenting + // for address sanitization. + if ((Offset.isNegative() && (-Offset).uge(PHISize)) || + (!Offset.isNegative() && Offset.uge(AllocSize))) { + S.DeadOperands.push_back(U); return; } - insertUse(User, Offset, Size); + insertUse(PN, Offset, PHISize); } - void visitPHINode(PHINode &PN) { - if (PN.use_empty()) - return markAsDead(PN); - insertPHIOrSelect(PN, Offset); - } void visitSelectInst(SelectInst &SI) { if (SI.use_empty()) return markAsDead(SI); - if (Value *Result = foldSelectInst(SI)) { if (Result == *U) // If the result of the constant fold will be the pointer, recurse // through the select as if we had RAUW'ed it. - enqueueUsers(SI, Offset); + enqueueUsers(SI); else // Otherwise the operand to the select is dead, and we can replace it // with undef. - P.DeadOperands.push_back(U); + S.DeadOperands.push_back(U); return; } + if (!IsOffsetKnown) + return PI.setAborted(&SI); - insertPHIOrSelect(SI, Offset); - } - - /// \brief Unreachable, we've already visited the alloca once. - void visitInstruction(Instruction &I) { - llvm_unreachable("Unhandled instruction in use builder."); - } -}; - -void AllocaPartitioning::splitAndMergePartitions() { - size_t NumDeadPartitions = 0; - - // Track the range of splittable partitions that we pass when accumulating - // overlapping unsplittable partitions. 
- uint64_t SplitEndOffset = 0ull; - - Partition New(0ull, 0ull, false); - - for (unsigned i = 0, j = i, e = Partitions.size(); i != e; i = j) { - ++j; - - if (!Partitions[i].IsSplittable || New.BeginOffset == New.EndOffset) { - assert(New.BeginOffset == New.EndOffset); - New = Partitions[i]; - } else { - assert(New.IsSplittable); - New.EndOffset = std::max(New.EndOffset, Partitions[i].EndOffset); - } - assert(New.BeginOffset != New.EndOffset); - - // Scan the overlapping partitions. - while (j != e && New.EndOffset > Partitions[j].BeginOffset) { - // If the new partition we are forming is splittable, stop at the first - // unsplittable partition. - if (New.IsSplittable && !Partitions[j].IsSplittable) - break; - - // Grow the new partition to include any equally splittable range. 'j' is - // always equally splittable when New is splittable, but when New is not - // splittable, we may subsume some (or part of some) splitable partition - // without growing the new one. - if (New.IsSplittable == Partitions[j].IsSplittable) { - New.EndOffset = std::max(New.EndOffset, Partitions[j].EndOffset); - } else { - assert(!New.IsSplittable); - assert(Partitions[j].IsSplittable); - SplitEndOffset = std::max(SplitEndOffset, Partitions[j].EndOffset); - } - - Partitions[j].kill(); - ++NumDeadPartitions; - ++j; - } - - // If the new partition is splittable, chop off the end as soon as the - // unsplittable subsequent partition starts and ensure we eventually cover - // the splittable area. - if (j != e && New.IsSplittable) { - SplitEndOffset = std::max(SplitEndOffset, New.EndOffset); - New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset); + // See if we already have computed info on this node. + uint64_t &SelectSize = PHIOrSelectSizes[&SI]; + if (!SelectSize) { + // This is a new Select, check for an unsafe use of it. + if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&SI, SelectSize)) + return PI.setAborted(UnsafeI); } - // Add the new partition if it differs from the original one and is - // non-empty. We can end up with an empty partition here if it was - // splittable but there is an unsplittable one that starts at the same - // offset. - if (New != Partitions[i]) { - if (New.BeginOffset != New.EndOffset) - Partitions.push_back(New); - // Mark the old one for removal. - Partitions[i].kill(); - ++NumDeadPartitions; + // For PHI and select operands outside the alloca, we can't nuke the entire + // phi or select -- the other side might still be relevant, so we special + // case them here and use a separate structure to track the operands + // themselves which should be replaced with undef. + // FIXME: This should instead be escaped in the event we're instrumenting + // for address sanitization. + if ((Offset.isNegative() && Offset.uge(SelectSize)) || + (!Offset.isNegative() && Offset.uge(AllocSize))) { + S.DeadOperands.push_back(U); + return; } - New.BeginOffset = New.EndOffset; - if (!New.IsSplittable) { - New.EndOffset = std::max(New.EndOffset, SplitEndOffset); - if (j != e && !Partitions[j].IsSplittable) - New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset); - New.IsSplittable = true; - // If there is a trailing splittable partition which won't be fused into - // the next splittable partition go ahead and add it onto the partitions - // list. 
- if (New.BeginOffset < New.EndOffset && - (j == e || !Partitions[j].IsSplittable || - New.EndOffset < Partitions[j].BeginOffset)) { - Partitions.push_back(New); - New.BeginOffset = New.EndOffset = 0ull; - } - } + insertUse(SI, Offset, SelectSize); } - // Re-sort the partitions now that they have been split and merged into - // disjoint set of partitions. Also remove any of the dead partitions we've - // replaced in the process. - std::sort(Partitions.begin(), Partitions.end()); - if (NumDeadPartitions) { - assert(Partitions.back().isDead()); - assert((ptrdiff_t)NumDeadPartitions == - std::count(Partitions.begin(), Partitions.end(), Partitions.back())); + /// \brief Disable SROA entirely if there are unhandled users of the alloca. + void visitInstruction(Instruction &I) { + PI.setAborted(&I); } - Partitions.erase(Partitions.end() - NumDeadPartitions, Partitions.end()); -} +}; -AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI) +AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI) : -#ifndef NDEBUG +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) AI(AI), #endif PointerEscapingInstr(0) { - PartitionBuilder PB(TD, AI, *this); - if (!PB()) + SliceBuilder PB(DL, AI, *this); + SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI); + if (PtrI.isEscaped() || PtrI.isAborted()) { + // FIXME: We should sink the escape vs. abort info into the caller nicely, + // possibly by just storing the PtrInfo in the AllocaSlices. + PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst() + : PtrI.getAbortingInst(); + assert(PointerEscapingInstr && "Did not track a bad instruction"); return; + } + + Slices.erase(std::remove_if(Slices.begin(), Slices.end(), + std::mem_fun_ref(&Slice::isDead)), + Slices.end()); // Sort the uses. This arranges for the offsets to be in ascending order, // and the sizes to be in descending order. - std::sort(Partitions.begin(), Partitions.end()); - - // Remove any partitions from the back which are marked as dead. - while (!Partitions.empty() && Partitions.back().isDead()) - Partitions.pop_back(); - - if (Partitions.size() > 1) { - // Intersect splittability for all partitions with equal offsets and sizes. - // Then remove all but the first so that we have a sequence of non-equal but - // potentially overlapping partitions. - for (iterator I = Partitions.begin(), J = I, E = Partitions.end(); I != E; - I = J) { - ++J; - while (J != E && *I == *J) { - I->IsSplittable &= J->IsSplittable; - ++J; - } - } - Partitions.erase(std::unique(Partitions.begin(), Partitions.end()), - Partitions.end()); - - // Split splittable and merge unsplittable partitions into a disjoint set - // of partitions over the used space of the allocation. - splitAndMergePartitions(); - } - - // Now build up the user lists for each of these disjoint partitions by - // re-walking the recursive users of the alloca. - Uses.resize(Partitions.size()); - UseBuilder UB(TD, AI, *this); - UB(); + std::sort(Slices.begin(), Slices.end()); } -Type *AllocaPartitioning::getCommonType(iterator I) const { - Type *Ty = 0; - for (const_use_iterator UI = use_begin(I), UE = use_end(I); UI != UE; ++UI) { - if (!UI->U) - continue; // Skip dead uses. 
- if (isa(*UI->U->getUser())) - continue; - if (UI->BeginOffset != I->BeginOffset || UI->EndOffset != I->EndOffset) - continue; - - Type *UserTy = 0; - if (LoadInst *LI = dyn_cast(UI->U->getUser())) { - UserTy = LI->getType(); - } else if (StoreInst *SI = dyn_cast(UI->U->getUser())) { - UserTy = SI->getValueOperand()->getType(); - } - - if (Ty && Ty != UserTy) - return 0; +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) - Ty = UserTy; - } - return Ty; +void AllocaSlices::print(raw_ostream &OS, const_iterator I, + StringRef Indent) const { + printSlice(OS, I, Indent); + printUse(OS, I, Indent); } -#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) - -void AllocaPartitioning::print(raw_ostream &OS, const_iterator I, - StringRef Indent) const { - OS << Indent << "partition #" << (I - begin()) - << " [" << I->BeginOffset << "," << I->EndOffset << ")" - << (I->IsSplittable ? " (splittable)" : "") - << (Uses[I - begin()].empty() ? " (zero uses)" : "") - << "\n"; +void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I, + StringRef Indent) const { + OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")" + << " slice #" << (I - begin()) + << (I->isSplittable() ? " (splittable)" : "") << "\n"; } -void AllocaPartitioning::printUsers(raw_ostream &OS, const_iterator I, - StringRef Indent) const { - for (const_use_iterator UI = use_begin(I), UE = use_end(I); - UI != UE; ++UI) { - if (!UI->U) - continue; // Skip dead uses. - OS << Indent << " [" << UI->BeginOffset << "," << UI->EndOffset << ") " - << "used by: " << *UI->U->getUser() << "\n"; - if (MemTransferInst *II = dyn_cast(UI->U->getUser())) { - const MemTransferOffsets &MTO = MemTransferInstData.lookup(II); - bool IsDest; - if (!MTO.IsSplittable) - IsDest = UI->BeginOffset == MTO.DestBegin; - else - IsDest = MTO.DestBegin != 0u; - OS << Indent << " (original " << (IsDest ? "dest" : "source") << ": " - << "[" << (IsDest ? MTO.DestBegin : MTO.SourceBegin) - << "," << (IsDest ? MTO.DestEnd : MTO.SourceEnd) << ")\n"; - } - } +void AllocaSlices::printUse(raw_ostream &OS, const_iterator I, + StringRef Indent) const { + OS << Indent << " used by: " << *I->getUse()->getUser() << "\n"; } -void AllocaPartitioning::print(raw_ostream &OS) const { +void AllocaSlices::print(raw_ostream &OS) const { if (PointerEscapingInstr) { - OS << "No partitioning for alloca: " << AI << "\n" + OS << "Can't analyze slices for alloca: " << AI << "\n" << " A pointer to this alloca escaped by:\n" << " " << *PointerEscapingInstr << "\n"; return; } - OS << "Partitioning of alloca: " << AI << "\n"; - unsigned Num = 0; - for (const_iterator I = begin(), E = end(); I != E; ++I, ++Num) { + OS << "Slices of alloca: " << AI << "\n"; + for (const_iterator I = begin(), E = end(); I != E; ++I) print(OS, I); - printUsers(OS, I); - } } -void AllocaPartitioning::dump(const_iterator I) const { print(dbgs(), I); } -void AllocaPartitioning::dump() const { print(dbgs()); } +LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const { + print(dbgs(), I); +} +LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); } #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) - namespace { /// \brief Implementation of LoadAndStorePromoter for promoting allocas. 
/// @@ -1256,12 +735,13 @@ class AllocaPromoter : public LoadAndStorePromoter { SmallVector DVIs; public: - AllocaPromoter(const SmallVectorImpl &Insts, SSAUpdater &S, + AllocaPromoter(const SmallVectorImpl &Insts, SSAUpdater &S, AllocaInst &AI, DIBuilder &DIB) - : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {} + : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {} void run(const SmallVectorImpl &Insts) { - // Remember which alloca we're promoting (for isInstInList). + // Retain the debug information attached to the alloca for use when + // rewriting loads and stores. if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) { for (Value::use_iterator UI = DebugNode->use_begin(), UE = DebugNode->use_end(); @@ -1273,7 +753,9 @@ public: } LoadAndStorePromoter::run(Insts); - AI.eraseFromParent(); + + // While we have the debug information, clear it off of the alloca. The + // caller takes care of deleting the alloca. while (!DDIs.empty()) DDIs.pop_back_val()->eraseFromParent(); while (!DVIs.empty()) @@ -1282,13 +764,34 @@ public: virtual bool isInstInList(Instruction *I, const SmallVectorImpl &Insts) const { + Value *Ptr; if (LoadInst *LI = dyn_cast(I)) - return LI->getOperand(0) == &AI; - return cast(I)->getPointerOperand() == &AI; + Ptr = LI->getOperand(0); + else + Ptr = cast(I)->getPointerOperand(); + + // Only used to detect cycles, which will be rare and quickly found as + // we're walking up a chain of defs rather than down through uses. + SmallPtrSet Visited; + + do { + if (Ptr == &AI) + return true; + + if (BitCastInst *BCI = dyn_cast(Ptr)) + Ptr = BCI->getOperand(0); + else if (GetElementPtrInst *GEPI = dyn_cast(Ptr)) + Ptr = GEPI->getPointerOperand(); + else + return false; + + } while (Visited.insert(Ptr)); + + return false; } virtual void updateDebugInfo(Instruction *Inst) const { - for (SmallVector::const_iterator I = DDIs.begin(), + for (SmallVectorImpl::const_iterator I = DDIs.begin(), E = DDIs.end(); I != E; ++I) { DbgDeclareInst *DDI = *I; if (StoreInst *SI = dyn_cast(Inst)) @@ -1296,21 +799,21 @@ public: else if (LoadInst *LI = dyn_cast(Inst)) ConvertDebugDeclareToDebugValue(DDI, LI, DIB); } - for (SmallVector::const_iterator I = DVIs.begin(), + for (SmallVectorImpl::const_iterator I = DVIs.begin(), E = DVIs.end(); I != E; ++I) { DbgValueInst *DVI = *I; - Value *Arg = NULL; + Value *Arg = 0; if (StoreInst *SI = dyn_cast(Inst)) { // If an argument is zero extended then use argument directly. The ZExt // may be zapped by an optimization pass in future. if (ZExtInst *ZExt = dyn_cast(SI->getOperand(0))) Arg = dyn_cast(ZExt->getOperand(0)); - if (SExtInst *SExt = dyn_cast(SI->getOperand(0))) + else if (SExtInst *SExt = dyn_cast(SI->getOperand(0))) Arg = dyn_cast(SExt->getOperand(0)); if (!Arg) - Arg = SI->getOperand(0); + Arg = SI->getValueOperand(); } else if (LoadInst *LI = dyn_cast(Inst)) { - Arg = LI->getOperand(0); + Arg = LI->getPointerOperand(); } else { continue; } @@ -1334,7 +837,7 @@ namespace { /// 1) It takes allocations of aggregates and analyzes the ways in which they /// are used to try to split them into smaller allocations, ideally of /// a single scalar data type. It will split up memcpy and memset accesses -/// as necessary and try to isolate invidual scalar accesses. +/// as necessary and try to isolate individual scalar accesses. /// 2) It will transform accesses into forms which are suitable for SSA value /// promotion. 
This can be replacing a memset with a scalar store of an /// integer value, or it can involve speculating operations on a PHI or @@ -1347,7 +850,7 @@ class SROA : public FunctionPass { const bool RequiresDomTree; LLVMContext *C; - const DataLayout *TD; + const DataLayout *DL; DominatorTree *DT; /// \brief Worklist of alloca instructions to simplify. @@ -1362,11 +865,7 @@ class SROA : public FunctionPass { /// \brief A collection of instructions to delete. /// We try to batch deletions to simplify code and make things a bit more /// efficient. - SmallVector DeadInsts; - - /// \brief A set to prevent repeatedly marking an instruction split into many - /// uses as dead. Only used to guard insertion into DeadInsts. - SmallPtrSet DeadSplitInsts; + SetVector > DeadInsts; /// \brief Post-promotion worklist. /// @@ -1381,10 +880,25 @@ class SROA : public FunctionPass { /// \brief A collection of alloca instructions we can directly promote. std::vector PromotableAllocas; + /// \brief A worklist of PHIs to speculate prior to promoting allocas. + /// + /// All of these PHIs have been checked for the safety of speculation and by + /// being speculated will allow promoting allocas currently in the promotable + /// queue. + SetVector > SpeculatablePHIs; + + /// \brief A worklist of select instructions to speculate prior to promoting + /// allocas. + /// + /// All of these select instructions have been checked for the safety of + /// speculation and by being speculated will allow promoting allocas + /// currently in the promotable queue. + SetVector > SpeculatableSelects; + public: SROA(bool RequiresDomTree = true) : FunctionPass(ID), RequiresDomTree(RequiresDomTree), - C(0), TD(0), DT(0) { + C(0), DL(0), DT(0) { initializeSROAPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F); @@ -1395,13 +909,13 @@ public: private: friend class PHIOrSelectSpeculator; - friend class AllocaPartitionRewriter; - friend class AllocaPartitionVectorRewriter; + friend class AllocaSliceRewriter; - bool rewriteAllocaPartition(AllocaInst &AI, - AllocaPartitioning &P, - AllocaPartitioning::iterator PI); - bool splitAlloca(AllocaInst &AI, AllocaPartitioning &P); + bool rewritePartition(AllocaInst &AI, AllocaSlices &S, + AllocaSlices::iterator B, AllocaSlices::iterator E, + int64_t BeginOffset, int64_t EndOffset, + ArrayRef SplitUses); + bool splitAlloca(AllocaInst &AI, AllocaSlices &S); bool runOnAlloca(AllocaInst &AI); void deleteDeadInstructions(SmallPtrSet &DeletedAllocas); bool promoteAllocas(Function &F); @@ -1420,334 +934,263 @@ INITIALIZE_PASS_DEPENDENCY(DominatorTree) INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates", false, false) -namespace { -/// \brief Visitor to speculate PHIs and Selects where possible. -class PHIOrSelectSpeculator : public InstVisitor { - // Befriend the base class so it can delegate to private visit methods. - friend class llvm::InstVisitor; - - const DataLayout &TD; - AllocaPartitioning &P; - SROA &Pass; +/// Walk the range of a partitioning looking for a common type to cover this +/// sequence of slices. 
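/// For instance, given an 8-byte partition in which one slice loads the bytes
/// as an i64 and another stores a double over the same range, the i64 is the
/// common type returned: a covering integer use is preferred as a "bucket of
/// bits" regardless of the order in which the slices are visited.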
+static Type *findCommonType(AllocaSlices::const_iterator B, + AllocaSlices::const_iterator E, + uint64_t EndOffset) { + Type *Ty = 0; + bool IgnoreNonIntegralTypes = false; + for (AllocaSlices::const_iterator I = B; I != E; ++I) { + Use *U = I->getUse(); + if (isa(*U->getUser())) + continue; + if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset) + continue; -public: - PHIOrSelectSpeculator(const DataLayout &TD, AllocaPartitioning &P, SROA &Pass) - : TD(TD), P(P), Pass(Pass) {} - - /// \brief Visit the users of an alloca partition and rewrite them. - void visitUsers(AllocaPartitioning::const_iterator PI) { - // Note that we need to use an index here as the underlying vector of uses - // may be grown during speculation. However, we never need to re-visit the - // new uses, and so we can use the initial size bound. - for (unsigned Idx = 0, Size = P.use_size(PI); Idx != Size; ++Idx) { - const AllocaPartitioning::PartitionUse &PU = P.getUse(PI, Idx); - if (!PU.U) - continue; // Skip dead use. - - visit(cast(PU.U->getUser())); + Type *UserTy = 0; + if (LoadInst *LI = dyn_cast(U->getUser())) { + UserTy = LI->getType(); + } else if (StoreInst *SI = dyn_cast(U->getUser())) { + UserTy = SI->getValueOperand()->getType(); + } else { + IgnoreNonIntegralTypes = true; // Give up on anything but an iN type. + continue; } - } -private: - // By default, skip this instruction. - void visitInstruction(Instruction &I) {} - - /// PHI instructions that use an alloca and are subsequently loaded can be - /// rewritten to load both input pointers in the pred blocks and then PHI the - /// results, allowing the load of the alloca to be promoted. - /// From this: - /// %P2 = phi [i32* %Alloca, i32* %Other] - /// %V = load i32* %P2 - /// to: - /// %V1 = load i32* %Alloca -> will be mem2reg'd - /// ... - /// %V2 = load i32* %Other - /// ... - /// %V = phi [i32 %V1, i32 %V2] - /// - /// We can do this to a select if its only uses are loads and if the operands - /// to the select can be loaded unconditionally. - /// - /// FIXME: This should be hoisted into a generic utility, likely in - /// Transforms/Util/Local.h - bool isSafePHIToSpeculate(PHINode &PN, SmallVectorImpl &Loads) { - // For now, we can only do this promotion if the load is in the same block - // as the PHI, and if there are no stores between the phi and load. - // TODO: Allow recursive phi users. - // TODO: Allow stores. - BasicBlock *BB = PN.getParent(); - unsigned MaxAlign = 0; - for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); - UI != UE; ++UI) { - LoadInst *LI = dyn_cast(*UI); - if (LI == 0 || !LI->isSimple()) return false; - - // For now we only allow loads in the same block as the PHI. This is - // a common case that happens when instcombine merges two loads through - // a PHI. - if (LI->getParent() != BB) return false; - - // Ensure that there are no instructions between the PHI and the load that - // could store. - for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI) - if (BBI->mayWriteToMemory()) - return false; - - MaxAlign = std::max(MaxAlign, LI->getAlignment()); - Loads.push_back(LI); + if (IntegerType *ITy = dyn_cast(UserTy)) { + // If the type is larger than the partition, skip it. We only encounter + // this for split integer operations where we want to use the type of the + // entity causing the split. Also skip if the type is not a byte width + // multiple. 
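      // For example, over a 4-byte partition an i64 use left behind by a split
      // load is skipped (8 bytes > 4 bytes), and an i17 use is skipped because
      // 17 is not a multiple of 8.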
+ if (ITy->getBitWidth() % 8 != 0 || + ITy->getBitWidth() / 8 > (EndOffset - B->beginOffset())) + continue; + + // If we have found an integer type use covering the alloca, use that + // regardless of the other types, as integers are often used for + // a "bucket of bits" type. + // + // NB: This *must* be the only return from inside the loop so that the + // order of slices doesn't impact the computed type. + return ITy; + } else if (IgnoreNonIntegralTypes) { + continue; } - // We can only transform this if it is safe to push the loads into the - // predecessor blocks. The only thing to watch out for is that we can't put - // a possibly trapping load in the predecessor if it is a critical edge. - for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; - ++Idx) { - TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator(); - Value *InVal = PN.getIncomingValue(Idx); - - // If the value is produced by the terminator of the predecessor (an - // invoke) or it has side-effects, there is no valid place to put a load - // in the predecessor. - if (TI == InVal || TI->mayHaveSideEffects()) - return false; + if (Ty && Ty != UserTy) + IgnoreNonIntegralTypes = true; // Give up on anything but an iN type. - // If the predecessor has a single successor, then the edge isn't - // critical. - if (TI->getNumSuccessors() == 1) - continue; + Ty = UserTy; + } + return Ty; +} - // If this pointer is always safe to load, or if we can prove that there - // is already a load in the block, then we can move the load to the pred - // block. - if (InVal->isDereferenceablePointer() || - isSafeToLoadUnconditionally(InVal, TI, MaxAlign, &TD)) - continue; +/// PHI instructions that use an alloca and are subsequently loaded can be +/// rewritten to load both input pointers in the pred blocks and then PHI the +/// results, allowing the load of the alloca to be promoted. +/// From this: +/// %P2 = phi [i32* %Alloca, i32* %Other] +/// %V = load i32* %P2 +/// to: +/// %V1 = load i32* %Alloca -> will be mem2reg'd +/// ... +/// %V2 = load i32* %Other +/// ... +/// %V = phi [i32 %V1, i32 %V2] +/// +/// We can do this to a select if its only uses are loads and if the operands +/// to the select can be loaded unconditionally. +/// +/// FIXME: This should be hoisted into a generic utility, likely in +/// Transforms/Util/Local.h +static bool isSafePHIToSpeculate(PHINode &PN, + const DataLayout *DL = 0) { + // For now, we can only do this promotion if the load is in the same block + // as the PHI, and if there are no stores between the phi and load. + // TODO: Allow recursive phi users. + // TODO: Allow stores. + BasicBlock *BB = PN.getParent(); + unsigned MaxAlign = 0; + bool HaveLoad = false; + for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); UI != UE; + ++UI) { + LoadInst *LI = dyn_cast(*UI); + if (LI == 0 || !LI->isSimple()) + return false; + // For now we only allow loads in the same block as the PHI. This is + // a common case that happens when instcombine merges two loads through + // a PHI. + if (LI->getParent() != BB) return false; - } - return true; + // Ensure that there are no instructions between the PHI and the load that + // could store. 
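    // For example, speculation is rejected for a hypothetical block such as:
    //   %p = phi [i32* %alloca, i32* %other]
    //   store i32 0, i32* %other
    //   %v = load i32* %p
    // because hoisting the load above the store could read a stale value.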
+ for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI) + if (BBI->mayWriteToMemory()) + return false; + + MaxAlign = std::max(MaxAlign, LI->getAlignment()); + HaveLoad = true; } - void visitPHINode(PHINode &PN) { - DEBUG(dbgs() << " original: " << PN << "\n"); + if (!HaveLoad) + return false; - SmallVector Loads; - if (!isSafePHIToSpeculate(PN, Loads)) - return; + // We can only transform this if it is safe to push the loads into the + // predecessor blocks. The only thing to watch out for is that we can't put + // a possibly trapping load in the predecessor if it is a critical edge. + for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { + TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator(); + Value *InVal = PN.getIncomingValue(Idx); + + // If the value is produced by the terminator of the predecessor (an + // invoke) or it has side-effects, there is no valid place to put a load + // in the predecessor. + if (TI == InVal || TI->mayHaveSideEffects()) + return false; - assert(!Loads.empty()); + // If the predecessor has a single successor, then the edge isn't + // critical. + if (TI->getNumSuccessors() == 1) + continue; - Type *LoadTy = cast(PN.getType())->getElementType(); - IRBuilder<> PHIBuilder(&PN); - PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(), - PN.getName() + ".sroa.speculated"); + // If this pointer is always safe to load, or if we can prove that there + // is already a load in the block, then we can move the load to the pred + // block. + if (InVal->isDereferenceablePointer() || + isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL)) + continue; - // Get the TBAA tag and alignment to use from one of the loads. It doesn't - // matter which one we get and if any differ, it doesn't matter. - LoadInst *SomeLoad = cast(Loads.back()); - MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa); - unsigned Align = SomeLoad->getAlignment(); + return false; + } - // Rewrite all loads of the PN to use the new PHI. - do { - LoadInst *LI = Loads.pop_back_val(); - LI->replaceAllUsesWith(NewPN); - Pass.DeadInsts.push_back(LI); - } while (!Loads.empty()); - - // Inject loads into all of the pred blocks. - for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { - BasicBlock *Pred = PN.getIncomingBlock(Idx); - TerminatorInst *TI = Pred->getTerminator(); - Use *InUse = &PN.getOperandUse(PN.getOperandNumForIncomingValue(Idx)); - Value *InVal = PN.getIncomingValue(Idx); - IRBuilder<> PredBuilder(TI); - - LoadInst *Load - = PredBuilder.CreateLoad(InVal, (PN.getName() + ".sroa.speculate.load." + - Pred->getName())); - ++NumLoadsSpeculated; - Load->setAlignment(Align); - if (TBAATag) - Load->setMetadata(LLVMContext::MD_tbaa, TBAATag); - NewPN->addIncoming(Load, Pred); - - Instruction *Ptr = dyn_cast(InVal); - if (!Ptr) - // No uses to rewrite. - continue; + return true; +} - // Try to lookup and rewrite any partition uses corresponding to this phi - // input. - AllocaPartitioning::iterator PI - = P.findPartitionForPHIOrSelectOperand(InUse); - if (PI == P.end()) - continue; +static void speculatePHINodeLoads(PHINode &PN) { + DEBUG(dbgs() << " original: " << PN << "\n"); + + Type *LoadTy = cast(PN.getType())->getElementType(); + IRBuilderTy PHIBuilder(&PN); + PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(), + PN.getName() + ".sroa.speculated"); + + // Get the TBAA tag and alignment to use from one of the loads. It doesn't + // matter which one we get and if any differ. 
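  // By this point isSafePHIToSpeculate has verified that every user of the PHI
  // is a simple load, so inspecting the first use is safe.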
+ LoadInst *SomeLoad = cast(*PN.use_begin()); + MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa); + unsigned Align = SomeLoad->getAlignment(); + + // Rewrite all loads of the PN to use the new PHI. + while (!PN.use_empty()) { + LoadInst *LI = cast(*PN.use_begin()); + LI->replaceAllUsesWith(NewPN); + LI->eraseFromParent(); + } + + // Inject loads into all of the pred blocks. + for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { + BasicBlock *Pred = PN.getIncomingBlock(Idx); + TerminatorInst *TI = Pred->getTerminator(); + Value *InVal = PN.getIncomingValue(Idx); + IRBuilderTy PredBuilder(TI); + + LoadInst *Load = PredBuilder.CreateLoad( + InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName())); + ++NumLoadsSpeculated; + Load->setAlignment(Align); + if (TBAATag) + Load->setMetadata(LLVMContext::MD_tbaa, TBAATag); + NewPN->addIncoming(Load, Pred); + } + + DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); + PN.eraseFromParent(); +} - // Replace the Use in the PartitionUse for this operand with the Use - // inside the load. - AllocaPartitioning::use_iterator UI - = P.findPartitionUseForPHIOrSelectOperand(InUse); - assert(isa(*UI->U->getUser())); - UI->U = &Load->getOperandUse(Load->getPointerOperandIndex()); - } - DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); - } - - /// Select instructions that use an alloca and are subsequently loaded can be - /// rewritten to load both input pointers and then select between the result, - /// allowing the load of the alloca to be promoted. - /// From this: - /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other - /// %V = load i32* %P2 - /// to: - /// %V1 = load i32* %Alloca -> will be mem2reg'd - /// %V2 = load i32* %Other - /// %V = select i1 %cond, i32 %V1, i32 %V2 - /// - /// We can do this to a select if its only uses are loads and if the operand - /// to the select can be loaded unconditionally. - bool isSafeSelectToSpeculate(SelectInst &SI, - SmallVectorImpl &Loads) { - Value *TValue = SI.getTrueValue(); - Value *FValue = SI.getFalseValue(); - bool TDerefable = TValue->isDereferenceablePointer(); - bool FDerefable = FValue->isDereferenceablePointer(); - - for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); - UI != UE; ++UI) { - LoadInst *LI = dyn_cast(*UI); - if (LI == 0 || !LI->isSimple()) return false; - - // Both operands to the select need to be dereferencable, either - // absolutely (e.g. allocas) or at this point because we can see other - // accesses to it. - if (!TDerefable && !isSafeToLoadUnconditionally(TValue, LI, - LI->getAlignment(), &TD)) - return false; - if (!FDerefable && !isSafeToLoadUnconditionally(FValue, LI, - LI->getAlignment(), &TD)) - return false; - Loads.push_back(LI); - } +/// Select instructions that use an alloca and are subsequently loaded can be +/// rewritten to load both input pointers and then select between the result, +/// allowing the load of the alloca to be promoted. +/// From this: +/// %P2 = select i1 %cond, i32* %Alloca, i32* %Other +/// %V = load i32* %P2 +/// to: +/// %V1 = load i32* %Alloca -> will be mem2reg'd +/// %V2 = load i32* %Other +/// %V = select i1 %cond, i32 %V1, i32 %V2 +/// +/// We can do this to a select if its only uses are loads and if the operand +/// to the select can be loaded unconditionally. 
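/// The unconditional-load requirement matters because speculation loads both
/// operands: given a hypothetical "%p = select i1 %c, i32* %a, i32* %b" where
/// %b is dereferenceable only when %c is false, loading %b up front could trap.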
+static bool isSafeSelectToSpeculate(SelectInst &SI, const DataLayout *DL = 0) { + Value *TValue = SI.getTrueValue(); + Value *FValue = SI.getFalseValue(); + bool TDerefable = TValue->isDereferenceablePointer(); + bool FDerefable = FValue->isDereferenceablePointer(); + + for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); UI != UE; + ++UI) { + LoadInst *LI = dyn_cast(*UI); + if (LI == 0 || !LI->isSimple()) + return false; - return true; + // Both operands to the select need to be dereferencable, either + // absolutely (e.g. allocas) or at this point because we can see other + // accesses to it. + if (!TDerefable && + !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment(), DL)) + return false; + if (!FDerefable && + !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment(), DL)) + return false; } - void visitSelectInst(SelectInst &SI) { - DEBUG(dbgs() << " original: " << SI << "\n"); - IRBuilder<> IRB(&SI); - - // If the select isn't safe to speculate, just use simple logic to emit it. - SmallVector Loads; - if (!isSafeSelectToSpeculate(SI, Loads)) - return; + return true; +} - Use *Ops[2] = { &SI.getOperandUse(1), &SI.getOperandUse(2) }; - AllocaPartitioning::iterator PIs[2]; - AllocaPartitioning::PartitionUse PUs[2]; - for (unsigned i = 0, e = 2; i != e; ++i) { - PIs[i] = P.findPartitionForPHIOrSelectOperand(Ops[i]); - if (PIs[i] != P.end()) { - // If the pointer is within the partitioning, remove the select from - // its uses. We'll add in the new loads below. - AllocaPartitioning::use_iterator UI - = P.findPartitionUseForPHIOrSelectOperand(Ops[i]); - PUs[i] = *UI; - // Clear out the use here so that the offsets into the use list remain - // stable but this use is ignored when rewriting. - UI->U = 0; - } - } +static void speculateSelectInstLoads(SelectInst &SI) { + DEBUG(dbgs() << " original: " << SI << "\n"); - Value *TV = SI.getTrueValue(); - Value *FV = SI.getFalseValue(); - // Replace the loads of the select with a select of two loads. - while (!Loads.empty()) { - LoadInst *LI = Loads.pop_back_val(); + IRBuilderTy IRB(&SI); + Value *TV = SI.getTrueValue(); + Value *FV = SI.getFalseValue(); + // Replace the loads of the select with a select of two loads. + while (!SI.use_empty()) { + LoadInst *LI = cast(*SI.use_begin()); + assert(LI->isSimple() && "We only speculate simple loads"); - IRB.SetInsertPoint(LI); - LoadInst *TL = + IRB.SetInsertPoint(LI); + LoadInst *TL = IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true"); - LoadInst *FL = + LoadInst *FL = IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false"); - NumLoadsSpeculated += 2; - - // Transfer alignment and TBAA info if present. - TL->setAlignment(LI->getAlignment()); - FL->setAlignment(LI->getAlignment()); - if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) { - TL->setMetadata(LLVMContext::MD_tbaa, Tag); - FL->setMetadata(LLVMContext::MD_tbaa, Tag); - } - - Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, - LI->getName() + ".sroa.speculated"); - - LoadInst *Loads[2] = { TL, FL }; - for (unsigned i = 0, e = 2; i != e; ++i) { - if (PIs[i] != P.end()) { - Use *LoadUse = &Loads[i]->getOperandUse(0); - assert(PUs[i].U->get() == LoadUse->get()); - PUs[i].U = LoadUse; - P.use_push_back(PIs[i], PUs[i]); - } - } - - DEBUG(dbgs() << " speculated to: " << *V << "\n"); - LI->replaceAllUsesWith(V); - Pass.DeadInsts.push_back(LI); - } - } -}; -} + NumLoadsSpeculated += 2; -/// \brief Accumulate the constant offsets in a GEP into a single APInt offset. 
-/// -/// If the provided GEP is all-constant, the total byte offset formed by the -/// GEP is computed and Offset is set to it. If the GEP has any non-constant -/// operands, the function returns false and the value of Offset is unmodified. -static bool accumulateGEPOffsets(const DataLayout &TD, GEPOperator &GEP, - APInt &Offset) { - APInt GEPOffset(Offset.getBitWidth(), 0); - for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); - GTI != GTE; ++GTI) { - ConstantInt *OpC = dyn_cast(GTI.getOperand()); - if (!OpC) - return false; - if (OpC->isZero()) continue; - - // Handle a struct index, which adds its field offset to the pointer. - if (StructType *STy = dyn_cast(*GTI)) { - unsigned ElementIdx = OpC->getZExtValue(); - const StructLayout *SL = TD.getStructLayout(STy); - GEPOffset += APInt(Offset.getBitWidth(), - SL->getElementOffset(ElementIdx)); - continue; + // Transfer alignment and TBAA info if present. + TL->setAlignment(LI->getAlignment()); + FL->setAlignment(LI->getAlignment()); + if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) { + TL->setMetadata(LLVMContext::MD_tbaa, Tag); + FL->setMetadata(LLVMContext::MD_tbaa, Tag); } - APInt TypeSize(Offset.getBitWidth(), - TD.getTypeAllocSize(GTI.getIndexedType())); - if (VectorType *VTy = dyn_cast(*GTI)) { - assert((VTy->getScalarSizeInBits() % 8) == 0 && - "vector element size is not a multiple of 8, cannot GEP over it"); - TypeSize = VTy->getScalarSizeInBits() / 8; - } + Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, + LI->getName() + ".sroa.speculated"); - GEPOffset += OpC->getValue().sextOrTrunc(Offset.getBitWidth()) * TypeSize; + DEBUG(dbgs() << " speculated to: " << *V << "\n"); + LI->replaceAllUsesWith(V); + LI->eraseFromParent(); } - Offset = GEPOffset; - return true; + SI.eraseFromParent(); } /// \brief Build a GEP out of a base pointer and indices. /// /// This will return the BasePtr if that is valid, or build a new GEP /// instruction using the IRBuilder if GEP-ing is needed. -static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr, - SmallVectorImpl &Indices, - const Twine &Prefix) { +static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, + SmallVectorImpl &Indices) { if (Indices.empty()) return BasePtr; @@ -1756,7 +1199,7 @@ static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr, if (Indices.size() == 1 && cast(Indices.back())->isZero()) return BasePtr; - return IRB.CreateInBoundsGEP(BasePtr, Indices, Prefix + ".idx"); + return IRB.CreateInBoundsGEP(BasePtr, Indices, "idx"); } /// \brief Get a natural GEP off of the BasePtr walking through Ty toward @@ -1768,12 +1211,11 @@ static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr, /// TargetTy. If we can't find one with the same type, we at least try to use /// one with the same size. If none of that works, we just produce the GEP as /// indicated by Indices to have the correct offset. -static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD, +static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, Value *BasePtr, Type *Ty, Type *TargetTy, - SmallVectorImpl &Indices, - const Twine &Prefix) { + SmallVectorImpl &Indices) { if (Ty == TargetTy) - return buildGEP(IRB, BasePtr, Indices, Prefix); + return buildGEP(IRB, BasePtr, Indices); // See if we can descend into a struct and locate a field with the correct // type. 
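  // For example, with a base pointer of type { { float } }* and a TargetTy of
  // float, the descent below appends two zero indices and produces a GEP to
  // the innermost float field.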
@@ -1784,7 +1226,9 @@ static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD, break; if (SequentialType *SeqTy = dyn_cast(ElementTy)) { ElementTy = SeqTy->getElementType(); - Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(), 0))); + // Note that we use the default address space as this index is over an + // array or a vector, not a pointer. + Indices.push_back(IRB.getInt(APInt(DL.getPointerSizeInBits(0), 0))); } else if (StructType *STy = dyn_cast(ElementTy)) { if (STy->element_begin() == STy->element_end()) break; // Nothing left to descend into. @@ -1798,20 +1242,19 @@ static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD, if (ElementTy != TargetTy) Indices.erase(Indices.end() - NumLayers, Indices.end()); - return buildGEP(IRB, BasePtr, Indices, Prefix); + return buildGEP(IRB, BasePtr, Indices); } /// \brief Recursively compute indices for a natural GEP. /// /// This is the recursive step for getNaturalGEPWithOffset that walks down the /// element types adding appropriate indices for the GEP. -static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD, +static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, Type *Ty, APInt &Offset, Type *TargetTy, - SmallVectorImpl &Indices, - const Twine &Prefix) { + SmallVectorImpl &Indices) { if (Offset == 0) - return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices, Prefix); + return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices); // We can't recurse through pointer types. if (Ty->isPointerTy()) @@ -1821,49 +1264,49 @@ static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD, // extremely poorly defined currently. The long-term goal is to remove GEPing // over a vector from the IR completely. if (VectorType *VecTy = dyn_cast(Ty)) { - unsigned ElementSizeInBits = VecTy->getScalarSizeInBits(); + unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType()); if (ElementSizeInBits % 8) return 0; // GEPs over non-multiple of 8 size vector elements are invalid. 
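    // Worked example: for a <4 x i32> with Offset == 8, ElementSize is 4
    // bytes, NumSkippedElements is 2, the remaining Offset becomes 0, and
    // index 2 is appended before recursing into the i32 element type.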
APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8); - APInt NumSkippedElements = Offset.udiv(ElementSize); + APInt NumSkippedElements = Offset.sdiv(ElementSize); if (NumSkippedElements.ugt(VecTy->getNumElements())) return 0; Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); - return getNaturalGEPRecursively(IRB, TD, Ptr, VecTy->getElementType(), - Offset, TargetTy, Indices, Prefix); + return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(), + Offset, TargetTy, Indices); } if (ArrayType *ArrTy = dyn_cast(Ty)) { Type *ElementTy = ArrTy->getElementType(); - APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy)); - APInt NumSkippedElements = Offset.udiv(ElementSize); + APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy)); + APInt NumSkippedElements = Offset.sdiv(ElementSize); if (NumSkippedElements.ugt(ArrTy->getNumElements())) return 0; Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); - return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, - Indices, Prefix); + return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, + Indices); } StructType *STy = dyn_cast(Ty); if (!STy) return 0; - const StructLayout *SL = TD.getStructLayout(STy); + const StructLayout *SL = DL.getStructLayout(STy); uint64_t StructOffset = Offset.getZExtValue(); if (StructOffset >= SL->getSizeInBytes()) return 0; unsigned Index = SL->getElementContainingOffset(StructOffset); Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index)); Type *ElementTy = STy->getElementType(Index); - if (Offset.uge(TD.getTypeAllocSize(ElementTy))) + if (Offset.uge(DL.getTypeAllocSize(ElementTy))) return 0; // The offset points into alignment padding. Indices.push_back(IRB.getInt32(Index)); - return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, - Indices, Prefix); + return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, + Indices); } /// \brief Get a natural GEP from a base pointer to a particular offset and @@ -1876,10 +1319,9 @@ static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD, /// Indices, and setting Ty to the result subtype. /// /// If no natural GEP can be constructed, this function returns null. -static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD, +static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, APInt Offset, Type *TargetTy, - SmallVectorImpl &Indices, - const Twine &Prefix) { + SmallVectorImpl &Indices) { PointerType *Ty = cast(Ptr->getType()); // Don't consider any GEPs through an i8* as natural unless the TargetTy is @@ -1890,15 +1332,15 @@ static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD, Type *ElementTy = Ty->getElementType(); if (!ElementTy->isSized()) return 0; // We can't GEP through an unsized element. - APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy)); + APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy)); if (ElementSize == 0) return 0; // Zero-length arrays can't help us build a natural GEP. 
- APInt NumSkippedElements = Offset.udiv(ElementSize); + APInt NumSkippedElements = Offset.sdiv(ElementSize); Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); - return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, - Indices, Prefix); + return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, + Indices); } /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the @@ -1913,12 +1355,11 @@ static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD, /// The strategy for finding the more natural GEPs is to peel off layers of the /// pointer, walking back through bit casts and GEPs, searching for a base /// pointer from which we can compute a natural GEP with the desired -/// properities. The algorithm tries to fold as many constant indices into +/// properties. The algorithm tries to fold as many constant indices into /// a single GEP as possible, thus making each GEP more independent of the /// surrounding code. -static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, - Value *Ptr, APInt Offset, Type *PointerTy, - const Twine &Prefix) { +static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, + Value *Ptr, APInt Offset, Type *PointerTy) { // Even though we don't look through PHI nodes, we could be called on an // instruction in an unreachable block, which may be on a cycle. SmallPtrSet Visited; @@ -1941,7 +1382,7 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, // First fold any existing GEPs into the offset. while (GEPOperator *GEP = dyn_cast(Ptr)) { APInt GEPOffset(Offset.getBitWidth(), 0); - if (!accumulateGEPOffsets(TD, *GEP, GEPOffset)) + if (!GEP->accumulateConstantOffset(DL, GEPOffset)) break; Offset += GEPOffset; Ptr = GEP->getPointerOperand(); @@ -1951,8 +1392,8 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, // See if we can perform a natural GEP here. Indices.clear(); - if (Value *P = getNaturalGEPWithOffset(IRB, TD, Ptr, Offset, TargetTy, - Indices, Prefix)) { + if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy, + Indices)) { if (P->getType() == PointerTy) { // Zap any offset pointer that we ended up computing in previous rounds. if (OffsetPtr && OffsetPtr->use_empty()) @@ -1987,24 +1428,183 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, if (!OffsetPtr) { if (!Int8Ptr) { Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(), - Prefix + ".raw_cast"); + "raw_cast"); Int8PtrOffset = Offset; } OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr : IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset), - Prefix + ".raw_idx"); + "raw_idx"); } Ptr = OffsetPtr; // On the off chance we were targeting i8*, guard the bitcast here. if (Ptr->getType() != PointerTy) - Ptr = IRB.CreateBitCast(Ptr, PointerTy, Prefix + ".cast"); + Ptr = IRB.CreateBitCast(Ptr, PointerTy, "cast"); return Ptr; } -/// \brief Test whether the given alloca partition can be promoted to a vector. +/// \brief Test whether we can convert a value from the old to the new type. +/// +/// This predicate should be used to guard calls to convertValue in order to +/// ensure that we only try to convert viable values. The strategy is that we +/// will peel off single element struct and array wrappings to get to an +/// underlying value, and convert that value. 
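/// As a rough guide (assuming 64-bit pointers): i8* <-> i64, <2 x i32> <-> i64,
/// and float <-> i32 are treated as convertible, while i64 -> i32 is not
/// because the two types differ in size.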
+static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { + if (OldTy == NewTy) + return true; + if (IntegerType *OldITy = dyn_cast(OldTy)) + if (IntegerType *NewITy = dyn_cast(NewTy)) + if (NewITy->getBitWidth() >= OldITy->getBitWidth()) + return true; + if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy)) + return false; + if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) + return false; + + // We can convert pointers to integers and vice-versa. Same for vectors + // of pointers and integers. + OldTy = OldTy->getScalarType(); + NewTy = NewTy->getScalarType(); + if (NewTy->isPointerTy() || OldTy->isPointerTy()) { + if (NewTy->isPointerTy() && OldTy->isPointerTy()) + return true; + if (NewTy->isIntegerTy() || OldTy->isIntegerTy()) + return true; + return false; + } + + return true; +} + +/// \brief Generic routine to convert an SSA value to a value of a different +/// type. +/// +/// This will try various different casting techniques, such as bitcasts, +/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test +/// two types for viability with this routine. +static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V, + Type *NewTy) { + Type *OldTy = V->getType(); + assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type"); + + if (OldTy == NewTy) + return V; + + if (IntegerType *OldITy = dyn_cast(OldTy)) + if (IntegerType *NewITy = dyn_cast(NewTy)) + if (NewITy->getBitWidth() > OldITy->getBitWidth()) + return IRB.CreateZExt(V, NewITy); + + // See if we need inttoptr for this type pair. A cast involving both scalars + // and vectors requires and additional bitcast. + if (OldTy->getScalarType()->isIntegerTy() && + NewTy->getScalarType()->isPointerTy()) { + // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8* + if (OldTy->isVectorTy() && !NewTy->isVectorTy()) + return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), + NewTy); + + // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*> + if (!OldTy->isVectorTy() && NewTy->isVectorTy()) + return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), + NewTy); + + return IRB.CreateIntToPtr(V, NewTy); + } + + // See if we need ptrtoint for this type pair. A cast involving both scalars + // and vectors requires and additional bitcast. + if (OldTy->getScalarType()->isPointerTy() && + NewTy->getScalarType()->isIntegerTy()) { + // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128 + if (OldTy->isVectorTy() && !NewTy->isVectorTy()) + return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), + NewTy); + + // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32> + if (!OldTy->isVectorTy() && NewTy->isVectorTy()) + return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), + NewTy); + + return IRB.CreatePtrToInt(V, NewTy); + } + + return IRB.CreateBitCast(V, NewTy); +} + +/// \brief Test whether the given slice use can be promoted to a vector. +/// +/// This function is called to test each entry in a partioning which is slated +/// for a single slice. +static bool isVectorPromotionViableForSlice( + const DataLayout &DL, AllocaSlices &S, uint64_t SliceBeginOffset, + uint64_t SliceEndOffset, VectorType *Ty, uint64_t ElementSize, + AllocaSlices::const_iterator I) { + // First validate the slice offsets. 
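  // Worked example: for a <4 x i32> partition covering [0,16) with an element
  // size of 4 bytes, a slice at [4,12) maps to BeginIndex 1 and EndIndex 3,
  // i.e. a two-element <2 x i32> SliceTy.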
+ uint64_t BeginOffset = + std::max(I->beginOffset(), SliceBeginOffset) - SliceBeginOffset; + uint64_t BeginIndex = BeginOffset / ElementSize; + if (BeginIndex * ElementSize != BeginOffset || + BeginIndex >= Ty->getNumElements()) + return false; + uint64_t EndOffset = + std::min(I->endOffset(), SliceEndOffset) - SliceBeginOffset; + uint64_t EndIndex = EndOffset / ElementSize; + if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements()) + return false; + + assert(EndIndex > BeginIndex && "Empty vector!"); + uint64_t NumElements = EndIndex - BeginIndex; + Type *SliceTy = + (NumElements == 1) ? Ty->getElementType() + : VectorType::get(Ty->getElementType(), NumElements); + + Type *SplitIntTy = + Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8); + + Use *U = I->getUse(); + + if (MemIntrinsic *MI = dyn_cast(U->getUser())) { + if (MI->isVolatile()) + return false; + if (!I->isSplittable()) + return false; // Skip any unsplittable intrinsics. + } else if (U->get()->getType()->getPointerElementType()->isStructTy()) { + // Disable vector promotion when there are loads or stores of an FCA. + return false; + } else if (LoadInst *LI = dyn_cast(U->getUser())) { + if (LI->isVolatile()) + return false; + Type *LTy = LI->getType(); + if (SliceBeginOffset > I->beginOffset() || + SliceEndOffset < I->endOffset()) { + assert(LTy->isIntegerTy()); + LTy = SplitIntTy; + } + if (!canConvertValue(DL, SliceTy, LTy)) + return false; + } else if (StoreInst *SI = dyn_cast(U->getUser())) { + if (SI->isVolatile()) + return false; + Type *STy = SI->getValueOperand()->getType(); + if (SliceBeginOffset > I->beginOffset() || + SliceEndOffset < I->endOffset()) { + assert(STy->isIntegerTy()); + STy = SplitIntTy; + } + if (!canConvertValue(DL, STy, SliceTy)) + return false; + } else { + return false; + } + + return true; +} + +/// \brief Test whether the given alloca partitioning and range of slices can be +/// promoted to a vector. /// /// This is a quick test to check whether we can rewrite a particular alloca /// partition (and its newly formed alloca) into a vector alloca with only @@ -2012,140 +1612,303 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, /// SSA value. We only can ensure this for a limited set of operations, and we /// don't want to do the rewrites unless we are confident that the result will /// be promotable, so we have an early test here. -static bool isVectorPromotionViable(const DataLayout &TD, - Type *AllocaTy, - AllocaPartitioning &P, - uint64_t PartitionBeginOffset, - uint64_t PartitionEndOffset, - AllocaPartitioning::const_use_iterator I, - AllocaPartitioning::const_use_iterator E) { +static bool +isVectorPromotionViable(const DataLayout &DL, Type *AllocaTy, AllocaSlices &S, + uint64_t SliceBeginOffset, uint64_t SliceEndOffset, + AllocaSlices::const_iterator I, + AllocaSlices::const_iterator E, + ArrayRef SplitUses) { VectorType *Ty = dyn_cast(AllocaTy); if (!Ty) return false; - uint64_t VecSize = TD.getTypeSizeInBits(Ty); - uint64_t ElementSize = Ty->getScalarSizeInBits(); + uint64_t ElementSize = DL.getTypeSizeInBits(Ty->getScalarType()); // While the definition of LLVM vectors is bitpacked, we don't support sizes // that aren't byte sized. 
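  // For example, an <8 x i1> alloca (1-bit elements) is rejected here, while a
  // <4 x i32> proceeds with ElementSize reduced to 4 bytes.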
if (ElementSize % 8) return false; - assert((VecSize % 8) == 0 && "vector size not a multiple of element size?"); - VecSize /= 8; + assert((DL.getTypeSizeInBits(Ty) % 8) == 0 && + "vector size not a multiple of element size?"); ElementSize /= 8; - for (; I != E; ++I) { - if (!I->U) - continue; // Skip dead use. - - uint64_t BeginOffset = I->BeginOffset - PartitionBeginOffset; - uint64_t BeginIndex = BeginOffset / ElementSize; - if (BeginIndex * ElementSize != BeginOffset || - BeginIndex >= Ty->getNumElements()) + for (; I != E; ++I) + if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset, + SliceEndOffset, Ty, ElementSize, I)) return false; - uint64_t EndOffset = I->EndOffset - PartitionBeginOffset; - uint64_t EndIndex = EndOffset / ElementSize; - if (EndIndex * ElementSize != EndOffset || - EndIndex > Ty->getNumElements()) + + for (ArrayRef::const_iterator SUI = SplitUses.begin(), + SUE = SplitUses.end(); + SUI != SUE; ++SUI) + if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset, + SliceEndOffset, Ty, ElementSize, *SUI)) return false; - // FIXME: We should build shuffle vector instructions to handle - // non-element-sized accesses. - if ((EndOffset - BeginOffset) != ElementSize && - (EndOffset - BeginOffset) != VecSize) + return true; +} + +/// \brief Test whether a slice of an alloca is valid for integer widening. +/// +/// This implements the necessary checking for the \c isIntegerWideningViable +/// test below on a single slice of the alloca. +static bool isIntegerWideningViableForSlice(const DataLayout &DL, + Type *AllocaTy, + uint64_t AllocBeginOffset, + uint64_t Size, AllocaSlices &S, + AllocaSlices::const_iterator I, + bool &WholeAllocaOp) { + uint64_t RelBegin = I->beginOffset() - AllocBeginOffset; + uint64_t RelEnd = I->endOffset() - AllocBeginOffset; + + // We can't reasonably handle cases where the load or store extends past + // the end of the aloca's type and into its padding. + if (RelEnd > Size) + return false; + + Use *U = I->getUse(); + + if (LoadInst *LI = dyn_cast(U->getUser())) { + if (LI->isVolatile()) + return false; + if (RelBegin == 0 && RelEnd == Size) + WholeAllocaOp = true; + if (IntegerType *ITy = dyn_cast(LI->getType())) { + if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) + return false; + } else if (RelBegin != 0 || RelEnd != Size || + !canConvertValue(DL, AllocaTy, LI->getType())) { + // Non-integer loads need to be convertible from the alloca type so that + // they are promotable. + return false; + } + } else if (StoreInst *SI = dyn_cast(U->getUser())) { + Type *ValueTy = SI->getValueOperand()->getType(); + if (SI->isVolatile()) return false; - - if (MemIntrinsic *MI = dyn_cast(I->U->getUser())) { - if (MI->isVolatile()) + if (RelBegin == 0 && RelEnd == Size) + WholeAllocaOp = true; + if (IntegerType *ITy = dyn_cast(ValueTy)) { + if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) return false; - if (MemTransferInst *MTI = dyn_cast(I->U->getUser())) { - const AllocaPartitioning::MemTransferOffsets &MTO - = P.getMemTransferOffsets(*MTI); - if (!MTO.IsSplittable) - return false; - } - } else if (I->U->get()->getType()->getPointerElementType()->isStructTy()) { - // Disable vector promotion when there are loads or stores of an FCA. - return false; - } else if (!isa(I->U->getUser()) && - !isa(I->U->getUser())) { + } else if (RelBegin != 0 || RelEnd != Size || + !canConvertValue(DL, ValueTy, AllocaTy)) { + // Non-integer stores need to be convertible to the alloca type so that + // they are promotable. 
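      // For instance, a float stored at a nonzero offset into an i64-sized
      // alloca cannot be widened into a single covering integer store.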
return false; } + } else if (MemIntrinsic *MI = dyn_cast(U->getUser())) { + if (MI->isVolatile() || !isa(MI->getLength())) + return false; + if (!I->isSplittable()) + return false; // Skip any unsplittable intrinsics. + } else if (IntrinsicInst *II = dyn_cast(U->getUser())) { + if (II->getIntrinsicID() != Intrinsic::lifetime_start && + II->getIntrinsicID() != Intrinsic::lifetime_end) + return false; + } else { + return false; } + return true; } -/// \brief Test whether the given alloca partition can be promoted to an int. +/// \brief Test whether the given alloca partition's integer operations can be +/// widened to promotable ones. /// -/// This is a quick test to check whether we can rewrite a particular alloca -/// partition (and its newly formed alloca) into an integer alloca suitable for -/// promotion to an SSA value. We only can ensure this for a limited set of -/// operations, and we don't want to do the rewrites unless we are confident -/// that the result will be promotable, so we have an early test here. -static bool isIntegerPromotionViable(const DataLayout &TD, - Type *AllocaTy, - uint64_t AllocBeginOffset, - AllocaPartitioning &P, - AllocaPartitioning::const_use_iterator I, - AllocaPartitioning::const_use_iterator E) { - IntegerType *Ty = dyn_cast(AllocaTy); - if (!Ty || 8*TD.getTypeStoreSize(Ty) != Ty->getBitWidth()) +/// This is a quick test to check whether we can rewrite the integer loads and +/// stores to a particular alloca into wider loads and stores and be able to +/// promote the resulting alloca. +static bool +isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy, + uint64_t AllocBeginOffset, AllocaSlices &S, + AllocaSlices::const_iterator I, + AllocaSlices::const_iterator E, + ArrayRef SplitUses) { + uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy); + // Don't create integer types larger than the maximum bitwidth. + if (SizeInBits > IntegerType::MAX_INT_BITS) + return false; + + // Don't try to handle allocas with bit-padding. + if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy)) + return false; + + // We need to ensure that an integer type with the appropriate bitwidth can + // be converted to the alloca type, whatever that is. We don't want to force + // the alloca itself to have an integer type if there is a more suitable one. + Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits); + if (!canConvertValue(DL, AllocaTy, IntTy) || + !canConvertValue(DL, IntTy, AllocaTy)) return false; - // Check the uses to ensure the uses are (likely) promoteable integer uses. - // Also ensure that the alloca has a covering load or store. We don't want - // promote because of some other unsplittable entry (which we may make - // splittable later) and lose the ability to promote each element access. - bool WholeAllocaOp = false; - for (; I != E; ++I) { - if (!I->U) - continue; // Skip dead use. - - // We can't reasonably handle cases where the load or store extends past - // the end of the aloca's type and into its padding. - if ((I->EndOffset - AllocBeginOffset) > TD.getTypeStoreSize(Ty)) + uint64_t Size = DL.getTypeStoreSize(AllocaTy); + + // While examining uses, we ensure that the alloca has a covering load or + // store. We don't want to widen the integer operations only to fail to + // promote due to some other unsplittable entry (which we may make splittable + // later). However, if there are only splittable uses, go ahead and assume + // that we cover the alloca. + bool WholeAllocaOp = (I != E) ? 
false : DL.isLegalInteger(SizeInBits); + + for (; I != E; ++I) + if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size, + S, I, WholeAllocaOp)) return false; - if (LoadInst *LI = dyn_cast(I->U->getUser())) { - if (LI->isVolatile() || !LI->getType()->isIntegerTy()) - return false; - if (LI->getType() == Ty) - WholeAllocaOp = true; - } else if (StoreInst *SI = dyn_cast(I->U->getUser())) { - if (SI->isVolatile() || !SI->getValueOperand()->getType()->isIntegerTy()) - return false; - if (SI->getValueOperand()->getType() == Ty) - WholeAllocaOp = true; - } else if (MemIntrinsic *MI = dyn_cast(I->U->getUser())) { - if (MI->isVolatile()) - return false; - if (MemTransferInst *MTI = dyn_cast(I->U->getUser())) { - const AllocaPartitioning::MemTransferOffsets &MTO - = P.getMemTransferOffsets(*MTI); - if (!MTO.IsSplittable) - return false; - } - } else { + for (ArrayRef::const_iterator SUI = SplitUses.begin(), + SUE = SplitUses.end(); + SUI != SUE; ++SUI) + if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size, + S, *SUI, WholeAllocaOp)) return false; - } - } + return WholeAllocaOp; } +static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, + IntegerType *Ty, uint64_t Offset, + const Twine &Name) { + DEBUG(dbgs() << " start: " << *V << "\n"); + IntegerType *IntTy = cast(V->getType()); + assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && + "Element extends past full value"); + uint64_t ShAmt = 8*Offset; + if (DL.isBigEndian()) + ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); + if (ShAmt) { + V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); + DEBUG(dbgs() << " shifted: " << *V << "\n"); + } + assert(Ty->getBitWidth() <= IntTy->getBitWidth() && + "Cannot extract to a larger integer!"); + if (Ty != IntTy) { + V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); + DEBUG(dbgs() << " trunced: " << *V << "\n"); + } + return V; +} + +static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, + Value *V, uint64_t Offset, const Twine &Name) { + IntegerType *IntTy = cast(Old->getType()); + IntegerType *Ty = cast(V->getType()); + assert(Ty->getBitWidth() <= IntTy->getBitWidth() && + "Cannot insert a larger integer!"); + DEBUG(dbgs() << " start: " << *V << "\n"); + if (Ty != IntTy) { + V = IRB.CreateZExt(V, IntTy, Name + ".ext"); + DEBUG(dbgs() << " extended: " << *V << "\n"); + } + assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && + "Element store outside of alloca store"); + uint64_t ShAmt = 8*Offset; + if (DL.isBigEndian()) + ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); + if (ShAmt) { + V = IRB.CreateShl(V, ShAmt, Name + ".shift"); + DEBUG(dbgs() << " shifted: " << *V << "\n"); + } + + if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { + APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); + Old = IRB.CreateAnd(Old, Mask, Name + ".mask"); + DEBUG(dbgs() << " masked: " << *Old << "\n"); + V = IRB.CreateOr(Old, V, Name + ".insert"); + DEBUG(dbgs() << " inserted: " << *V << "\n"); + } + return V; +} + +static Value *extractVector(IRBuilderTy &IRB, Value *V, + unsigned BeginIndex, unsigned EndIndex, + const Twine &Name) { + VectorType *VecTy = cast(V->getType()); + unsigned NumElements = EndIndex - BeginIndex; + assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); + + if (NumElements == VecTy->getNumElements()) + return V; + + if (NumElements == 1) { + V = IRB.CreateExtractElement(V, 
IRB.getInt32(BeginIndex), + Name + ".extract"); + DEBUG(dbgs() << " extract: " << *V << "\n"); + return V; + } + + SmallVector Mask; + Mask.reserve(NumElements); + for (unsigned i = BeginIndex; i != EndIndex; ++i) + Mask.push_back(IRB.getInt32(i)); + V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), + ConstantVector::get(Mask), + Name + ".extract"); + DEBUG(dbgs() << " shuffle: " << *V << "\n"); + return V; +} + +static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V, + unsigned BeginIndex, const Twine &Name) { + VectorType *VecTy = cast(Old->getType()); + assert(VecTy && "Can only insert a vector into a vector"); + + VectorType *Ty = dyn_cast(V->getType()); + if (!Ty) { + // Single element to insert. + V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex), + Name + ".insert"); + DEBUG(dbgs() << " insert: " << *V << "\n"); + return V; + } + + assert(Ty->getNumElements() <= VecTy->getNumElements() && + "Too many elements!"); + if (Ty->getNumElements() == VecTy->getNumElements()) { + assert(V->getType() == VecTy && "Vector type mismatch"); + return V; + } + unsigned EndIndex = BeginIndex + Ty->getNumElements(); + + // When inserting a smaller vector into the larger to store, we first + // use a shuffle vector to widen it with undef elements, and then + // a second shuffle vector to select between the loaded vector and the + // incoming vector. + SmallVector Mask; + Mask.reserve(VecTy->getNumElements()); + for (unsigned i = 0; i != VecTy->getNumElements(); ++i) + if (i >= BeginIndex && i < EndIndex) + Mask.push_back(IRB.getInt32(i - BeginIndex)); + else + Mask.push_back(UndefValue::get(IRB.getInt32Ty())); + V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), + ConstantVector::get(Mask), + Name + ".expand"); + DEBUG(dbgs() << " shuffle: " << *V << "\n"); + + Mask.clear(); + for (unsigned i = 0; i != VecTy->getNumElements(); ++i) + Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex)); + + V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend"); + + DEBUG(dbgs() << " blend: " << *V << "\n"); + return V; +} + namespace { -/// \brief Visitor to rewrite instructions using a partition of an alloca to -/// use a new alloca. +/// \brief Visitor to rewrite instructions using p particular slice of an alloca +/// to use a new alloca. /// /// Also implements the rewriting to vector-based accesses when the partition /// passes the isVectorPromotionViable predicate. Most of the rewriting logic /// lives here. -class AllocaPartitionRewriter : public InstVisitor { +class AllocaSliceRewriter : public InstVisitor { // Befriend the base class so it can delegate to private visit methods. 
- friend class llvm::InstVisitor; + friend class llvm::InstVisitor; + typedef llvm::InstVisitor Base; - const DataLayout &TD; - AllocaPartitioning &P; + const DataLayout &DL; + AllocaSlices &S; SROA &Pass; AllocaInst &OldAI, &NewAI; const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset; @@ -2153,7 +1916,7 @@ class AllocaPartitionRewriter : public InstVisitor(NewAI.getAllocatedType()); - ElementTy = VecTy->getElementType(); - assert((VecTy->getScalarSizeInBits() % 8) == 0 && + AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &S, SROA &Pass, + AllocaInst &OldAI, AllocaInst &NewAI, + uint64_t NewBeginOffset, uint64_t NewEndOffset, + bool IsVectorPromotable = false, + bool IsIntegerPromotable = false) + : DL(DL), S(S), Pass(Pass), OldAI(OldAI), NewAI(NewAI), + NewAllocaBeginOffset(NewBeginOffset), NewAllocaEndOffset(NewEndOffset), + NewAllocaTy(NewAI.getAllocatedType()), + VecTy(IsVectorPromotable ? cast(NewAllocaTy) : 0), + ElementTy(VecTy ? VecTy->getElementType() : 0), + ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0), + IntTy(IsIntegerPromotable + ? Type::getIntNTy( + NewAI.getContext(), + DL.getTypeSizeInBits(NewAI.getAllocatedType())) + : 0), + BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(), + OldPtr(), IsUsedByRewrittenSpeculatableInstructions(false), + IRB(NewAI.getContext(), ConstantFolder()) { + if (VecTy) { + assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 && "Only multiple-of-8 sized vector elements are viable"); - ElementSize = VecTy->getScalarSizeInBits() / 8; - } else if (isIntegerPromotionViable(TD, NewAI.getAllocatedType(), - NewAllocaBeginOffset, P, I, E)) { - IntPromotionTy = cast(NewAI.getAllocatedType()); + ++NumVectorized; } + assert((!IsVectorPromotable && !IsIntegerPromotable) || + IsVectorPromotable != IsIntegerPromotable); + } + + bool visit(AllocaSlices::const_iterator I) { bool CanSROA = true; - for (; I != E; ++I) { - if (!I->U) - continue; // Skip dead uses. - BeginOffset = I->BeginOffset; - EndOffset = I->EndOffset; - OldUse = I->U; - OldPtr = cast(I->U->get()); - NamePrefix = (Twine(NewAI.getName()) + "." + Twine(BeginOffset)).str(); - CanSROA &= visit(cast(I->U->getUser())); - } - if (VecTy) { + BeginOffset = I->beginOffset(); + EndOffset = I->endOffset(); + IsSplittable = I->isSplittable(); + IsSplit = + BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; + + OldUse = I->getUse(); + OldPtr = cast(OldUse->get()); + + Instruction *OldUserI = cast(OldUse->getUser()); + IRB.SetInsertPoint(OldUserI); + IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc()); + IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + "."); + + CanSROA &= visit(cast(OldUse->getUser())); + if (VecTy || IntTy) assert(CanSROA); - VecTy = 0; - ElementTy = 0; - ElementSize = 0; - } return CanSROA; } + /// \brief Query whether this slice is used by speculatable instructions after + /// rewriting. + /// + /// These instructions (PHIs and Selects currently) require the alloca slice + /// to run back through the rewriter. Thus, they are promotable, but not on + /// this iteration. This is distinct from a slice which is unpromotable for + /// some other reason, in which case we don't even want to perform the + /// speculation. This can be querried at any time and reflects whether (at + /// that point) a visit call has rewritten a speculatable instruction on the + /// current slice. 
+ bool isUsedByRewrittenSpeculatableInstructions() const { + return IsUsedByRewrittenSpeculatableInstructions; + } + private: + // Make sure the other visit overloads are visible. + using Base::visit; + // Every instruction which can end up as a user must have a rewrite rule. bool visitInstruction(Instruction &I) { DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n"); llvm_unreachable("No rewrite rule for this instruction!"); } - Twine getName(const Twine &Suffix) { - return NamePrefix + Suffix; - } - - Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) { - assert(BeginOffset >= NewAllocaBeginOffset); - APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset); - return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName("")); + Value *getAdjustedAllocaPtr(IRBuilderTy &IRB, uint64_t Offset, + Type *PointerTy) { + assert(Offset >= NewAllocaBeginOffset); + return getAdjustedPtr(IRB, DL, &NewAI, APInt(DL.getPointerSizeInBits(), + Offset - NewAllocaBeginOffset), + PointerTy); } /// \brief Compute suitable alignment to access an offset into the new alloca. unsigned getOffsetAlign(uint64_t Offset) { unsigned NewAIAlign = NewAI.getAlignment(); if (!NewAIAlign) - NewAIAlign = TD.getABITypeAlignment(NewAI.getAllocatedType()); + NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType()); return MinAlign(NewAIAlign, Offset); } - /// \brief Compute suitable alignment to access this partition of the new - /// alloca. - unsigned getPartitionAlign() { - return getOffsetAlign(BeginOffset - NewAllocaBeginOffset); - } - /// \brief Compute suitable alignment to access a type at an offset of the /// new alloca. /// @@ -2266,211 +2046,161 @@ private: /// otherwise returns the maximal suitable alignment. unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) { unsigned Align = getOffsetAlign(Offset); - return Align == TD.getABITypeAlignment(Ty) ? 0 : Align; - } - - /// \brief Compute suitable alignment to access a type at the beginning of - /// this partition of the new alloca. - /// - /// See \c getOffsetTypeAlign for details; this routine delegates to it. - unsigned getPartitionTypeAlign(Type *Ty) { - return getOffsetTypeAlign(Ty, BeginOffset - NewAllocaBeginOffset); + return Align == DL.getABITypeAlignment(Ty) ? 
0 : Align; } - ConstantInt *getIndex(IRBuilder<> &IRB, uint64_t Offset) { + unsigned getIndex(uint64_t Offset) { assert(VecTy && "Can only call getIndex when rewriting a vector"); uint64_t RelOffset = Offset - NewAllocaBeginOffset; assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds"); uint32_t Index = RelOffset / ElementSize; assert(Index * ElementSize == RelOffset); - return IRB.getInt32(Index); - } - - Value *extractInteger(IRBuilder<> &IRB, IntegerType *TargetTy, - uint64_t Offset) { - assert(IntPromotionTy && "Alloca is not an integer we can extract from"); - Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); - assert(Offset >= NewAllocaBeginOffset && "Out of bounds offset"); - uint64_t RelOffset = Offset - NewAllocaBeginOffset; - assert(TD.getTypeStoreSize(TargetTy) + RelOffset <= - TD.getTypeStoreSize(IntPromotionTy) && - "Element load outside of alloca store"); - uint64_t ShAmt = 8*RelOffset; - if (TD.isBigEndian()) - ShAmt = 8*(TD.getTypeStoreSize(IntPromotionTy) - - TD.getTypeStoreSize(TargetTy) - RelOffset); - if (ShAmt) - V = IRB.CreateLShr(V, ShAmt, getName(".shift")); - if (TargetTy != IntPromotionTy) { - assert(TargetTy->getBitWidth() < IntPromotionTy->getBitWidth() && - "Cannot extract to a larger integer!"); - V = IRB.CreateTrunc(V, TargetTy, getName(".trunc")); - } - return V; - } - - StoreInst *insertInteger(IRBuilder<> &IRB, Value *V, uint64_t Offset) { - IntegerType *Ty = cast(V->getType()); - if (Ty == IntPromotionTy) - return IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); - - assert(Ty->getBitWidth() < IntPromotionTy->getBitWidth() && - "Cannot insert a larger integer!"); - V = IRB.CreateZExt(V, IntPromotionTy, getName(".ext")); - assert(Offset >= NewAllocaBeginOffset && "Out of bounds offset"); - uint64_t RelOffset = Offset - NewAllocaBeginOffset; - assert(TD.getTypeStoreSize(Ty) + RelOffset <= - TD.getTypeStoreSize(IntPromotionTy) && - "Element store outside of alloca store"); - uint64_t ShAmt = 8*RelOffset; - if (TD.isBigEndian()) - ShAmt = 8*(TD.getTypeStoreSize(IntPromotionTy) - TD.getTypeStoreSize(Ty) - - RelOffset); - if (ShAmt) - V = IRB.CreateShl(V, ShAmt, getName(".shift")); - - APInt Mask = ~Ty->getMask().zext(IntPromotionTy->getBitWidth()).shl(ShAmt); - Value *Old = IRB.CreateAnd(IRB.CreateAlignedLoad(&NewAI, - NewAI.getAlignment(), - getName(".oldload")), - Mask, getName(".mask")); - return IRB.CreateAlignedStore(IRB.CreateOr(Old, V, getName(".insert")), - &NewAI, NewAI.getAlignment()); + return Index; } void deleteIfTriviallyDead(Value *V) { Instruction *I = cast(V); if (isInstructionTriviallyDead(I)) - Pass.DeadInsts.push_back(I); - } - - /// \brief Test whether we can convert a value from the old to the new type. - /// - /// This predicate should be used to guard calls to convertValue in order to - /// ensure that we only try to convert viable values. The strategy is that we - /// will peel off single element struct and array wrappings to get to an - /// underlying value, and convert that value. 
- bool canConvertValue(Type *OldTy, Type *NewTy) { - if (OldTy == NewTy) - return true; - if (TD.getTypeSizeInBits(NewTy) != TD.getTypeSizeInBits(OldTy)) - return false; - if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) - return false; - - if (NewTy->isPointerTy() || OldTy->isPointerTy()) { - if (NewTy->isPointerTy() && OldTy->isPointerTy()) - return true; - if (NewTy->isIntegerTy() || OldTy->isIntegerTy()) - return true; - return false; - } - - return true; - } - - Value *convertValue(IRBuilder<> &IRB, Value *V, Type *Ty) { - assert(canConvertValue(V->getType(), Ty) && "Value not convertable to type"); - if (V->getType() == Ty) - return V; - if (V->getType()->isIntegerTy() && Ty->isPointerTy()) - return IRB.CreateIntToPtr(V, Ty); - if (V->getType()->isPointerTy() && Ty->isIntegerTy()) - return IRB.CreatePtrToInt(V, Ty); - - return IRB.CreateBitCast(V, Ty); + Pass.DeadInsts.insert(I); } - bool rewriteVectorizedLoadInst(IRBuilder<> &IRB, LoadInst &LI, Value *OldOp) { - Value *Result; - if (LI.getType() == VecTy->getElementType() || - BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) { - Result = IRB.CreateExtractElement( - IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")), - getIndex(IRB, BeginOffset), getName(".extract")); - } else { - Result = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); - } - if (Result->getType() != LI.getType()) - Result = convertValue(IRB, Result, LI.getType()); - LI.replaceAllUsesWith(Result); - Pass.DeadInsts.push_back(&LI); + Value *rewriteVectorizedLoadInst(uint64_t NewBeginOffset, + uint64_t NewEndOffset) { + unsigned BeginIndex = getIndex(NewBeginOffset); + unsigned EndIndex = getIndex(NewEndOffset); + assert(EndIndex > BeginIndex && "Empty vector!"); - DEBUG(dbgs() << " to: " << *Result << "\n"); - return true; + Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + "load"); + return extractVector(IRB, V, BeginIndex, EndIndex, "vec"); } - bool rewriteIntegerLoad(IRBuilder<> &IRB, LoadInst &LI) { + Value *rewriteIntegerLoad(LoadInst &LI, uint64_t NewBeginOffset, + uint64_t NewEndOffset) { + assert(IntTy && "We cannot insert an integer to the alloca"); assert(!LI.isVolatile()); - Value *Result = extractInteger(IRB, cast(LI.getType()), - BeginOffset); - LI.replaceAllUsesWith(Result); - Pass.DeadInsts.push_back(&LI); - DEBUG(dbgs() << " to: " << *Result << "\n"); - return true; + Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + "load"); + V = convertValue(DL, IRB, V, IntTy); + assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); + uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; + if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) + V = extractInteger(DL, IRB, V, cast(LI.getType()), Offset, + "extract"); + return V; } bool visitLoadInst(LoadInst &LI) { DEBUG(dbgs() << " original: " << LI << "\n"); Value *OldOp = LI.getOperand(0); assert(OldOp == OldPtr); - IRBuilder<> IRB(&LI); - if (VecTy) - return rewriteVectorizedLoadInst(IRB, LI, OldOp); - if (IntPromotionTy) - return rewriteIntegerLoad(IRB, LI); - - if (BeginOffset == NewAllocaBeginOffset && - canConvertValue(NewAllocaTy, LI.getType())) { - Value *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - LI.isVolatile(), getName(".load")); - Value *NewV = convertValue(IRB, NewLI, LI.getType()); - LI.replaceAllUsesWith(NewV); - Pass.DeadInsts.push_back(&LI); - - DEBUG(dbgs() << " to: " << *NewLI << "\n"); - return !LI.isVolatile(); - } + // Compute the intersecting 
offset range. + assert(BeginOffset < NewAllocaEndOffset); + assert(EndOffset > NewAllocaBeginOffset); + uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); + uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); - Value *NewPtr = getAdjustedAllocaPtr(IRB, - LI.getPointerOperand()->getType()); - LI.setOperand(0, NewPtr); - LI.setAlignment(getPartitionTypeAlign(LI.getType())); - DEBUG(dbgs() << " to: " << LI << "\n"); + uint64_t Size = NewEndOffset - NewBeginOffset; - deleteIfTriviallyDead(OldOp); - return NewPtr == &NewAI && !LI.isVolatile(); - } + Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), Size * 8) + : LI.getType(); + bool IsPtrAdjusted = false; + Value *V; + if (VecTy) { + V = rewriteVectorizedLoadInst(NewBeginOffset, NewEndOffset); + } else if (IntTy && LI.getType()->isIntegerTy()) { + V = rewriteIntegerLoad(LI, NewBeginOffset, NewEndOffset); + } else if (NewBeginOffset == NewAllocaBeginOffset && + canConvertValue(DL, NewAllocaTy, LI.getType())) { + V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + LI.isVolatile(), "load"); + } else { + Type *LTy = TargetTy->getPointerTo(); + V = IRB.CreateAlignedLoad( + getAdjustedAllocaPtr(IRB, NewBeginOffset, LTy), + getOffsetTypeAlign(TargetTy, NewBeginOffset - NewAllocaBeginOffset), + LI.isVolatile(), "load"); + IsPtrAdjusted = true; + } + V = convertValue(DL, IRB, V, TargetTy); + + if (IsSplit) { + assert(!LI.isVolatile()); + assert(LI.getType()->isIntegerTy() && + "Only integer type loads and stores are split"); + assert(Size < DL.getTypeStoreSize(LI.getType()) && + "Split load isn't smaller than original load"); + assert(LI.getType()->getIntegerBitWidth() == + DL.getTypeStoreSizeInBits(LI.getType()) && + "Non-byte-multiple bit width"); + // Move the insertion point just past the load so that we can refer to it. + IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI))); + // Create a placeholder value with the same type as LI to use as the + // basis for the new value. This allows us to replace the uses of LI with + // the computed value, and then replace the placeholder with LI, leaving + // LI only used for this computation. 
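Every visit routine above starts with the same clamp of the slice's [BeginOffset, EndOffset) range to the range covered by the new alloca, and for split loads the clamped size then picks the integer type. A minimal worked sketch with invented offsets (not taken from any real alloca):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // A split slice covering [0,16) of the original alloca, rewritten against
  // a new alloca that covers [8,24) of it.
  uint64_t BeginOffset = 0, EndOffset = 16;
  uint64_t NewAllocaBeginOffset = 8, NewAllocaEndOffset = 24;

  uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); // 8
  uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);       // 16
  uint64_t Size = NewEndOffset - NewBeginOffset;                         // 8

  // A split integer load over this partition would be rewritten as an i64.
  std::printf("[%llu,%llu) -> i%llu\n", (unsigned long long)NewBeginOffset,
              (unsigned long long)NewEndOffset, (unsigned long long)(Size * 8));
}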
+ Value *Placeholder + = new LoadInst(UndefValue::get(LI.getType()->getPointerTo())); + V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset, + "insert"); + LI.replaceAllUsesWith(V); + Placeholder->replaceAllUsesWith(&LI); + delete Placeholder; + } else { + LI.replaceAllUsesWith(V); + } - bool rewriteVectorizedStoreInst(IRBuilder<> &IRB, StoreInst &SI, - Value *OldOp) { - Value *V = SI.getValueOperand(); - if (V->getType() == ElementTy || - BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) { - if (V->getType() != ElementTy) - V = convertValue(IRB, V, ElementTy); - LoadInst *LI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); - V = IRB.CreateInsertElement(LI, V, getIndex(IRB, BeginOffset), - getName(".insert")); - } else if (V->getType() != VecTy) { - V = convertValue(IRB, V, VecTy); + Pass.DeadInsts.insert(&LI); + deleteIfTriviallyDead(OldOp); + DEBUG(dbgs() << " to: " << *V << "\n"); + return !LI.isVolatile() && !IsPtrAdjusted; + } + + bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp, + uint64_t NewBeginOffset, + uint64_t NewEndOffset) { + if (V->getType() != VecTy) { + unsigned BeginIndex = getIndex(NewBeginOffset); + unsigned EndIndex = getIndex(NewEndOffset); + assert(EndIndex > BeginIndex && "Empty vector!"); + unsigned NumElements = EndIndex - BeginIndex; + assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); + Type *SliceTy = + (NumElements == 1) ? ElementTy + : VectorType::get(ElementTy, NumElements); + if (V->getType() != SliceTy) + V = convertValue(DL, IRB, V, SliceTy); + + // Mix in the existing elements. + Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + "load"); + V = insertVector(IRB, Old, V, BeginIndex, "vec"); } StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); - Pass.DeadInsts.push_back(&SI); + Pass.DeadInsts.insert(&SI); (void)Store; DEBUG(dbgs() << " to: " << *Store << "\n"); return true; } - bool rewriteIntegerStore(IRBuilder<> &IRB, StoreInst &SI) { + bool rewriteIntegerStore(Value *V, StoreInst &SI, + uint64_t NewBeginOffset, uint64_t NewEndOffset) { + assert(IntTy && "We cannot extract an integer from the alloca"); assert(!SI.isVolatile()); - StoreInst *Store = insertInteger(IRB, SI.getValueOperand(), BeginOffset); - Pass.DeadInsts.push_back(&SI); + if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) { + Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + "oldload"); + Old = convertValue(DL, IRB, Old, IntTy); + assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); + uint64_t Offset = BeginOffset - NewAllocaBeginOffset; + V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, + "insert"); + } + V = convertValue(DL, IRB, V, NewAllocaTy); + StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); + Pass.DeadInsts.insert(&SI); (void)Store; DEBUG(dbgs() << " to: " << *Store << "\n"); return true; @@ -2480,80 +2210,141 @@ private: DEBUG(dbgs() << " original: " << SI << "\n"); Value *OldOp = SI.getOperand(1); assert(OldOp == OldPtr); - IRBuilder<> IRB(&SI); - - if (VecTy) - return rewriteVectorizedStoreInst(IRB, SI, OldOp); - if (IntPromotionTy) - return rewriteIntegerStore(IRB, SI); - Type *ValueTy = SI.getValueOperand()->getType(); + Value *V = SI.getValueOperand(); // Strip all inbounds GEPs and pointer casts to try to dig out any root // alloca that should be re-examined after promoting this alloca. 
- if (ValueTy->isPointerTy()) - if (AllocaInst *AI = dyn_cast(SI.getValueOperand() - ->stripInBoundsOffsets())) + if (V->getType()->isPointerTy()) + if (AllocaInst *AI = dyn_cast(V->stripInBoundsOffsets())) Pass.PostPromotionWorklist.insert(AI); - if (BeginOffset == NewAllocaBeginOffset && - canConvertValue(ValueTy, NewAllocaTy)) { - Value *NewV = convertValue(IRB, SI.getValueOperand(), NewAllocaTy); - StoreInst *NewSI = IRB.CreateAlignedStore(NewV, &NewAI, NewAI.getAlignment(), - SI.isVolatile()); - (void)NewSI; - Pass.DeadInsts.push_back(&SI); - - DEBUG(dbgs() << " to: " << *NewSI << "\n"); - return !SI.isVolatile(); + // Compute the intersecting offset range. + assert(BeginOffset < NewAllocaEndOffset); + assert(EndOffset > NewAllocaBeginOffset); + uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); + uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); + + uint64_t Size = NewEndOffset - NewBeginOffset; + if (Size < DL.getTypeStoreSize(V->getType())) { + assert(!SI.isVolatile()); + assert(V->getType()->isIntegerTy() && + "Only integer type loads and stores are split"); + assert(V->getType()->getIntegerBitWidth() == + DL.getTypeStoreSizeInBits(V->getType()) && + "Non-byte-multiple bit width"); + IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8); + V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset, + "extract"); } - Value *NewPtr = getAdjustedAllocaPtr(IRB, - SI.getPointerOperand()->getType()); - SI.setOperand(1, NewPtr); - SI.setAlignment(getPartitionTypeAlign(SI.getValueOperand()->getType())); - DEBUG(dbgs() << " to: " << SI << "\n"); - + if (VecTy) + return rewriteVectorizedStoreInst(V, SI, OldOp, NewBeginOffset, + NewEndOffset); + if (IntTy && V->getType()->isIntegerTy()) + return rewriteIntegerStore(V, SI, NewBeginOffset, NewEndOffset); + + StoreInst *NewSI; + if (NewBeginOffset == NewAllocaBeginOffset && + NewEndOffset == NewAllocaEndOffset && + canConvertValue(DL, V->getType(), NewAllocaTy)) { + V = convertValue(DL, IRB, V, NewAllocaTy); + NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), + SI.isVolatile()); + } else { + Value *NewPtr = getAdjustedAllocaPtr(IRB, NewBeginOffset, + V->getType()->getPointerTo()); + NewSI = IRB.CreateAlignedStore( + V, NewPtr, getOffsetTypeAlign( + V->getType(), NewBeginOffset - NewAllocaBeginOffset), + SI.isVolatile()); + } + (void)NewSI; + Pass.DeadInsts.insert(&SI); deleteIfTriviallyDead(OldOp); - return NewPtr == &NewAI && !SI.isVolatile(); + + DEBUG(dbgs() << " to: " << *NewSI << "\n"); + return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile(); + } + + /// \brief Compute an integer value from splatting an i8 across the given + /// number of bytes. + /// + /// Note that this routine assumes an i8 is a byte. If that isn't true, don't + /// call this routine. + /// FIXME: Heed the advice above. + /// + /// \param V The i8 value to splat. 
+ /// \param Size The number of bytes in the output (assuming i8 is one byte) + Value *getIntegerSplat(Value *V, unsigned Size) { + assert(Size > 0 && "Expected a positive number of bytes."); + IntegerType *VTy = cast(V->getType()); + assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); + if (Size == 1) + return V; + + Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8); + V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, "zext"), + ConstantExpr::getUDiv( + Constant::getAllOnesValue(SplatIntTy), + ConstantExpr::getZExt( + Constant::getAllOnesValue(V->getType()), + SplatIntTy)), + "isplat"); + return V; + } + + /// \brief Compute a vector splat for a given element value. + Value *getVectorSplat(Value *V, unsigned NumElements) { + V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); + DEBUG(dbgs() << " splat: " << *V << "\n"); + return V; } bool visitMemSetInst(MemSetInst &II) { DEBUG(dbgs() << " original: " << II << "\n"); - IRBuilder<> IRB(&II); assert(II.getRawDest() == OldPtr); // If the memset has a variable size, it cannot be split, just adjust the // pointer to the new alloca. if (!isa(II.getLength())) { - II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType())); + assert(!IsSplit); + assert(BeginOffset >= NewAllocaBeginOffset); + II.setDest( + getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType())); Type *CstTy = II.getAlignmentCst()->getType(); - II.setAlignment(ConstantInt::get(CstTy, getPartitionAlign())); + II.setAlignment(ConstantInt::get(CstTy, getOffsetAlign(BeginOffset))); deleteIfTriviallyDead(OldPtr); return false; } // Record this instruction for deletion. - if (Pass.DeadSplitInsts.insert(&II)) - Pass.DeadInsts.push_back(&II); + Pass.DeadInsts.insert(&II); Type *AllocaTy = NewAI.getAllocatedType(); Type *ScalarTy = AllocaTy->getScalarType(); + // Compute the intersecting offset range. + assert(BeginOffset < NewAllocaEndOffset); + assert(EndOffset > NewAllocaBeginOffset); + uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); + uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); + uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset; + // If this doesn't map cleanly onto the alloca type, and that type isn't // a single value type, just emit a memset. - if (!VecTy && (BeginOffset != NewAllocaBeginOffset || - EndOffset != NewAllocaEndOffset || - !AllocaTy->isSingleValueType() || - !TD.isLegalInteger(TD.getTypeSizeInBits(ScalarTy)))) { + if (!VecTy && !IntTy && + (BeginOffset > NewAllocaBeginOffset || + EndOffset < NewAllocaEndOffset || + !AllocaTy->isSingleValueType() || + !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) || + DL.getTypeSizeInBits(ScalarTy)%8 != 0)) { Type *SizeTy = II.getLength()->getType(); - Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset); - CallInst *New - = IRB.CreateMemSet(getAdjustedAllocaPtr(IRB, - II.getRawDest()->getType()), - II.getValue(), Size, getPartitionAlign(), - II.isVolatile()); + Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); + CallInst *New = IRB.CreateMemSet( + getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getRawDest()->getType()), + II.getValue(), Size, getOffsetAlign(SliceOffset), II.isVolatile()); (void)New; DEBUG(dbgs() << " to: " << *New << "\n"); return false; @@ -2562,56 +2353,59 @@ private: // If we can represent this as a simple value, we have to build the actual // value to store, which requires expanding the byte present in memset to // a sensible representation for the alloca type. 
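getIntegerSplat above builds the memset pattern arithmetically: all-ones divided by the zero-extended i8 all-ones is 0x0101...01, so multiplying the zero-extended byte by that constant replicates it into every byte position. A standalone model of the same arithmetic (plain C++; splatByte is an invented helper for illustration, limited to widths a uint64_t can hold):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Splat one byte across Size bytes, mirroring the zext/udiv/mul sequence.
static uint64_t splatByte(uint8_t V, unsigned Size) {
  assert(Size >= 1 && Size <= 8 && "model only handles up to 8 bytes");
  uint64_t AllOnes = Size == 8 ? ~0ULL : (1ULL << (Size * 8)) - 1;
  uint64_t Multiplier = AllOnes / 0xFF; // 0x0101...01 with Size bytes
  return uint64_t(V) * Multiplier;
}

int main() {
  std::printf("%llx\n", (unsigned long long)splatByte(0xAB, 4)); // abababab
  std::printf("%llx\n", (unsigned long long)splatByte(0x7F, 8)); // 7f7f7f7f7f7f7f7f
}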
This is essentially - // splatting the byte to a sufficiently wide integer, bitcasting to the - // desired scalar type, and splatting it across any desired vector type. - Value *V = II.getValue(); - IntegerType *VTy = cast(V->getType()); - Type *IntTy = Type::getIntNTy(VTy->getContext(), - TD.getTypeSizeInBits(ScalarTy)); - if (TD.getTypeSizeInBits(ScalarTy) > VTy->getBitWidth()) - V = IRB.CreateMul(IRB.CreateZExt(V, IntTy, getName(".zext")), - ConstantExpr::getUDiv( - Constant::getAllOnesValue(IntTy), - ConstantExpr::getZExt( - Constant::getAllOnesValue(V->getType()), - IntTy)), - getName(".isplat")); - if (V->getType() != ScalarTy) { - if (ScalarTy->isPointerTy()) - V = IRB.CreateIntToPtr(V, ScalarTy); - else if (ScalarTy->isPrimitiveType() || ScalarTy->isVectorTy()) - V = IRB.CreateBitCast(V, ScalarTy); - else if (ScalarTy->isIntegerTy()) - llvm_unreachable("Computed different integer types with equal widths"); - else - llvm_unreachable("Invalid scalar type"); - } + // splatting the byte to a sufficiently wide integer, splatting it across + // any desired vector width, and bitcasting to the final type. + Value *V; - // If this is an element-wide memset of a vectorizable alloca, insert it. - if (VecTy && (BeginOffset > NewAllocaBeginOffset || - EndOffset < NewAllocaEndOffset)) { - StoreInst *Store = IRB.CreateAlignedStore( - IRB.CreateInsertElement(IRB.CreateAlignedLoad(&NewAI, - NewAI.getAlignment(), - getName(".load")), - V, getIndex(IRB, BeginOffset), - getName(".insert")), - &NewAI, NewAI.getAlignment()); - (void)Store; - DEBUG(dbgs() << " to: " << *Store << "\n"); - return true; - } + if (VecTy) { + // If this is a memset of a vectorized alloca, insert it. + assert(ElementTy == ScalarTy); + + unsigned BeginIndex = getIndex(NewBeginOffset); + unsigned EndIndex = getIndex(NewEndOffset); + assert(EndIndex > BeginIndex && "Empty vector!"); + unsigned NumElements = EndIndex - BeginIndex; + assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); + + Value *Splat = + getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8); + Splat = convertValue(DL, IRB, Splat, ElementTy); + if (NumElements > 1) + Splat = getVectorSplat(Splat, NumElements); + + Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + "oldload"); + V = insertVector(IRB, Old, Splat, BeginIndex, "vec"); + } else if (IntTy) { + // If this is a memset on an alloca where we can widen stores, insert the + // set integer. + assert(!II.isVolatile()); + + uint64_t Size = NewEndOffset - NewBeginOffset; + V = getIntegerSplat(II.getValue(), Size); + + if (IntTy && (BeginOffset != NewAllocaBeginOffset || + EndOffset != NewAllocaBeginOffset)) { + Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + "oldload"); + Old = convertValue(DL, IRB, Old, IntTy); + uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; + V = insertInteger(DL, IRB, Old, V, Offset, "insert"); + } else { + assert(V->getType() == IntTy && + "Wrong type for an alloca wide integer!"); + } + V = convertValue(DL, IRB, V, AllocaTy); + } else { + // Established these invariants above. + assert(NewBeginOffset == NewAllocaBeginOffset); + assert(NewEndOffset == NewAllocaEndOffset); + + V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8); + if (VectorType *AllocaVecTy = dyn_cast(AllocaTy)) + V = getVectorSplat(V, AllocaVecTy->getNumElements()); - // Splat to a vector if needed. 
- if (VectorType *VecTy = dyn_cast(AllocaTy)) { - VectorType *SplatSourceTy = VectorType::get(V->getType(), 1); - V = IRB.CreateShuffleVector( - IRB.CreateInsertElement(UndefValue::get(SplatSourceTy), V, - IRB.getInt32(0), getName(".vsplat.insert")), - UndefValue::get(SplatSourceTy), - ConstantVector::getSplat(VecTy->getNumElements(), IRB.getInt32(0)), - getName(".vsplat.shuffle")); - assert(V->getType() == VecTy); + V = convertValue(DL, IRB, V, AllocaTy); } Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), @@ -2626,23 +2420,26 @@ private: // them into two categories: split intrinsics and unsplit intrinsics. DEBUG(dbgs() << " original: " << II << "\n"); - IRBuilder<> IRB(&II); + + // Compute the intersecting offset range. + assert(BeginOffset < NewAllocaEndOffset); + assert(EndOffset > NewAllocaBeginOffset); + uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); + uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr); bool IsDest = II.getRawDest() == OldPtr; - const AllocaPartitioning::MemTransferOffsets &MTO - = P.getMemTransferOffsets(II); - // Compute the relative offset within the transfer. - unsigned IntPtrWidth = TD.getPointerSizeInBits(); - APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin - : MTO.SourceBegin)); + unsigned IntPtrWidth = DL.getPointerSizeInBits(); + APInt RelOffset(IntPtrWidth, NewBeginOffset - BeginOffset); unsigned Align = II.getAlignment(); + uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset; if (Align > 1) - Align = MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(), - MinAlign(II.getAlignment(), getPartitionAlign())); + Align = + MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(), + MinAlign(II.getAlignment(), getOffsetAlign(SliceOffset))); // For unsplit intrinsics, we simply modify the source and destination // pointers in place. This isn't just an optimization, it is a matter of @@ -2651,12 +2448,14 @@ private: // a variable length. We may also be dealing with memmove instead of // memcpy, and so simply updating the pointers is the necessary for us to // update both source and dest of a single call. - if (!MTO.IsSplittable) { + if (!IsSplittable) { Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource(); if (IsDest) - II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType())); + II.setDest( + getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType())); else - II.setSource(getAdjustedAllocaPtr(IRB, II.getRawSource()->getType())); + II.setSource(getAdjustedAllocaPtr(IRB, BeginOffset, + II.getRawSource()->getType())); Type *CstTy = II.getAlignmentCst()->getType(); II.setAlignment(ConstantInt::get(CstTy, Align)); @@ -2674,57 +2473,46 @@ private: // If this doesn't map cleanly onto the alloca type, and that type isn't // a single value type, just emit a memcpy. bool EmitMemCpy - = !VecTy && (BeginOffset != NewAllocaBeginOffset || - EndOffset != NewAllocaEndOffset || - !NewAI.getAllocatedType()->isSingleValueType()); + = !VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset || + EndOffset < NewAllocaEndOffset || + !NewAI.getAllocatedType()->isSingleValueType()); // If we're just going to emit a memcpy, the alloca hasn't changed, and the // size hasn't been shrunk based on analysis of the viable range, this is // a no-op. if (EmitMemCpy && &OldAI == &NewAI) { - uint64_t OrigBegin = IsDest ? MTO.DestBegin : MTO.SourceBegin; - uint64_t OrigEnd = IsDest ? 
MTO.DestEnd : MTO.SourceEnd; // Ensure the start lines up. - assert(BeginOffset == OrigBegin); - (void)OrigBegin; + assert(NewBeginOffset == BeginOffset); // Rewrite the size as needed. - if (EndOffset != OrigEnd) + if (NewEndOffset != EndOffset) II.setLength(ConstantInt::get(II.getLength()->getType(), - EndOffset - BeginOffset)); + NewEndOffset - NewBeginOffset)); return false; } // Record this instruction for deletion. - if (Pass.DeadSplitInsts.insert(&II)) - Pass.DeadInsts.push_back(&II); - - bool IsVectorElement = VecTy && (BeginOffset > NewAllocaBeginOffset || - EndOffset < NewAllocaEndOffset); - - Type *OtherPtrTy = IsDest ? II.getRawSource()->getType() - : II.getRawDest()->getType(); - if (!EmitMemCpy) - OtherPtrTy = IsVectorElement ? VecTy->getElementType()->getPointerTo() - : NewAI.getType(); - - // Compute the other pointer, folding as much as possible to produce - // a single, simple GEP in most cases. - Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); - OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy, - getName("." + OtherPtr->getName())); + Pass.DeadInsts.insert(&II); // Strip all inbounds GEPs and pointer casts to try to dig out any root // alloca that should be re-examined after rewriting this instruction. + Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); if (AllocaInst *AI = dyn_cast(OtherPtr->stripInBoundsOffsets())) Pass.Worklist.insert(AI); if (EmitMemCpy) { - Value *OurPtr - = getAdjustedAllocaPtr(IRB, IsDest ? II.getRawDest()->getType() - : II.getRawSource()->getType()); + Type *OtherPtrTy = IsDest ? II.getRawSource()->getType() + : II.getRawDest()->getType(); + + // Compute the other pointer, folding as much as possible to produce + // a single, simple GEP in most cases. + OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy); + + Value *OurPtr = getAdjustedAllocaPtr( + IRB, NewBeginOffset, + IsDest ? II.getRawDest()->getType() : II.getRawSource()->getType()); Type *SizeTy = II.getLength()->getType(); - Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset); + Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, @@ -2740,29 +2528,59 @@ private: if (!Align) Align = 1; - Value *SrcPtr = OtherPtr; + bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && + NewEndOffset == NewAllocaEndOffset; + uint64_t Size = NewEndOffset - NewBeginOffset; + unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; + unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; + unsigned NumElements = EndIndex - BeginIndex; + IntegerType *SubIntTy + = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0; + + Type *OtherPtrTy = NewAI.getType(); + if (VecTy && !IsWholeAlloca) { + if (NumElements == 1) + OtherPtrTy = VecTy->getElementType(); + else + OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements); + + OtherPtrTy = OtherPtrTy->getPointerTo(); + } else if (IntTy && !IsWholeAlloca) { + OtherPtrTy = SubIntTy->getPointerTo(); + } + + Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy); Value *DstPtr = &NewAI; if (!IsDest) std::swap(SrcPtr, DstPtr); Value *Src; - if (IsVectorElement && !IsDest) { - // We have to extract rather than load. 
- Src = IRB.CreateExtractElement( - IRB.CreateAlignedLoad(SrcPtr, Align, getName(".copyload")), - getIndex(IRB, BeginOffset), - getName(".copyextract")); + if (VecTy && !IsWholeAlloca && !IsDest) { + Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + "load"); + Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); + } else if (IntTy && !IsWholeAlloca && !IsDest) { + Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + "load"); + Src = convertValue(DL, IRB, Src, IntTy); + uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; + Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); } else { Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(), - getName(".copyload")); + "copyload"); } - if (IsVectorElement && IsDest) { - // We have to insert into a loaded copy before storing. - Src = IRB.CreateInsertElement( - IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")), - Src, getIndex(IRB, BeginOffset), - getName(".insert")); + if (VecTy && !IsWholeAlloca && IsDest) { + Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + "oldload"); + Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); + } else if (IntTy && !IsWholeAlloca && IsDest) { + Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + "oldload"); + Old = convertValue(DL, IRB, Old, IntTy); + uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; + Src = insertInteger(DL, IRB, Old, Src, Offset, "insert"); + Src = convertValue(DL, IRB, Src, NewAllocaTy); } StoreInst *Store = cast( @@ -2776,64 +2594,93 @@ private: assert(II.getIntrinsicID() == Intrinsic::lifetime_start || II.getIntrinsicID() == Intrinsic::lifetime_end); DEBUG(dbgs() << " original: " << II << "\n"); - IRBuilder<> IRB(&II); assert(II.getArgOperand(1) == OldPtr); + // Compute the intersecting offset range. + assert(BeginOffset < NewAllocaEndOffset); + assert(EndOffset > NewAllocaBeginOffset); + uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); + uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); + // Record this instruction for deletion. - if (Pass.DeadSplitInsts.insert(&II)) - Pass.DeadInsts.push_back(&II); + Pass.DeadInsts.insert(&II); ConstantInt *Size = ConstantInt::get(cast(II.getArgOperand(0)->getType()), - EndOffset - BeginOffset); - Value *Ptr = getAdjustedAllocaPtr(IRB, II.getArgOperand(1)->getType()); + NewEndOffset - NewBeginOffset); + Value *Ptr = + getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getArgOperand(1)->getType()); Value *New; if (II.getIntrinsicID() == Intrinsic::lifetime_start) New = IRB.CreateLifetimeStart(Ptr, Size); else New = IRB.CreateLifetimeEnd(Ptr, Size); + (void)New; DEBUG(dbgs() << " to: " << *New << "\n"); return true; } bool visitPHINode(PHINode &PN) { DEBUG(dbgs() << " original: " << PN << "\n"); + assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); + assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); // We would like to compute a new pointer in only one place, but have it be // as local as possible to the PHI. To do that, we re-use the location of // the old pointer, which necessarily must be in the right position to // dominate the PHI. - IRBuilder<> PtrBuilder(cast(OldPtr)); + IRBuilderTy PtrBuilder(OldPtr); + PtrBuilder.SetNamePrefix(Twine(NewAI.getName()) + "." 
+ Twine(BeginOffset) + + "."); - Value *NewPtr = getAdjustedAllocaPtr(PtrBuilder, OldPtr->getType()); + Value *NewPtr = + getAdjustedAllocaPtr(PtrBuilder, BeginOffset, OldPtr->getType()); // Replace the operands which were using the old pointer. - User::op_iterator OI = PN.op_begin(), OE = PN.op_end(); - for (; OI != OE; ++OI) - if (*OI == OldPtr) - *OI = NewPtr; + std::replace(PN.op_begin(), PN.op_end(), cast(OldPtr), NewPtr); DEBUG(dbgs() << " to: " << PN << "\n"); deleteIfTriviallyDead(OldPtr); - return false; + + // Check whether we can speculate this PHI node, and if so remember that + // fact and queue it up for another iteration after the speculation + // occurs. + if (isSafePHIToSpeculate(PN, &DL)) { + Pass.SpeculatablePHIs.insert(&PN); + IsUsedByRewrittenSpeculatableInstructions = true; + return true; + } + + return false; // PHIs can't be promoted on their own. } bool visitSelectInst(SelectInst &SI) { DEBUG(dbgs() << " original: " << SI << "\n"); - IRBuilder<> IRB(&SI); + assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && + "Pointer isn't an operand!"); + assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); + assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); - // Find the operand we need to rewrite here. - bool IsTrueVal = SI.getTrueValue() == OldPtr; - if (IsTrueVal) - assert(SI.getFalseValue() != OldPtr && "Pointer is both operands!"); - else - assert(SI.getFalseValue() == OldPtr && "Pointer isn't an operand!"); + Value *NewPtr = getAdjustedAllocaPtr(IRB, BeginOffset, OldPtr->getType()); + // Replace the operands which were using the old pointer. + if (SI.getOperand(1) == OldPtr) + SI.setOperand(1, NewPtr); + if (SI.getOperand(2) == OldPtr) + SI.setOperand(2, NewPtr); - Value *NewPtr = getAdjustedAllocaPtr(IRB, OldPtr->getType()); - SI.setOperand(IsTrueVal ? 1 : 2, NewPtr); DEBUG(dbgs() << " to: " << SI << "\n"); deleteIfTriviallyDead(OldPtr); - return false; + + // Check whether we can speculate this select instruction, and if so + // remember that fact and queue it up for another iteration after the + // speculation occurs. + if (isSafeSelectToSpeculate(SI, &DL)) { + Pass.SpeculatableSelects.insert(&SI); + IsUsedByRewrittenSpeculatableInstructions = true; + return true; + } + + return false; // Selects can't be promoted on their own. } }; @@ -2849,7 +2696,7 @@ class AggLoadStoreRewriter : public InstVisitor { // Befriend the base class so it can delegate to private visit methods. friend class llvm::InstVisitor; - const DataLayout &TD; + const DataLayout &DL; /// Queue of pointer uses to analyze and potentially rewrite. SmallVector Queue; @@ -2862,7 +2709,7 @@ class AggLoadStoreRewriter : public InstVisitor { Use *U; public: - AggLoadStoreRewriter(const DataLayout &TD) : TD(TD) {} + AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {} /// Rewrite loads and stores through a pointer and all pointers derived from /// it. @@ -2895,7 +2742,7 @@ private: class OpSplitter { protected: /// The builder used to form new instructions. - IRBuilder<> IRB; + IRBuilderTy IRB; /// The indices which to be used with insert- or extractvalue to select the /// appropriate value within the aggregate. SmallVector Indices; @@ -2972,9 +2819,8 @@ private: void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { assert(Ty->isSingleValueType()); // Load the single value and insert it using the indices. 
- Value *Load = IRB.CreateLoad(IRB.CreateInBoundsGEP(Ptr, GEPIndices, - Name + ".gep"), - Name + ".load"); + Value *GEP = IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"); + Value *Load = IRB.CreateLoad(GEP, Name + ".load"); Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); DEBUG(dbgs() << " to: " << *Load << "\n"); } @@ -3049,6 +2895,36 @@ private: }; } +/// \brief Strip aggregate type wrapping. +/// +/// This removes no-op aggregate types wrapping an underlying type. It will +/// strip as many layers of types as it can without changing either the type +/// size or the allocated size. +static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { + if (Ty->isSingleValueType()) + return Ty; + + uint64_t AllocSize = DL.getTypeAllocSize(Ty); + uint64_t TypeSize = DL.getTypeSizeInBits(Ty); + + Type *InnerTy; + if (ArrayType *ArrTy = dyn_cast(Ty)) { + InnerTy = ArrTy->getElementType(); + } else if (StructType *STy = dyn_cast(Ty)) { + const StructLayout *SL = DL.getStructLayout(STy); + unsigned Index = SL->getElementContainingOffset(0); + InnerTy = STy->getElementType(Index); + } else { + return Ty; + } + + if (AllocSize > DL.getTypeAllocSize(InnerTy) || + TypeSize > DL.getTypeSizeInBits(InnerTy)) + return Ty; + + return stripAggregateTypeWrapping(DL, InnerTy); +} + /// \brief Try to find a partition of the aggregate type passed in for a given /// offset and size. /// @@ -3062,10 +2938,13 @@ private: /// when the size or offset cause either end of type-based partition to be off. /// Also, this is a best-effort routine. It is reasonable to give up and not /// return a type if necessary. -static Type *getTypePartition(const DataLayout &TD, Type *Ty, +static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, uint64_t Size) { - if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size) - return Ty; + if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size) + return stripAggregateTypeWrapping(DL, Ty); + if (Offset > DL.getTypeAllocSize(Ty) || + (DL.getTypeAllocSize(Ty) - Offset) < Size) + return 0; if (SequentialType *SeqTy = dyn_cast(Ty)) { // We can't partition pointers... @@ -3073,14 +2952,15 @@ static Type *getTypePartition(const DataLayout &TD, Type *Ty, return 0; Type *ElementTy = SeqTy->getElementType(); - uint64_t ElementSize = TD.getTypeAllocSize(ElementTy); + uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); uint64_t NumSkippedElements = Offset / ElementSize; - if (ArrayType *ArrTy = dyn_cast(SeqTy)) + if (ArrayType *ArrTy = dyn_cast(SeqTy)) { if (NumSkippedElements >= ArrTy->getNumElements()) return 0; - if (VectorType *VecTy = dyn_cast(SeqTy)) + } else if (VectorType *VecTy = dyn_cast(SeqTy)) { if (NumSkippedElements >= VecTy->getNumElements()) return 0; + } Offset -= NumSkippedElements * ElementSize; // First check if we need to recurse. @@ -3089,12 +2969,12 @@ static Type *getTypePartition(const DataLayout &TD, Type *Ty, if ((Offset + Size) > ElementSize) return 0; // Recurse through the element type trying to peel off offset bytes. 
- return getTypePartition(TD, ElementTy, Offset, Size); + return getTypePartition(DL, ElementTy, Offset, Size); } assert(Offset == 0); if (Size == ElementSize) - return ElementTy; + return stripAggregateTypeWrapping(DL, ElementTy); assert(Size > ElementSize); uint64_t NumElements = Size / ElementSize; if (NumElements * ElementSize != Size) @@ -3106,7 +2986,7 @@ static Type *getTypePartition(const DataLayout &TD, Type *Ty, if (!STy) return 0; - const StructLayout *SL = TD.getStructLayout(STy); + const StructLayout *SL = DL.getStructLayout(STy); if (Offset >= SL->getSizeInBytes()) return 0; uint64_t EndOffset = Offset + Size; @@ -3117,7 +2997,7 @@ static Type *getTypePartition(const DataLayout &TD, Type *Ty, Offset -= SL->getElementOffset(Index); Type *ElementTy = STy->getElementType(Index); - uint64_t ElementSize = TD.getTypeAllocSize(ElementTy); + uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); if (Offset >= ElementSize) return 0; // The offset points into alignment padding. @@ -3125,12 +3005,12 @@ static Type *getTypePartition(const DataLayout &TD, Type *Ty, if (Offset > 0 || Size < ElementSize) { if ((Offset + Size) > ElementSize) return 0; - return getTypePartition(TD, ElementTy, Offset, Size); + return getTypePartition(DL, ElementTy, Offset, Size); } assert(Offset == 0); if (Size == ElementSize) - return ElementTy; + return stripAggregateTypeWrapping(DL, ElementTy); StructType::element_iterator EI = STy->element_begin() + Index, EE = STy->element_end(); @@ -3151,13 +3031,9 @@ static Type *getTypePartition(const DataLayout &TD, Type *Ty, } // Try to build up a sub-structure. - SmallVector ElementTys; - do { - ElementTys.push_back(*EI++); - } while (EI != EE); - StructType *SubTy = StructType::get(STy->getContext(), ElementTys, + StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked()); - const StructLayout *SubSL = TD.getStructLayout(SubTy); + const StructLayout *SubSL = DL.getStructLayout(SubTy); if (Size != SubSL->getSizeInBytes()) return 0; // The sub-struct doesn't have quite the size needed. @@ -3174,113 +3050,280 @@ static Type *getTypePartition(const DataLayout &TD, Type *Ty, /// appropriate new offsets. It also evaluates how successful the rewrite was /// at enabling promotion and if it was successful queues the alloca to be /// promoted. -bool SROA::rewriteAllocaPartition(AllocaInst &AI, - AllocaPartitioning &P, - AllocaPartitioning::iterator PI) { - uint64_t AllocaSize = PI->EndOffset - PI->BeginOffset; - bool IsLive = false; - for (AllocaPartitioning::use_iterator UI = P.use_begin(PI), - UE = P.use_end(PI); - UI != UE && !IsLive; ++UI) - if (UI->U) - IsLive = true; - if (!IsLive) - return false; // No live uses left of this partition. - - DEBUG(dbgs() << "Speculating PHIs and selects in partition " - << "[" << PI->BeginOffset << "," << PI->EndOffset << ")\n"); - - PHIOrSelectSpeculator Speculator(*TD, P, *this); - DEBUG(dbgs() << " speculating "); - DEBUG(P.print(dbgs(), PI, "")); - Speculator.visitUsers(PI); +bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S, + AllocaSlices::iterator B, AllocaSlices::iterator E, + int64_t BeginOffset, int64_t EndOffset, + ArrayRef SplitUses) { + assert(BeginOffset < EndOffset); + uint64_t SliceSize = EndOffset - BeginOffset; // Try to compute a friendly type for this partition of the alloca. This // won't always succeed, in which case we fall back to a legal integer type // or an i8 array of an appropriate size. 
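stripAggregateTypeWrapping, used when a type-based partition is found, peels single-element aggregate wrappers so a slice typed { [1 x float] } is treated as a plain float. A toy model of that recursion (plain C++; the Ty struct and the byte sizes are invented for illustration, and the real routine also checks the bit size and picks the struct element containing offset zero):

#include <cstdio>

// Toy type node: a scalar (Inner == nullptr) or an aggregate wrapping the
// element that sits at offset zero, with its own allocation size in bytes.
struct Ty {
  const char *Name;
  unsigned AllocSize;
  const Ty *Inner;
};

// Peel wrappers only while doing so loses no allocated bytes.
static const Ty *stripWrapping(const Ty *T) {
  if (!T->Inner)
    return T;
  if (T->AllocSize > T->Inner->AllocSize)
    return T; // the wrapper carries padding or extra elements; keep it
  return stripWrapping(T->Inner);
}

int main() {
  Ty F32 = {"float", 4, nullptr};
  Ty Arr1 = {"[1 x float]", 4, &F32};
  Ty Wrapped = {"{ [1 x float] }", 4, &Arr1};
  Ty Padded = {"{ float, i32 }", 8, &F32};
  std::printf("%s -> %s\n", Wrapped.Name, stripWrapping(&Wrapped)->Name); // float
  std::printf("%s -> %s\n", Padded.Name, stripWrapping(&Padded)->Name);  // kept as-is
}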
- Type *AllocaTy = 0; - if (Type *PartitionTy = P.getCommonType(PI)) - if (TD->getTypeAllocSize(PartitionTy) >= AllocaSize) - AllocaTy = PartitionTy; - if (!AllocaTy) - if (Type *PartitionTy = getTypePartition(*TD, AI.getAllocatedType(), - PI->BeginOffset, AllocaSize)) - AllocaTy = PartitionTy; - if ((!AllocaTy || - (AllocaTy->isArrayTy() && - AllocaTy->getArrayElementType()->isIntegerTy())) && - TD->isLegalInteger(AllocaSize * 8)) - AllocaTy = Type::getIntNTy(*C, AllocaSize * 8); - if (!AllocaTy) - AllocaTy = ArrayType::get(Type::getInt8Ty(*C), AllocaSize); - assert(TD->getTypeAllocSize(AllocaTy) >= AllocaSize); + Type *SliceTy = 0; + if (Type *CommonUseTy = findCommonType(B, E, EndOffset)) + if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize) + SliceTy = CommonUseTy; + if (!SliceTy) + if (Type *TypePartitionTy = getTypePartition(*DL, AI.getAllocatedType(), + BeginOffset, SliceSize)) + SliceTy = TypePartitionTy; + if ((!SliceTy || (SliceTy->isArrayTy() && + SliceTy->getArrayElementType()->isIntegerTy())) && + DL->isLegalInteger(SliceSize * 8)) + SliceTy = Type::getIntNTy(*C, SliceSize * 8); + if (!SliceTy) + SliceTy = ArrayType::get(Type::getInt8Ty(*C), SliceSize); + assert(DL->getTypeAllocSize(SliceTy) >= SliceSize); + + bool IsVectorPromotable = isVectorPromotionViable( + *DL, SliceTy, S, BeginOffset, EndOffset, B, E, SplitUses); + + bool IsIntegerPromotable = + !IsVectorPromotable && + isIntegerWideningViable(*DL, SliceTy, BeginOffset, S, B, E, SplitUses); // Check for the case where we're going to rewrite to a new alloca of the // exact same type as the original, and with the same access offsets. In that // case, re-use the existing alloca, but still run through the rewriter to - // performe phi and select speculation. + // perform phi and select speculation. AllocaInst *NewAI; - if (AllocaTy == AI.getAllocatedType()) { - assert(PI->BeginOffset == 0 && + if (SliceTy == AI.getAllocatedType()) { + assert(BeginOffset == 0 && "Non-zero begin offset but same alloca type"); - assert(PI == P.begin() && "Begin offset is zero on later partition"); NewAI = &AI; + // FIXME: We should be able to bail at this point with "nothing changed". + // FIXME: We might want to defer PHI speculation until after here. } else { unsigned Alignment = AI.getAlignment(); if (!Alignment) { // The minimum alignment which users can rely on when the explicit // alignment is omitted or zero is that required by the ABI for this // type. - Alignment = TD->getABITypeAlignment(AI.getAllocatedType()); + Alignment = DL->getABITypeAlignment(AI.getAllocatedType()); } - Alignment = MinAlign(Alignment, PI->BeginOffset); + Alignment = MinAlign(Alignment, BeginOffset); // If we will get at least this much alignment from the type alone, leave // the alloca's alignment unconstrained. - if (Alignment <= TD->getABITypeAlignment(AllocaTy)) + if (Alignment <= DL->getABITypeAlignment(SliceTy)) Alignment = 0; - NewAI = new AllocaInst(AllocaTy, 0, Alignment, - AI.getName() + ".sroa." + Twine(PI - P.begin()), - &AI); + NewAI = new AllocaInst(SliceTy, 0, Alignment, + AI.getName() + ".sroa." + Twine(B - S.begin()), &AI); ++NumNewAllocas; } DEBUG(dbgs() << "Rewriting alloca partition " - << "[" << PI->BeginOffset << "," << PI->EndOffset << ") to: " - << *NewAI << "\n"); + << "[" << BeginOffset << "," << EndOffset << ") to: " << *NewAI + << "\n"); - // Track the high watermark of the post-promotion worklist. We will reset it - // to this point if the alloca is not in fact scheduled for promotion. 
+ // Track the high watermark on several worklists that are only relevant for + // promoted allocas. We will reset it to this point if the alloca is not in + // fact scheduled for promotion. unsigned PPWOldSize = PostPromotionWorklist.size(); - - AllocaPartitionRewriter Rewriter(*TD, P, PI, *this, AI, *NewAI, - PI->BeginOffset, PI->EndOffset); - DEBUG(dbgs() << " rewriting "); - DEBUG(P.print(dbgs(), PI, "")); - bool Promotable = Rewriter.visitUsers(P.use_begin(PI), P.use_end(PI)); - if (Promotable) { + unsigned SPOldSize = SpeculatablePHIs.size(); + unsigned SSOldSize = SpeculatableSelects.size(); + unsigned NumUses = 0; + + AllocaSliceRewriter Rewriter(*DL, S, *this, AI, *NewAI, BeginOffset, + EndOffset, IsVectorPromotable, + IsIntegerPromotable); + bool Promotable = true; + for (ArrayRef::const_iterator SUI = SplitUses.begin(), + SUE = SplitUses.end(); + SUI != SUE; ++SUI) { + DEBUG(dbgs() << " rewriting split "); + DEBUG(S.printSlice(dbgs(), *SUI, "")); + Promotable &= Rewriter.visit(*SUI); + ++NumUses; + } + for (AllocaSlices::iterator I = B; I != E; ++I) { + DEBUG(dbgs() << " rewriting "); + DEBUG(S.printSlice(dbgs(), I, "")); + Promotable &= Rewriter.visit(I); + ++NumUses; + } + + NumAllocaPartitionUses += NumUses; + MaxUsesPerAllocaPartition = + std::max(NumUses, MaxUsesPerAllocaPartition); + + if (Promotable && !Rewriter.isUsedByRewrittenSpeculatableInstructions()) { DEBUG(dbgs() << " and queuing for promotion\n"); PromotableAllocas.push_back(NewAI); - } else if (NewAI != &AI) { + } else if (NewAI != &AI || + (Promotable && + Rewriter.isUsedByRewrittenSpeculatableInstructions())) { // If we can't promote the alloca, iterate on it to check for new // refinements exposed by splitting the current alloca. Don't iterate on an // alloca which didn't actually change and didn't get promoted. + // + // Alternatively, if we could promote the alloca but have speculatable + // instructions then we will speculate them after finishing our processing + // of the original alloca. Mark the new one for re-visiting in the next + // iteration so the speculated operations can be rewritten. + // + // FIXME: We should actually track whether the rewriter changed anything. Worklist.insert(NewAI); } // Drop any post-promotion work items if promotion didn't happen. - if (!Promotable) + if (!Promotable) { while (PostPromotionWorklist.size() > PPWOldSize) PostPromotionWorklist.pop_back(); + while (SpeculatablePHIs.size() > SPOldSize) + SpeculatablePHIs.pop_back(); + while (SpeculatableSelects.size() > SSOldSize) + SpeculatableSelects.pop_back(); + } return true; } -/// \brief Walks the partitioning of an alloca rewriting uses of each partition. -bool SROA::splitAlloca(AllocaInst &AI, AllocaPartitioning &P) { +namespace { +struct IsSliceEndLessOrEqualTo { + uint64_t UpperBound; + + IsSliceEndLessOrEqualTo(uint64_t UpperBound) : UpperBound(UpperBound) {} + + bool operator()(const AllocaSlices::iterator &I) { + return I->endOffset() <= UpperBound; + } +}; +} + +static void +removeFinishedSplitUses(SmallVectorImpl &SplitUses, + uint64_t &MaxSplitUseEndOffset, uint64_t Offset) { + if (Offset >= MaxSplitUseEndOffset) { + SplitUses.clear(); + MaxSplitUseEndOffset = 0; + return; + } + + size_t SplitUsesOldSize = SplitUses.size(); + SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(), + IsSliceEndLessOrEqualTo(Offset)), + SplitUses.end()); + if (SplitUsesOldSize == SplitUses.size()) + return; + + // Recompute the max. While this is linear, so is remove_if. 
+ MaxSplitUseEndOffset = 0; + for (SmallVectorImpl::iterator + SUI = SplitUses.begin(), + SUE = SplitUses.end(); + SUI != SUE; ++SUI) + MaxSplitUseEndOffset = std::max((*SUI)->endOffset(), MaxSplitUseEndOffset); +} + +/// \brief Walks the slices of an alloca and form partitions based on them, +/// rewriting each of their uses. +bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) { + if (S.begin() == S.end()) + return false; + + unsigned NumPartitions = 0; bool Changed = false; - for (AllocaPartitioning::iterator PI = P.begin(), PE = P.end(); PI != PE; - ++PI) - Changed |= rewriteAllocaPartition(AI, P, PI); + SmallVector SplitUses; + uint64_t MaxSplitUseEndOffset = 0; + + uint64_t BeginOffset = S.begin()->beginOffset(); + + for (AllocaSlices::iterator SI = S.begin(), SJ = llvm::next(SI), SE = S.end(); + SI != SE; SI = SJ) { + uint64_t MaxEndOffset = SI->endOffset(); + + if (!SI->isSplittable()) { + // When we're forming an unsplittable region, it must always start at the + // first slice and will extend through its end. + assert(BeginOffset == SI->beginOffset()); + + // Form a partition including all of the overlapping slices with this + // unsplittable slice. + while (SJ != SE && SJ->beginOffset() < MaxEndOffset) { + if (!SJ->isSplittable()) + MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset()); + ++SJ; + } + } else { + assert(SI->isSplittable()); // Established above. + + // Collect all of the overlapping splittable slices. + while (SJ != SE && SJ->beginOffset() < MaxEndOffset && + SJ->isSplittable()) { + MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset()); + ++SJ; + } + + // Back up MaxEndOffset and SJ if we ended the span early when + // encountering an unsplittable slice. + if (SJ != SE && SJ->beginOffset() < MaxEndOffset) { + assert(!SJ->isSplittable()); + MaxEndOffset = SJ->beginOffset(); + } + } + + // Check if we have managed to move the end offset forward yet. If so, + // we'll have to rewrite uses and erase old split uses. + if (BeginOffset < MaxEndOffset) { + // Rewrite a sequence of overlapping slices. + Changed |= + rewritePartition(AI, S, SI, SJ, BeginOffset, MaxEndOffset, SplitUses); + ++NumPartitions; + + removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset, MaxEndOffset); + } + + // Accumulate all the splittable slices from the [SI,SJ) region which + // overlap going forward. + for (AllocaSlices::iterator SK = SI; SK != SJ; ++SK) + if (SK->isSplittable() && SK->endOffset() > MaxEndOffset) { + SplitUses.push_back(SK); + MaxSplitUseEndOffset = std::max(SK->endOffset(), MaxSplitUseEndOffset); + } + + // If we're already at the end and we have no split uses, we're done. + if (SJ == SE && SplitUses.empty()) + break; + + // If we have no split uses or no gap in offsets, we're ready to move to + // the next slice. + if (SplitUses.empty() || (SJ != SE && MaxEndOffset == SJ->beginOffset())) { + BeginOffset = SJ->beginOffset(); + continue; + } + + // Even if we have split slices, if the next slice is splittable and the + // split slices reach it, we can simply set up the beginning offset of the + // next iteration to bridge between them. + if (SJ != SE && SJ->isSplittable() && + MaxSplitUseEndOffset > SJ->beginOffset()) { + BeginOffset = MaxEndOffset; + continue; + } + + // Otherwise, we have a tail of split slices. Rewrite them with an empty + // range of slices. + uint64_t PostSplitEndOffset = + SJ == SE ? 
MaxSplitUseEndOffset : SJ->beginOffset(); + + Changed |= rewritePartition(AI, S, SJ, SJ, MaxEndOffset, PostSplitEndOffset, + SplitUses); + ++NumPartitions; + + if (SJ == SE) + break; // Skip the rest, we don't need to do any cleanup. + + removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset, + PostSplitEndOffset); + + // Now just reset the begin offset for the next iteration. + BeginOffset = SJ->beginOffset(); + } + + NumAllocaPartitions += NumPartitions; + MaxPartitionsPerAlloca = + std::max(NumPartitions, MaxPartitionsPerAlloca); return Changed; } @@ -3288,7 +3331,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaPartitioning &P) { /// \brief Analyze an alloca for SROA. /// /// This analyzes the alloca to ensure we can reason about it, builds -/// a partitioning of the alloca, and then hands it off to be split and +/// the slices of the alloca, and then hands it off to be split and /// rewritten as needed. bool SROA::runOnAlloca(AllocaInst &AI) { DEBUG(dbgs() << "SROA alloca: " << AI << "\n"); @@ -3302,32 +3345,32 @@ bool SROA::runOnAlloca(AllocaInst &AI) { // Skip alloca forms that this analysis can't handle. if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() || - TD->getTypeAllocSize(AI.getAllocatedType()) == 0) + DL->getTypeAllocSize(AI.getAllocatedType()) == 0) return false; bool Changed = false; // First, split any FCA loads and stores touching this alloca to promote // better splitting and promotion opportunities. - AggLoadStoreRewriter AggRewriter(*TD); + AggLoadStoreRewriter AggRewriter(*DL); Changed |= AggRewriter.rewrite(AI); - // Build the partition set using a recursive instruction-visiting builder. - AllocaPartitioning P(*TD, AI); - DEBUG(P.print(dbgs())); - if (P.isEscaped()) + // Build the slices using a recursive instruction-visiting builder. + AllocaSlices S(*DL, AI); + DEBUG(S.print(dbgs())); + if (S.isEscaped()) return Changed; // Delete all the dead users of this alloca before splitting and rewriting it. - for (AllocaPartitioning::dead_user_iterator DI = P.dead_user_begin(), - DE = P.dead_user_end(); + for (AllocaSlices::dead_user_iterator DI = S.dead_user_begin(), + DE = S.dead_user_end(); DI != DE; ++DI) { Changed = true; (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType())); - DeadInsts.push_back(*DI); + DeadInsts.insert(*DI); } - for (AllocaPartitioning::dead_op_iterator DO = P.dead_op_begin(), - DE = P.dead_op_end(); + for (AllocaSlices::dead_op_iterator DO = S.dead_op_begin(), + DE = S.dead_op_end(); DO != DE; ++DO) { Value *OldV = **DO; // Clobber the use with an undef value. @@ -3335,15 +3378,25 @@ bool SROA::runOnAlloca(AllocaInst &AI) { if (Instruction *OldI = dyn_cast(OldV)) if (isInstructionTriviallyDead(OldI)) { Changed = true; - DeadInsts.push_back(OldI); + DeadInsts.insert(OldI); } } - // No partitions to split. Leave the dead alloca for a later pass to clean up. - if (P.begin() == P.end()) + // No slices to split. Leave the dead alloca for a later pass to clean up. + if (S.begin() == S.end()) return Changed; - return splitAlloca(AI, P) || Changed; + Changed |= splitAlloca(AI, S); + + DEBUG(dbgs() << " Speculating PHIs\n"); + while (!SpeculatablePHIs.empty()) + speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val()); + + DEBUG(dbgs() << " Speculating Selects\n"); + while (!SpeculatableSelects.empty()) + speculateSelectInstLoads(*SpeculatableSelects.pop_back_val()); + + return Changed; } /// \brief Delete the dead instructions accumulated in this run. 
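The loop in splitAlloca above sweeps the offset-sorted slices and coalesces overlapping ones into partitions, with unsplittable slices pinning the regions they touch and still-live split uses carried between partitions. Its skeleton is plain interval coalescing; here is a minimal model of just that skeleton (plain C++; it deliberately ignores the splittable/unsplittable distinction and the SplitUses bookkeeping):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

struct Slice { uint64_t Begin, End; };

// Merge overlapping [Begin, End) slices into maximal partitions.
static std::vector<std::pair<uint64_t, uint64_t> >
coalesce(std::vector<Slice> S) {
  std::sort(S.begin(), S.end(),
            [](const Slice &A, const Slice &B) { return A.Begin < B.Begin; });
  std::vector<std::pair<uint64_t, uint64_t> > Parts;
  for (size_t I = 0; I != S.size();) {
    uint64_t Begin = S[I].Begin, End = S[I].End;
    size_t J = I + 1;
    while (J != S.size() && S[J].Begin < End) { // still overlapping
      End = std::max(End, S[J].End);
      ++J;
    }
    Parts.push_back(std::make_pair(Begin, End));
    I = J;
  }
  return Parts;
}

int main() {
  // Two overlapping accesses plus a disjoint one: partitions [0,12) and [16,24).
  std::vector<Slice> S = {{0, 8}, {4, 12}, {16, 24}};
  std::vector<std::pair<uint64_t, uint64_t> > P = coalesce(S);
  for (size_t I = 0; I != P.size(); ++I)
    std::printf("[%llu,%llu)\n", (unsigned long long)P[I].first,
                (unsigned long long)P[I].second);
}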
@@ -3356,17 +3409,18 @@ bool SROA::runOnAlloca(AllocaInst &AI) {
 /// We also record the alloca instructions deleted here so that they aren't
 /// subsequently handed to mem2reg to promote.
 void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas) {
-  DeadSplitInsts.clear();
   while (!DeadInsts.empty()) {
     Instruction *I = DeadInsts.pop_back_val();
     DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
 
+    I->replaceAllUsesWith(UndefValue::get(I->getType()));
+
     for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
       if (Instruction *U = dyn_cast<Instruction>(*OI)) {
         // Zero out the operand and see if it becomes trivially dead.
         *OI = 0;
         if (isInstructionTriviallyDead(U))
-          DeadInsts.push_back(U);
+          DeadInsts.insert(U);
       }
 
     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
@@ -3377,6 +3431,15 @@ void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas) {
   }
 }
 
+static void enqueueUsersInWorklist(Instruction &I,
+                                   SmallVectorImpl<Instruction *> &Worklist,
+                                   SmallPtrSet<Instruction *, 8> &Visited) {
+  for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
+       ++UI)
+    if (Visited.insert(cast<Instruction>(*UI)))
+      Worklist.push_back(cast<Instruction>(*UI));
+}
+
 /// \brief Promote the allocas, using the best available technique.
 ///
 /// This attempts to promote whatever allocas have been identified as viable in
@@ -3384,7 +3447,7 @@ void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas) {
 /// If there is a domtree available, we attempt to promote using the full power
 /// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is
 /// based on the SSAUpdater utilities. This function returns whether any
-/// promotion occured.
+/// promotion occurred.
 bool SROA::promoteAllocas(Function &F) {
   if (PromotableAllocas.empty())
     return false;
@@ -3401,25 +3464,28 @@ bool SROA::promoteAllocas(Function &F) {
   DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
   SSAUpdater SSA;
   DIBuilder DIB(*F.getParent());
-  SmallVector<Instruction*, 64> Insts;
+  SmallVector<Instruction *, 64> Insts;
+
+  // We need a worklist to walk the uses of each alloca.
+  SmallVector<Instruction *, 8> Worklist;
+  SmallPtrSet<Instruction *, 8> Visited;
+  SmallVector<Instruction *, 4> DeadInsts;
 
   for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
     AllocaInst *AI = PromotableAllocas[Idx];
-    for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
-         UI != UE;) {
-      Instruction *I = cast<Instruction>(*UI++);
+    Insts.clear();
+    Worklist.clear();
+    Visited.clear();
+
+    enqueueUsersInWorklist(*AI, Worklist, Visited);
+
+    while (!Worklist.empty()) {
+      Instruction *I = Worklist.pop_back_val();
+
       // FIXME: Currently the SSAUpdater infrastructure doesn't reason about
       // lifetime intrinsics and so we strip them (and the bitcasts+GEPs
       // leading to them) here. Eventually it should use them to optimize the
       // scalar values produced.
-      if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
-        assert(onlyUsedByLifetimeMarkers(I) &&
-               "Found a bitcast used outside of a lifetime marker.");
-        while (!I->use_empty())
-          cast<Instruction>(*I->use_begin())->eraseFromParent();
-        I->eraseFromParent();
-        continue;
-      }
       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
         assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
                II->getIntrinsicID() == Intrinsic::lifetime_end);
@@ -3427,10 +3493,30 @@ bool SROA::promoteAllocas(Function &F) {
         continue;
       }
 
-      Insts.push_back(I);
+      // Push the loads and stores we find onto the list. SROA will already
+      // have validated that all loads and stores are viable candidates for
+      // promotion.
+      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+        assert(LI->getType() == AI->getAllocatedType());
+        Insts.push_back(LI);
+        continue;
+      }
+      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+        assert(SI->getValueOperand()->getType() == AI->getAllocatedType());
+        Insts.push_back(SI);
+        continue;
+      }
+
+      // For everything else, we know that only no-op bitcasts and GEPs will
+      // make it this far, just recurse through them and recall them for later
+      // removal.
+      DeadInsts.push_back(I);
+      enqueueUsersInWorklist(*I, Worklist, Visited);
     }
     AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
-    Insts.clear();
+    while (!DeadInsts.empty())
+      DeadInsts.pop_back_val()->eraseFromParent();
+    AI->eraseFromParent();
   }
 
   PromotableAllocas.clear();
@@ -3454,8 +3540,8 @@ namespace {
 bool SROA::runOnFunction(Function &F) {
   DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
   C = &F.getContext();
-  TD = getAnalysisIfAvailable<DataLayout>();
-  if (!TD) {
+  DL = getAnalysisIfAvailable<DataLayout>();
+  if (!DL) {
     DEBUG(dbgs() << "  Skipping SROA -- no target data!\n");
     return false;
   }
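The SSAUpdater path above replaces the old direct iteration over AI->use_begin()/use_end() with a worklist seeded by enqueueUsersInWorklist, so bitcasts and GEPs can be traversed without invalidating use iterators and every user is visited exactly once. Below is a minimal sketch of that traversal pattern under invented stand-in types (Node, visitTransitiveUsers), not LLVM's Instruction or use-list API.

// Illustrative sketch only -- "Node" and "visitTransitiveUsers" stand in for
// Instruction and its use lists; this is not LLVM code.
#include <set>
#include <vector>

struct Node {
  std::vector<Node *> Users; // direct users of this value
};

// Visit every transitive user of Root exactly once. The callback returns
// true when the walk should continue through that user's own users.
template <typename VisitorT>
void visitTransitiveUsers(Node &Root, VisitorT Visit) {
  std::vector<Node *> Worklist;
  std::set<Node *> Visited;

  // Seed the worklist with the direct users, deduplicating as we go.
  for (size_t I = 0; I != Root.Users.size(); ++I)
    if (Visited.insert(Root.Users[I]).second)
      Worklist.push_back(Root.Users[I]);

  while (!Worklist.empty()) {
    Node *N = Worklist.back();
    Worklist.pop_back();
    if (!Visit(*N))
      continue; // e.g. a load or store: record it, but don't recurse.
    // e.g. a no-op bitcast or GEP: enqueue its users and keep walking.
    for (size_t I = 0; I != N->Users.size(); ++I)
      if (Visited.insert(N->Users[I]).second)
        Worklist.push_back(N->Users[I]);
  }
}

A LIFO worklist plus a visited set keeps the walk linear in the number of users and makes it safe to erase the dead bitcasts and GEPs only after the walk completes, which is why the patch defers eraseFromParent until DeadInsts is drained.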