X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;ds=sidebyside;f=lib%2FTransforms%2FScalar%2FSROA.cpp;h=45b66674bc95fa44e79e852f143f25a98d4f710e;hb=dad20b2ae2544708d6a33abdb9bddd0a329f50e0;hp=9ec39d2528147aaf6c31d3f55f329a6c1378ad25;hpb=b7f27824fb63210a67040affd43a82890b6784fb;p=oota-llvm.git

diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 9ec39d25281..45b66674bc9 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -23,40 +23,49 @@
 ///
 //===----------------------------------------------------------------------===//
 
-#define DEBUG_TYPE "sroa"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/AssumptionTracker.h"
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/PtrUseVisitor.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
 #include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
 #include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Operator.h"
-#include "llvm/InstVisitor.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TimeValue.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
 #include "llvm/Transforms/Utils/SSAUpdater.h"
+
+#if __cplusplus >= 201103L && !defined(NDEBUG)
+// We only use this for a debug check in C++11
+#include <random>
+#endif
+
 using namespace llvm;
 
+#define DEBUG_TYPE "sroa"
+
 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
@@ -73,6 +82,16 @@ STATISTIC(NumVectorized, "Number of vectorized aggregates");
 static cl::opt<bool>
 ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);
 
+/// Hidden option to enable randomly shuffling the slices to help uncover
+/// instability in their order.
+static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
+                                             cl::init(false), cl::Hidden);
+
+/// Hidden option to experiment with completely strict handling of inbounds
+/// GEPs.
+static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds",
+                                        cl::init(false), cl::Hidden);
+
 namespace {
 /// \brief A custom IRBuilder inserter which prefixes all names if they are
 /// preserved.
@@ -142,8 +161,8 @@ public:
 
   Use *getUse() const { return UseAndIsSplittable.getPointer(); }
 
-  bool isDead() const { return getUse() == 0; }
-  void kill() { UseAndIsSplittable.setPointer(0); }
+  bool isDead() const { return getUse() == nullptr; }
+  void kill() { UseAndIsSplittable.setPointer(nullptr); }
 
   /// \brief Support for ordering ranges.
   ///
@@ -206,36 +225,26 @@ public:
   /// \brief Support for iterating over the slices.
   /// @{
   typedef SmallVectorImpl<Slice>::iterator iterator;
+  typedef iterator_range<iterator> range;
   iterator begin() { return Slices.begin(); }
   iterator end() { return Slices.end(); }
 
   typedef SmallVectorImpl<Slice>::const_iterator const_iterator;
+  typedef iterator_range<const_iterator> const_range;
   const_iterator begin() const { return Slices.begin(); }
   const_iterator end() const { return Slices.end(); }
   /// @}
 
-  /// \brief Allow iterating the dead users for this alloca.
-  ///
-  /// These are instructions which will never actually use the alloca as they
-  /// are outside the allocated range. They are safe to replace with undef and
-  /// delete.
-  /// @{
-  typedef SmallVectorImpl<Instruction *>::const_iterator dead_user_iterator;
-  dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); }
-  dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
-  /// @}
+  /// \brief Access the dead users for this alloca.
+  ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }
 
-  /// \brief Allow iterating the dead expressions referring to this alloca.
+  /// \brief Access the dead operands referring to this alloca.
   ///
   /// These are operands which have cannot actually be used to refer to the
   /// alloca as they are outside its range and the user doesn't correct for
   /// that. These mostly consist of PHI node inputs and the like which we just
   /// need to replace with undef.
-  /// @{
-  typedef SmallVectorImpl<Use *>::const_iterator dead_op_iterator;
-  dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); }
-  dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
-  /// @}
+  ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }
 
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
   void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
@@ -244,8 +253,8 @@ public:
   void printUse(raw_ostream &OS, const_iterator I,
                 StringRef Indent = "  ") const;
   void print(raw_ostream &OS) const;
-  void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const;
-  void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const;
+  void dump(const_iterator I) const;
+  void dump() const;
 #endif
 
 private:
@@ -303,7 +312,16 @@ static Value *foldSelectInst(SelectInst &SI) {
   if (SI.getOperand(1) == SI.getOperand(2))
     return SI.getOperand(1);
 
-  return 0;
+  return nullptr;
+}
+
+/// \brief A helper that folds a PHI node or a select.
+static Value *foldPHINodeOrSelectInst(Instruction &I) {
+  if (PHINode *PN = dyn_cast<PHINode>(&I)) {
+    // If PN merges together the same value, return that value.
+    return PN->hasConstantValue();
+  }
+  return foldSelectInst(cast<SelectInst>(I));
 }
 
 /// \brief Builder for the alloca slices.
@@ -316,7 +334,7 @@ class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
   typedef PtrUseVisitor<SliceBuilder> Base;
 
   const uint64_t AllocSize;
-  AllocaSlices &S;
+  AllocaSlices &AS;
 
   SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
   SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;
@@ -325,25 +343,25 @@ class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
   SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
 
 public:
-  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &S)
+  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
       : PtrUseVisitor<SliceBuilder>(DL),
-        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), S(S) {}
+        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {}
 
 private:
   void markAsDead(Instruction &I) {
-    if (VisitedDeadInsts.insert(&I))
-      S.DeadUsers.push_back(&I);
+    if (VisitedDeadInsts.insert(&I).second)
+      AS.DeadUsers.push_back(&I);
   }
 
   void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
                  bool IsSplittable = false) {
     // Completely skip uses which have a zero size or start either before or
     // past the end of the allocation.
-    if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize)) {
+    if (Size == 0 || Offset.uge(AllocSize)) {
       DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
                    << " which has zero size or starts outside of the "
                    << AllocSize << " byte alloca:\n"
-                   << "    alloca: " << S.AI << "\n"
+                   << "    alloca: " << AS.AI << "\n"
                    << "       use: " << I << "\n");
       return markAsDead(I);
     }
@@ -361,12 +379,12 @@ private:
     if (Size > AllocSize - BeginOffset) {
       DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
                    << " to remain within the " << AllocSize << " byte alloca:\n"
-                   << "    alloca: " << S.AI << "\n"
+                   << "    alloca: " << AS.AI << "\n"
                    << "       use: " << I << "\n");
       EndOffset = AllocSize;
     }
 
-    S.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
+    AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
   }
 
   void visitBitCastInst(BitCastInst &BC) {
@@ -380,6 +398,43 @@ private:
     if (GEPI.use_empty())
       return markAsDead(GEPI);
 
+    if (SROAStrictInbounds && GEPI.isInBounds()) {
+      // FIXME: This is a manually un-factored variant of the basic code inside
+      // of GEPs with checking of the inbounds invariant specified in the
+      // langref in a very strict sense. If we ever want to enable
+      // SROAStrictInbounds, this code should be factored cleanly into
+      // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
+      // by writing out the code here where we have the underlying allocation
+      // size readily available.
+      APInt GEPOffset = Offset;
+      for (gep_type_iterator GTI = gep_type_begin(GEPI),
+                             GTE = gep_type_end(GEPI);
+           GTI != GTE; ++GTI) {
+        ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+        if (!OpC)
+          break;
+
+        // Handle a struct index, which adds its field offset to the pointer.
+        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+          unsigned ElementIdx = OpC->getZExtValue();
+          const StructLayout *SL = DL.getStructLayout(STy);
+          GEPOffset +=
+              APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
+        } else {
+          // For array or vector indices, scale the index by the size of the type.
+          APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
+          GEPOffset += Index * APInt(Offset.getBitWidth(),
+                                     DL.getTypeAllocSize(GTI.getIndexedType()));
+        }
+
+        // If this index has computed an intermediate pointer which is not
+        // inbounds, then the result of the GEP is a poison value and we can
+        // delete it and all uses.
+ if (GEPOffset.ugt(AllocSize)) + return markAsDead(GEPI); + } + } + return Base::visitGetElementPtrInst(GEPI); } @@ -426,12 +481,11 @@ private: // risk of overflow. // FIXME: We should instead consider the pointer to have escaped if this // function is being instrumented for addressing bugs or race conditions. - if (Offset.isNegative() || Size > AllocSize || - Offset.ugt(AllocSize - Size)) { + if (Size > AllocSize || Offset.ugt(AllocSize - Size)) { DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset << " which extends past the end of the " << AllocSize << " byte alloca:\n" - << " alloca: " << S.AI << "\n" + << " alloca: " << AS.AI << "\n" << " use: " << SI << "\n"); return markAsDead(SI); } @@ -446,7 +500,7 @@ private: assert(II.getRawDest() == *U && "Pointer use is not the destination?"); ConstantInt *Length = dyn_cast(II.getLength()); if ((Length && Length->getValue() == 0) || - (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize))) + (IsOffsetKnown && Offset.uge(AllocSize))) // Zero-length mem transfer intrinsics can be ignored entirely. return markAsDead(II); @@ -461,14 +515,30 @@ private: void visitMemTransferInst(MemTransferInst &II) { ConstantInt *Length = dyn_cast(II.getLength()); - if ((Length && Length->getValue() == 0) || - (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize))) + if (Length && Length->getValue() == 0) // Zero-length mem transfer intrinsics can be ignored entirely. return markAsDead(II); + // Because we can visit these intrinsics twice, also check to see if the + // first time marked this instruction as dead. If so, skip it. + if (VisitedDeadInsts.count(&II)) + return; + if (!IsOffsetKnown) return PI.setAborted(&II); + // This side of the transfer is completely out-of-bounds, and so we can + // nuke the entire transfer. However, we also need to nuke the other side + // if already added to our partitions. + // FIXME: Yet another place we really should bypass this when + // instrumenting for ASan. + if (Offset.uge(AllocSize)) { + SmallDenseMap::iterator MTPI = MemTransferSliceMap.find(&II); + if (MTPI != MemTransferSliceMap.end()) + AS.Slices[MTPI->second].kill(); + return markAsDead(II); + } + uint64_t RawOffset = Offset.getLimitedValue(); uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset; @@ -487,11 +557,11 @@ private: // they both point to the same alloca. bool Inserted; SmallDenseMap::iterator MTPI; - llvm::tie(MTPI, Inserted) = - MemTransferSliceMap.insert(std::make_pair(&II, S.Slices.size())); + std::tie(MTPI, Inserted) = + MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size())); unsigned PrevIdx = MTPI->second; if (!Inserted) { - Slice &PrevP = S.Slices[PrevIdx]; + Slice &PrevP = AS.Slices[PrevIdx]; // Check if the begin offsets match and this is a non-volatile transfer. // In that case, we can completely elide the transfer. @@ -509,7 +579,7 @@ private: insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length); // Check that we ended up with a valid index in the map. 
- assert(S.Slices[PrevIdx].getUse()->getUser() == &II && + assert(AS.Slices[PrevIdx].getUse()->getUser() == &II && "Map index doesn't point back to a slice with this user."); } @@ -546,7 +616,7 @@ private: Size = 0; do { Instruction *I, *UsedI; - llvm::tie(UsedI, I) = Uses.pop_back_val(); + std::tie(UsedI, I) = Uses.pop_back_val(); if (LoadInst *LI = dyn_cast(I)) { Size = std::max(Size, DL.getTypeStoreSize(LI->getType())); @@ -568,67 +638,48 @@ private: return I; } - for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE; - ++UI) - if (Visited.insert(cast(*UI))) - Uses.push_back(std::make_pair(I, cast(*UI))); + for (User *U : I->users()) + if (Visited.insert(cast(U)).second) + Uses.push_back(std::make_pair(I, cast(U))); } while (!Uses.empty()); - return 0; + return nullptr; } - void visitPHINode(PHINode &PN) { - if (PN.use_empty()) - return markAsDead(PN); - if (!IsOffsetKnown) - return PI.setAborted(&PN); - - // See if we already have computed info on this node. - uint64_t &PHISize = PHIOrSelectSizes[&PN]; - if (!PHISize) { - // This is a new PHI node, check for an unsafe use of the PHI node. - if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&PN, PHISize)) - return PI.setAborted(UnsafeI); - } - - // For PHI and select operands outside the alloca, we can't nuke the entire - // phi or select -- the other side might still be relevant, so we special - // case them here and use a separate structure to track the operands - // themselves which should be replaced with undef. - // FIXME: This should instead be escaped in the event we're instrumenting - // for address sanitization. - if ((Offset.isNegative() && (-Offset).uge(PHISize)) || - (!Offset.isNegative() && Offset.uge(AllocSize))) { - S.DeadOperands.push_back(U); - return; - } - - insertUse(PN, Offset, PHISize); - } + void visitPHINodeOrSelectInst(Instruction &I) { + assert(isa(I) || isa(I)); + if (I.use_empty()) + return markAsDead(I); - void visitSelectInst(SelectInst &SI) { - if (SI.use_empty()) - return markAsDead(SI); - if (Value *Result = foldSelectInst(SI)) { + // TODO: We could use SimplifyInstruction here to fold PHINodes and + // SelectInsts. However, doing so requires to change the current + // dead-operand-tracking mechanism. For instance, suppose neither loading + // from %U nor %other traps. Then "load (select undef, %U, %other)" does not + // trap either. However, if we simply replace %U with undef using the + // current dead-operand-tracking mechanism, "load (select undef, undef, + // %other)" may trap because the select may return the first operand + // "undef". + if (Value *Result = foldPHINodeOrSelectInst(I)) { if (Result == *U) // If the result of the constant fold will be the pointer, recurse - // through the select as if we had RAUW'ed it. - enqueueUsers(SI); + // through the PHI/select as if we had RAUW'ed it. + enqueueUsers(I); else - // Otherwise the operand to the select is dead, and we can replace it - // with undef. - S.DeadOperands.push_back(U); + // Otherwise the operand to the PHI/select is dead, and we can replace + // it with undef. + AS.DeadOperands.push_back(U); return; } + if (!IsOffsetKnown) - return PI.setAborted(&SI); + return PI.setAborted(&I); // See if we already have computed info on this node. - uint64_t &SelectSize = PHIOrSelectSizes[&SI]; - if (!SelectSize) { - // This is a new Select, check for an unsafe use of it. 
- if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&SI, SelectSize)) + uint64_t &Size = PHIOrSelectSizes[&I]; + if (!Size) { + // This is a new PHI/Select, check for an unsafe use of it. + if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size)) return PI.setAborted(UnsafeI); } @@ -638,13 +689,20 @@ private: // themselves which should be replaced with undef. // FIXME: This should instead be escaped in the event we're instrumenting // for address sanitization. - if ((Offset.isNegative() && Offset.uge(SelectSize)) || - (!Offset.isNegative() && Offset.uge(AllocSize))) { - S.DeadOperands.push_back(U); + if (Offset.uge(AllocSize)) { + AS.DeadOperands.push_back(U); return; } - insertUse(SI, Offset, SelectSize); + insertUse(I, Offset, Size); + } + + void visitPHINode(PHINode &PN) { + visitPHINodeOrSelectInst(PN); + } + + void visitSelectInst(SelectInst &SI) { + visitPHINodeOrSelectInst(SI); } /// \brief Disable SROA entirely if there are unhandled users of the alloca. @@ -658,7 +716,7 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI) #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) AI(AI), #endif - PointerEscapingInstr(0) { + PointerEscapingInstr(nullptr) { SliceBuilder PB(DL, AI, *this); SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI); if (PtrI.isEscaped() || PtrI.isAborted()) { @@ -674,6 +732,13 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI) std::mem_fun_ref(&Slice::isDead)), Slices.end()); +#if __cplusplus >= 201103L && !defined(NDEBUG) + if (SROARandomShuffleSlices) { + std::mt19937 MT(static_cast(sys::TimeValue::now().msec())); + std::shuffle(Slices.begin(), Slices.end(), MT); + } +#endif + // Sort the uses. This arranges for the offsets to be in ascending order, // and the sizes to be in descending order. std::sort(Slices.begin(), Slices.end()); @@ -712,8 +777,10 @@ void AllocaSlices::print(raw_ostream &OS) const { print(OS, I); } -void AllocaSlices::dump(const_iterator I) const { print(dbgs(), I); } -void AllocaSlices::dump() const { print(dbgs()); } +LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const { + print(dbgs(), I); +} +LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); } #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) @@ -733,50 +800,69 @@ class AllocaPromoter : public LoadAndStorePromoter { SmallVector DVIs; public: - AllocaPromoter(const SmallVectorImpl &Insts, SSAUpdater &S, + AllocaPromoter(const SmallVectorImpl &Insts, SSAUpdater &S, AllocaInst &AI, DIBuilder &DIB) - : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {} + : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {} void run(const SmallVectorImpl &Insts) { - // Remember which alloca we're promoting (for isInstInList). - if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) { - for (Value::use_iterator UI = DebugNode->use_begin(), - UE = DebugNode->use_end(); - UI != UE; ++UI) - if (DbgDeclareInst *DDI = dyn_cast(*UI)) - DDIs.push_back(DDI); - else if (DbgValueInst *DVI = dyn_cast(*UI)) - DVIs.push_back(DVI); + // Retain the debug information attached to the alloca for use when + // rewriting loads and stores. 
+ if (auto *L = LocalAsMetadata::getIfExists(&AI)) { + if (auto *DebugNode = MetadataAsValue::getIfExists(AI.getContext(), L)) { + for (User *U : DebugNode->users()) + if (DbgDeclareInst *DDI = dyn_cast(U)) + DDIs.push_back(DDI); + else if (DbgValueInst *DVI = dyn_cast(U)) + DVIs.push_back(DVI); + } } LoadAndStorePromoter::run(Insts); - AI.eraseFromParent(); + + // While we have the debug information, clear it off of the alloca. The + // caller takes care of deleting the alloca. while (!DDIs.empty()) DDIs.pop_back_val()->eraseFromParent(); while (!DVIs.empty()) DVIs.pop_back_val()->eraseFromParent(); } - virtual bool isInstInList(Instruction *I, - const SmallVectorImpl &Insts) const { + bool isInstInList(Instruction *I, + const SmallVectorImpl &Insts) const override { + Value *Ptr; if (LoadInst *LI = dyn_cast(I)) - return LI->getOperand(0) == &AI; - return cast(I)->getPointerOperand() == &AI; + Ptr = LI->getOperand(0); + else + Ptr = cast(I)->getPointerOperand(); + + // Only used to detect cycles, which will be rare and quickly found as + // we're walking up a chain of defs rather than down through uses. + SmallPtrSet Visited; + + do { + if (Ptr == &AI) + return true; + + if (BitCastInst *BCI = dyn_cast(Ptr)) + Ptr = BCI->getOperand(0); + else if (GetElementPtrInst *GEPI = dyn_cast(Ptr)) + Ptr = GEPI->getPointerOperand(); + else + return false; + + } while (Visited.insert(Ptr).second); + + return false; } - virtual void updateDebugInfo(Instruction *Inst) const { - for (SmallVectorImpl::const_iterator I = DDIs.begin(), - E = DDIs.end(); I != E; ++I) { - DbgDeclareInst *DDI = *I; + void updateDebugInfo(Instruction *Inst) const override { + for (DbgDeclareInst *DDI : DDIs) if (StoreInst *SI = dyn_cast(Inst)) ConvertDebugDeclareToDebugValue(DDI, SI, DIB); else if (LoadInst *LI = dyn_cast(Inst)) ConvertDebugDeclareToDebugValue(DDI, LI, DIB); - } - for (SmallVectorImpl::const_iterator I = DVIs.begin(), - E = DVIs.end(); I != E; ++I) { - DbgValueInst *DVI = *I; - Value *Arg = 0; + for (DbgValueInst *DVI : DVIs) { + Value *Arg = nullptr; if (StoreInst *SI = dyn_cast(Inst)) { // If an argument is zero extended then use argument directly. The ZExt // may be zapped by an optimization pass in future. @@ -792,8 +878,8 @@ public: continue; } Instruction *DbgVal = - DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()), - Inst); + DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()), + DIExpression(DVI->getExpression()), Inst); DbgVal->setDebugLoc(DVI->getDebugLoc()); } } @@ -826,6 +912,7 @@ class SROA : public FunctionPass { LLVMContext *C; const DataLayout *DL; DominatorTree *DT; + AssumptionTracker *AT; /// \brief Worklist of alloca instructions to simplify. 
/// @@ -872,26 +959,27 @@ class SROA : public FunctionPass { public: SROA(bool RequiresDomTree = true) : FunctionPass(ID), RequiresDomTree(RequiresDomTree), - C(0), DL(0), DT(0) { + C(nullptr), DL(nullptr), DT(nullptr) { initializeSROAPass(*PassRegistry::getPassRegistry()); } - bool runOnFunction(Function &F); - void getAnalysisUsage(AnalysisUsage &AU) const; + bool runOnFunction(Function &F) override; + void getAnalysisUsage(AnalysisUsage &AU) const override; - const char *getPassName() const { return "SROA"; } + const char *getPassName() const override { return "SROA"; } static char ID; private: friend class PHIOrSelectSpeculator; friend class AllocaSliceRewriter; - bool rewritePartition(AllocaInst &AI, AllocaSlices &S, + bool rewritePartition(AllocaInst &AI, AllocaSlices &AS, AllocaSlices::iterator B, AllocaSlices::iterator E, int64_t BeginOffset, int64_t EndOffset, ArrayRef SplitUses); - bool splitAlloca(AllocaInst &AI, AllocaSlices &S); + bool splitAlloca(AllocaInst &AI, AllocaSlices &AS); bool runOnAlloca(AllocaInst &AI); - void deleteDeadInstructions(SmallPtrSet &DeletedAllocas); + void clobberUse(Use &U); + void deleteDeadInstructions(SmallPtrSetImpl &DeletedAllocas); bool promoteAllocas(Function &F); }; } @@ -904,7 +992,8 @@ FunctionPass *llvm::createSROAPass(bool RequiresDomTree) { INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates", false, false) -INITIALIZE_PASS_DEPENDENCY(DominatorTree) +INITIALIZE_PASS_DEPENDENCY(AssumptionTracker) +INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates", false, false) @@ -913,7 +1002,12 @@ INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates", static Type *findCommonType(AllocaSlices::const_iterator B, AllocaSlices::const_iterator E, uint64_t EndOffset) { - Type *Ty = 0; + Type *Ty = nullptr; + bool TyIsCommon = true; + IntegerType *ITy = nullptr; + + // Note that we need to look at *every* alloca slice's Use to ensure we + // always get consistent results regardless of the order of slices. for (AllocaSlices::const_iterator I = B; I != E; ++I) { Use *U = I->getUse(); if (isa(*U->getUser())) @@ -921,34 +1015,37 @@ static Type *findCommonType(AllocaSlices::const_iterator B, if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset) continue; - Type *UserTy = 0; - if (LoadInst *LI = dyn_cast(U->getUser())) + Type *UserTy = nullptr; + if (LoadInst *LI = dyn_cast(U->getUser())) { UserTy = LI->getType(); - else if (StoreInst *SI = dyn_cast(U->getUser())) + } else if (StoreInst *SI = dyn_cast(U->getUser())) { UserTy = SI->getValueOperand()->getType(); - else - return 0; // Bail if we have weird uses. + } - if (IntegerType *ITy = dyn_cast(UserTy)) { + if (IntegerType *UserITy = dyn_cast_or_null(UserTy)) { // If the type is larger than the partition, skip it. We only encounter // this for split integer operations where we want to use the type of the - // entity causing the split. - if (ITy->getBitWidth() / 8 > (EndOffset - B->beginOffset())) + // entity causing the split. Also skip if the type is not a byte width + // multiple. + if (UserITy->getBitWidth() % 8 != 0 || + UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset())) continue; - // If we have found an integer type use covering the alloca, use that - // regardless of the other types, as integers are often used for a - // "bucket - // of bits" type. - return ITy; + // Track the largest bitwidth integer type used in this way in case there + // is no common type. 
+ if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth()) + ITy = UserITy; } - if (Ty && Ty != UserTy) - return 0; - - Ty = UserTy; + // To avoid depending on the order of slices, Ty and TyIsCommon must not + // depend on types skipped above. + if (!UserTy || (Ty && Ty != UserTy)) + TyIsCommon = false; // Give up on anything but an iN type. + else + Ty = UserTy; } - return Ty; + + return TyIsCommon ? Ty : ITy; } /// PHI instructions that use an alloca and are subsequently loaded can be @@ -970,7 +1067,7 @@ static Type *findCommonType(AllocaSlices::const_iterator B, /// FIXME: This should be hoisted into a generic utility, likely in /// Transforms/Util/Local.h static bool isSafePHIToSpeculate(PHINode &PN, - const DataLayout *DL = 0) { + const DataLayout *DL = nullptr) { // For now, we can only do this promotion if the load is in the same block // as the PHI, and if there are no stores between the phi and load. // TODO: Allow recursive phi users. @@ -978,10 +1075,9 @@ static bool isSafePHIToSpeculate(PHINode &PN, BasicBlock *BB = PN.getParent(); unsigned MaxAlign = 0; bool HaveLoad = false; - for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); UI != UE; - ++UI) { - LoadInst *LI = dyn_cast(*UI); - if (LI == 0 || !LI->isSimple()) + for (User *U : PN.users()) { + LoadInst *LI = dyn_cast(U); + if (!LI || !LI->isSimple()) return false; // For now we only allow loads in the same block as the PHI. This is @@ -1024,7 +1120,7 @@ static bool isSafePHIToSpeculate(PHINode &PN, // If this pointer is always safe to load, or if we can prove that there // is already a load in the block, then we can move the load to the pred // block. - if (InVal->isDereferenceablePointer() || + if (InVal->isDereferenceablePointer(DL) || isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL)) continue; @@ -1042,15 +1138,17 @@ static void speculatePHINodeLoads(PHINode &PN) { PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(), PN.getName() + ".sroa.speculated"); - // Get the TBAA tag and alignment to use from one of the loads. It doesn't + // Get the AA tags and alignment to use from one of the loads. It doesn't // matter which one we get and if any differ. - LoadInst *SomeLoad = cast(*PN.use_begin()); - MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa); + LoadInst *SomeLoad = cast(PN.user_back()); + + AAMDNodes AATags; + SomeLoad->getAAMetadata(AATags); unsigned Align = SomeLoad->getAlignment(); // Rewrite all loads of the PN to use the new PHI. while (!PN.use_empty()) { - LoadInst *LI = cast(*PN.use_begin()); + LoadInst *LI = cast(PN.user_back()); LI->replaceAllUsesWith(NewPN); LI->eraseFromParent(); } @@ -1066,8 +1164,8 @@ static void speculatePHINodeLoads(PHINode &PN) { InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName())); ++NumLoadsSpeculated; Load->setAlignment(Align); - if (TBAATag) - Load->setMetadata(LLVMContext::MD_tbaa, TBAATag); + if (AATags) + Load->setAAMetadata(AATags); NewPN->addIncoming(Load, Pred); } @@ -1088,16 +1186,16 @@ static void speculatePHINodeLoads(PHINode &PN) { /// /// We can do this to a select if its only uses are loads and if the operand /// to the select can be loaded unconditionally. 
-static bool isSafeSelectToSpeculate(SelectInst &SI, const DataLayout *DL = 0) { +static bool isSafeSelectToSpeculate(SelectInst &SI, + const DataLayout *DL = nullptr) { Value *TValue = SI.getTrueValue(); Value *FValue = SI.getFalseValue(); - bool TDerefable = TValue->isDereferenceablePointer(); - bool FDerefable = FValue->isDereferenceablePointer(); + bool TDerefable = TValue->isDereferenceablePointer(DL); + bool FDerefable = FValue->isDereferenceablePointer(DL); - for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); UI != UE; - ++UI) { - LoadInst *LI = dyn_cast(*UI); - if (LI == 0 || !LI->isSimple()) + for (User *U : SI.users()) { + LoadInst *LI = dyn_cast(U); + if (!LI || !LI->isSimple()) return false; // Both operands to the select need to be dereferencable, either @@ -1122,7 +1220,7 @@ static void speculateSelectInstLoads(SelectInst &SI) { Value *FV = SI.getFalseValue(); // Replace the loads of the select with a select of two loads. while (!SI.use_empty()) { - LoadInst *LI = cast(*SI.use_begin()); + LoadInst *LI = cast(SI.user_back()); assert(LI->isSimple() && "We only speculate simple loads"); IRB.SetInsertPoint(LI); @@ -1132,12 +1230,15 @@ static void speculateSelectInstLoads(SelectInst &SI) { IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false"); NumLoadsSpeculated += 2; - // Transfer alignment and TBAA info if present. + // Transfer alignment and AA info if present. TL->setAlignment(LI->getAlignment()); FL->setAlignment(LI->getAlignment()); - if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) { - TL->setMetadata(LLVMContext::MD_tbaa, Tag); - FL->setMetadata(LLVMContext::MD_tbaa, Tag); + + AAMDNodes Tags; + LI->getAAMetadata(Tags); + if (Tags) { + TL->setAAMetadata(Tags); + FL->setAAMetadata(Tags); } Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, @@ -1155,7 +1256,7 @@ static void speculateSelectInstLoads(SelectInst &SI) { /// This will return the BasePtr if that is valid, or build a new GEP /// instruction using the IRBuilder if GEP-ing is needed. static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, - SmallVectorImpl &Indices) { + SmallVectorImpl &Indices, Twine NamePrefix) { if (Indices.empty()) return BasePtr; @@ -1164,7 +1265,7 @@ static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, if (Indices.size() == 1 && cast(Indices.back())->isZero()) return BasePtr; - return IRB.CreateInBoundsGEP(BasePtr, Indices, "idx"); + return IRB.CreateInBoundsGEP(BasePtr, Indices, NamePrefix + "sroa_idx"); } /// \brief Get a natural GEP off of the BasePtr walking through Ty toward @@ -1178,9 +1279,13 @@ static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, /// indicated by Indices to have the correct offset. static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, Value *BasePtr, Type *Ty, Type *TargetTy, - SmallVectorImpl &Indices) { + SmallVectorImpl &Indices, + Twine NamePrefix) { if (Ty == TargetTy) - return buildGEP(IRB, BasePtr, Indices); + return buildGEP(IRB, BasePtr, Indices, NamePrefix); + + // Pointer size to use for the indices. + unsigned PtrSize = DL.getPointerTypeSizeInBits(BasePtr->getType()); // See if we can descend into a struct and locate a field with the correct // type. @@ -1189,11 +1294,13 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, do { if (ElementTy->isPointerTy()) break; - if (SequentialType *SeqTy = dyn_cast(ElementTy)) { - ElementTy = SeqTy->getElementType(); - // Note that we use the default address space as this index is over an - // array or a vector, not a pointer. 
- Indices.push_back(IRB.getInt(APInt(DL.getPointerSizeInBits(0), 0))); + + if (ArrayType *ArrayTy = dyn_cast(ElementTy)) { + ElementTy = ArrayTy->getElementType(); + Indices.push_back(IRB.getIntN(PtrSize, 0)); + } else if (VectorType *VectorTy = dyn_cast(ElementTy)) { + ElementTy = VectorTy->getElementType(); + Indices.push_back(IRB.getInt32(0)); } else if (StructType *STy = dyn_cast(ElementTy)) { if (STy->element_begin() == STy->element_end()) break; // Nothing left to descend into. @@ -1207,7 +1314,7 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, if (ElementTy != TargetTy) Indices.erase(Indices.end() - NumLayers, Indices.end()); - return buildGEP(IRB, BasePtr, Indices); + return buildGEP(IRB, BasePtr, Indices, NamePrefix); } /// \brief Recursively compute indices for a natural GEP. @@ -1217,29 +1324,32 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, Type *Ty, APInt &Offset, Type *TargetTy, - SmallVectorImpl &Indices) { + SmallVectorImpl &Indices, + Twine NamePrefix) { if (Offset == 0) - return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices); + return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices, NamePrefix); // We can't recurse through pointer types. if (Ty->isPointerTy()) - return 0; + return nullptr; // We try to analyze GEPs over vectors here, but note that these GEPs are // extremely poorly defined currently. The long-term goal is to remove GEPing // over a vector from the IR completely. if (VectorType *VecTy = dyn_cast(Ty)) { unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType()); - if (ElementSizeInBits % 8) - return 0; // GEPs over non-multiple of 8 size vector elements are invalid. + if (ElementSizeInBits % 8 != 0) { + // GEPs over non-multiple of 8 size vector elements are invalid. + return nullptr; + } APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8); APInt NumSkippedElements = Offset.sdiv(ElementSize); if (NumSkippedElements.ugt(VecTy->getNumElements())) - return 0; + return nullptr; Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(), - Offset, TargetTy, Indices); + Offset, TargetTy, Indices, NamePrefix); } if (ArrayType *ArrTy = dyn_cast(Ty)) { @@ -1247,31 +1357,31 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL, APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy)); APInt NumSkippedElements = Offset.sdiv(ElementSize); if (NumSkippedElements.ugt(ArrTy->getNumElements())) - return 0; + return nullptr; Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, - Indices); + Indices, NamePrefix); } StructType *STy = dyn_cast(Ty); if (!STy) - return 0; + return nullptr; const StructLayout *SL = DL.getStructLayout(STy); uint64_t StructOffset = Offset.getZExtValue(); if (StructOffset >= SL->getSizeInBytes()) - return 0; + return nullptr; unsigned Index = SL->getElementContainingOffset(StructOffset); Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index)); Type *ElementTy = STy->getElementType(Index); if (Offset.uge(DL.getTypeAllocSize(ElementTy))) - return 0; // The offset points into alignment padding. + return nullptr; // The offset points into alignment padding. 
Indices.push_back(IRB.getInt32(Index)); return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, - Indices); + Indices, NamePrefix); } /// \brief Get a natural GEP from a base pointer to a particular offset and @@ -1286,26 +1396,27 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL, /// If no natural GEP can be constructed, this function returns null. static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, APInt Offset, Type *TargetTy, - SmallVectorImpl &Indices) { + SmallVectorImpl &Indices, + Twine NamePrefix) { PointerType *Ty = cast(Ptr->getType()); // Don't consider any GEPs through an i8* as natural unless the TargetTy is // an i8. - if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8)) - return 0; + if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8)) + return nullptr; Type *ElementTy = Ty->getElementType(); if (!ElementTy->isSized()) - return 0; // We can't GEP through an unsized element. + return nullptr; // We can't GEP through an unsized element. APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy)); if (ElementSize == 0) - return 0; // Zero-length arrays can't help us build a natural GEP. + return nullptr; // Zero-length arrays can't help us build a natural GEP. APInt NumSkippedElements = Offset.sdiv(ElementSize); Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, - Indices); + Indices, NamePrefix); } /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the @@ -1323,8 +1434,9 @@ static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL, /// properties. The algorithm tries to fold as many constant indices into /// a single GEP as possible, thus making each GEP more independent of the /// surrounding code. -static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, - Value *Ptr, APInt Offset, Type *PointerTy) { +static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, + APInt Offset, Type *PointerTy, + Twine NamePrefix) { // Even though we don't look through PHI nodes, we could be called on an // instruction in an unreachable block, which may be on a cycle. SmallPtrSet Visited; @@ -1334,11 +1446,11 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, // We may end up computing an offset pointer that has the wrong type. If we // never are able to compute one directly that has the correct type, we'll // fall back to it, so keep it around here. - Value *OffsetPtr = 0; + Value *OffsetPtr = nullptr; // Remember any i8 pointer we come across to re-use if we need to do a raw // byte offset. - Value *Int8Ptr = 0; + Value *Int8Ptr = nullptr; APInt Int8PtrOffset(Offset.getBitWidth(), 0); Type *TargetTy = PointerTy->getPointerElementType(); @@ -1351,14 +1463,14 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, break; Offset += GEPOffset; Ptr = GEP->getPointerOperand(); - if (!Visited.insert(Ptr)) + if (!Visited.insert(Ptr).second) break; } // See if we can perform a natural GEP here. Indices.clear(); if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy, - Indices)) { + Indices, NamePrefix)) { if (P->getType() == PointerTy) { // Zap any offset pointer that we ended up computing in previous rounds. 
if (OffsetPtr && OffsetPtr->use_empty()) @@ -1388,24 +1500,25 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, break; } assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!"); - } while (Visited.insert(Ptr)); + } while (Visited.insert(Ptr).second); if (!OffsetPtr) { if (!Int8Ptr) { - Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(), - "raw_cast"); + Int8Ptr = IRB.CreateBitCast( + Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()), + NamePrefix + "sroa_raw_cast"); Int8PtrOffset = Offset; } OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr : IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset), - "raw_idx"); + NamePrefix + "sroa_raw_idx"); } Ptr = OffsetPtr; // On the off chance we were targeting i8*, guard the bitcast here. if (Ptr->getType() != PointerTy) - Ptr = IRB.CreateBitCast(Ptr, PointerTy, "cast"); + Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast"); return Ptr; } @@ -1428,6 +1541,10 @@ static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) return false; + // We can convert pointers to integers and vice-versa. Same for vectors + // of pointers and integers. + OldTy = OldTy->getScalarType(); + NewTy = NewTy->getScalarType(); if (NewTy->isPointerTy() || OldTy->isPointerTy()) { if (NewTy->isPointerTy() && OldTy->isPointerTy()) return true; @@ -1446,60 +1563,96 @@ static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test /// two types for viability with this routine. static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V, - Type *Ty) { - assert(canConvertValue(DL, V->getType(), Ty) && - "Value not convertable to type"); - if (V->getType() == Ty) + Type *NewTy) { + Type *OldTy = V->getType(); + assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type"); + + if (OldTy == NewTy) return V; - if (IntegerType *OldITy = dyn_cast(V->getType())) - if (IntegerType *NewITy = dyn_cast(Ty)) + + if (IntegerType *OldITy = dyn_cast(OldTy)) + if (IntegerType *NewITy = dyn_cast(NewTy)) if (NewITy->getBitWidth() > OldITy->getBitWidth()) return IRB.CreateZExt(V, NewITy); - if (V->getType()->isIntegerTy() && Ty->isPointerTy()) - return IRB.CreateIntToPtr(V, Ty); - if (V->getType()->isPointerTy() && Ty->isIntegerTy()) - return IRB.CreatePtrToInt(V, Ty); - return IRB.CreateBitCast(V, Ty); + // See if we need inttoptr for this type pair. A cast involving both scalars + // and vectors requires and additional bitcast. + if (OldTy->getScalarType()->isIntegerTy() && + NewTy->getScalarType()->isPointerTy()) { + // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8* + if (OldTy->isVectorTy() && !NewTy->isVectorTy()) + return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), + NewTy); + + // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*> + if (!OldTy->isVectorTy() && NewTy->isVectorTy()) + return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), + NewTy); + + return IRB.CreateIntToPtr(V, NewTy); + } + + // See if we need ptrtoint for this type pair. A cast involving both scalars + // and vectors requires and additional bitcast. 
+ if (OldTy->getScalarType()->isPointerTy() && + NewTy->getScalarType()->isIntegerTy()) { + // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128 + if (OldTy->isVectorTy() && !NewTy->isVectorTy()) + return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), + NewTy); + + // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32> + if (!OldTy->isVectorTy() && NewTy->isVectorTy()) + return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), + NewTy); + + return IRB.CreatePtrToInt(V, NewTy); + } + + return IRB.CreateBitCast(V, NewTy); } /// \brief Test whether the given slice use can be promoted to a vector. /// /// This function is called to test each entry in a partioning which is slated /// for a single slice. -static bool isVectorPromotionViableForSlice( - const DataLayout &DL, AllocaSlices &S, uint64_t SliceBeginOffset, - uint64_t SliceEndOffset, VectorType *Ty, uint64_t ElementSize, - AllocaSlices::const_iterator I) { +static bool +isVectorPromotionViableForSlice(const DataLayout &DL, uint64_t SliceBeginOffset, + uint64_t SliceEndOffset, VectorType *Ty, + uint64_t ElementSize, const Slice &S) { // First validate the slice offsets. uint64_t BeginOffset = - std::max(I->beginOffset(), SliceBeginOffset) - SliceBeginOffset; + std::max(S.beginOffset(), SliceBeginOffset) - SliceBeginOffset; uint64_t BeginIndex = BeginOffset / ElementSize; if (BeginIndex * ElementSize != BeginOffset || BeginIndex >= Ty->getNumElements()) return false; uint64_t EndOffset = - std::min(I->endOffset(), SliceEndOffset) - SliceBeginOffset; + std::min(S.endOffset(), SliceEndOffset) - SliceBeginOffset; uint64_t EndIndex = EndOffset / ElementSize; if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements()) return false; assert(EndIndex > BeginIndex && "Empty vector!"); uint64_t NumElements = EndIndex - BeginIndex; - Type *SliceTy = - (NumElements == 1) ? Ty->getElementType() - : VectorType::get(Ty->getElementType(), NumElements); + Type *SliceTy = (NumElements == 1) + ? Ty->getElementType() + : VectorType::get(Ty->getElementType(), NumElements); Type *SplitIntTy = Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8); - Use *U = I->getUse(); + Use *U = S.getUse(); if (MemIntrinsic *MI = dyn_cast(U->getUser())) { if (MI->isVolatile()) return false; - if (!I->isSplittable()) + if (!S.isSplittable()) return false; // Skip any unsplittable intrinsics. + } else if (IntrinsicInst *II = dyn_cast(U->getUser())) { + if (II->getIntrinsicID() != Intrinsic::lifetime_start && + II->getIntrinsicID() != Intrinsic::lifetime_end) + return false; } else if (U->get()->getType()->getPointerElementType()->isStructTy()) { // Disable vector promotion when there are loads or stores of an FCA. return false; @@ -1507,8 +1660,7 @@ static bool isVectorPromotionViableForSlice( if (LI->isVolatile()) return false; Type *LTy = LI->getType(); - if (SliceBeginOffset > I->beginOffset() || - SliceEndOffset < I->endOffset()) { + if (SliceBeginOffset > S.beginOffset() || SliceEndOffset < S.endOffset()) { assert(LTy->isIntegerTy()); LTy = SplitIntTy; } @@ -1518,8 +1670,7 @@ static bool isVectorPromotionViableForSlice( if (SI->isVolatile()) return false; Type *STy = SI->getValueOperand()->getType(); - if (SliceBeginOffset > I->beginOffset() || - SliceEndOffset < I->endOffset()) { + if (SliceBeginOffset > S.beginOffset() || SliceEndOffset < S.endOffset()) { assert(STy->isIntegerTy()); STy = SplitIntTy; } @@ -1541,39 +1692,112 @@ static bool isVectorPromotionViableForSlice( /// SSA value. 
We only can ensure this for a limited set of operations, and we /// don't want to do the rewrites unless we are confident that the result will /// be promotable, so we have an early test here. -static bool -isVectorPromotionViable(const DataLayout &DL, Type *AllocaTy, AllocaSlices &S, +static VectorType * +isVectorPromotionViable(const DataLayout &DL, uint64_t SliceBeginOffset, uint64_t SliceEndOffset, - AllocaSlices::const_iterator I, - AllocaSlices::const_iterator E, + AllocaSlices::const_range Slices, ArrayRef SplitUses) { - VectorType *Ty = dyn_cast(AllocaTy); - if (!Ty) - return false; + // Collect the candidate types for vector-based promotion. Also track whether + // we have different element types. + SmallVector CandidateTys; + Type *CommonEltTy = nullptr; + bool HaveCommonEltTy = true; + auto CheckCandidateType = [&](Type *Ty) { + if (auto *VTy = dyn_cast(Ty)) { + CandidateTys.push_back(VTy); + if (!CommonEltTy) + CommonEltTy = VTy->getElementType(); + else if (CommonEltTy != VTy->getElementType()) + HaveCommonEltTy = false; + } + }; + // Consider any loads or stores that are the exact size of the slice. + for (const auto &S : Slices) + if (S.beginOffset() == SliceBeginOffset && + S.endOffset() == SliceEndOffset) { + if (auto *LI = dyn_cast(S.getUse()->getUser())) + CheckCandidateType(LI->getType()); + else if (auto *SI = dyn_cast(S.getUse()->getUser())) + CheckCandidateType(SI->getValueOperand()->getType()); + } - uint64_t ElementSize = DL.getTypeSizeInBits(Ty->getScalarType()); + // If we didn't find a vector type, nothing to do here. + if (CandidateTys.empty()) + return nullptr; + + // Remove non-integer vector types if we had multiple common element types. + // FIXME: It'd be nice to replace them with integer vector types, but we can't + // do that until all the backends are known to produce good code for all + // integer vector types. + if (!HaveCommonEltTy) { + CandidateTys.erase(std::remove_if(CandidateTys.begin(), CandidateTys.end(), + [](VectorType *VTy) { + return !VTy->getElementType()->isIntegerTy(); + }), + CandidateTys.end()); + + // If there were no integer vector types, give up. + if (CandidateTys.empty()) + return nullptr; + + // Rank the remaining candidate vector types. This is easy because we know + // they're all integer vectors. We sort by ascending number of elements. + auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) { + assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) && + "Cannot have vector types of different sizes!"); + assert(RHSTy->getElementType()->isIntegerTy() && + "All non-integer types eliminated!"); + assert(LHSTy->getElementType()->isIntegerTy() && + "All non-integer types eliminated!"); + return RHSTy->getNumElements() < LHSTy->getNumElements(); + }; + std::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes); + CandidateTys.erase( + std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes), + CandidateTys.end()); + } else { +// The only way to have the same element type in every vector type is to +// have the same vector type. Check that and remove all but one. +#ifndef NDEBUG + for (VectorType *VTy : CandidateTys) { + assert(VTy->getElementType() == CommonEltTy && + "Unaccounted for element type!"); + assert(VTy == CandidateTys[0] && + "Different vector types with the same element type!"); + } +#endif + CandidateTys.resize(1); + } - // While the definition of LLVM vectors is bitpacked, we don't support sizes - // that aren't byte sized. 
- if (ElementSize % 8) - return false; - assert((DL.getTypeSizeInBits(Ty) % 8) == 0 && - "vector size not a multiple of element size?"); - ElementSize /= 8; + // Try each vector type, and return the one which works. + auto CheckVectorTypeForPromotion = [&](VectorType *VTy) { + uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType()); - for (; I != E; ++I) - if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset, - SliceEndOffset, Ty, ElementSize, I)) + // While the definition of LLVM vectors is bitpacked, we don't support sizes + // that aren't byte sized. + if (ElementSize % 8) return false; + assert((DL.getTypeSizeInBits(VTy) % 8) == 0 && + "vector size not a multiple of element size?"); + ElementSize /= 8; - for (ArrayRef::const_iterator SUI = SplitUses.begin(), - SUE = SplitUses.end(); - SUI != SUE; ++SUI) - if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset, - SliceEndOffset, Ty, ElementSize, *SUI)) - return false; + for (const auto &S : Slices) + if (!isVectorPromotionViableForSlice(DL, SliceBeginOffset, SliceEndOffset, + VTy, ElementSize, S)) + return false; - return true; + for (const auto &SI : SplitUses) + if (!isVectorPromotionViableForSlice(DL, SliceBeginOffset, SliceEndOffset, + VTy, ElementSize, *SI)) + return false; + + return true; + }; + for (VectorType *VTy : CandidateTys) + if (CheckVectorTypeForPromotion(VTy)) + return VTy; + + return nullptr; } /// \brief Test whether a slice of an alloca is valid for integer widening. @@ -1583,23 +1807,26 @@ isVectorPromotionViable(const DataLayout &DL, Type *AllocaTy, AllocaSlices &S, static bool isIntegerWideningViableForSlice(const DataLayout &DL, Type *AllocaTy, uint64_t AllocBeginOffset, - uint64_t Size, AllocaSlices &S, - AllocaSlices::const_iterator I, + uint64_t Size, + const Slice &S, bool &WholeAllocaOp) { - uint64_t RelBegin = I->beginOffset() - AllocBeginOffset; - uint64_t RelEnd = I->endOffset() - AllocBeginOffset; + uint64_t RelBegin = S.beginOffset() - AllocBeginOffset; + uint64_t RelEnd = S.endOffset() - AllocBeginOffset; // We can't reasonably handle cases where the load or store extends past // the end of the aloca's type and into its padding. if (RelEnd > Size) return false; - Use *U = I->getUse(); + Use *U = S.getUse(); if (LoadInst *LI = dyn_cast(U->getUser())) { if (LI->isVolatile()) return false; - if (RelBegin == 0 && RelEnd == Size) + // Note that we don't count vector loads or stores as whole-alloca + // operations which enable integer widening because we would prefer to use + // vector widening instead. + if (!isa(LI->getType()) && RelBegin == 0 && RelEnd == Size) WholeAllocaOp = true; if (IntegerType *ITy = dyn_cast(LI->getType())) { if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) @@ -1614,7 +1841,10 @@ static bool isIntegerWideningViableForSlice(const DataLayout &DL, Type *ValueTy = SI->getValueOperand()->getType(); if (SI->isVolatile()) return false; - if (RelBegin == 0 && RelEnd == Size) + // Note that we don't count vector loads or stores as whole-alloca + // operations which enable integer widening because we would prefer to use + // vector widening instead. 
+ if (!isa(ValueTy) && RelBegin == 0 && RelEnd == Size) WholeAllocaOp = true; if (IntegerType *ITy = dyn_cast(ValueTy)) { if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) @@ -1628,7 +1858,7 @@ static bool isIntegerWideningViableForSlice(const DataLayout &DL, } else if (MemIntrinsic *MI = dyn_cast(U->getUser())) { if (MI->isVolatile() || !isa(MI->getLength())) return false; - if (!I->isSplittable()) + if (!S.isSplittable()) return false; // Skip any unsplittable intrinsics. } else if (IntrinsicInst *II = dyn_cast(U->getUser())) { if (II->getIntrinsicID() != Intrinsic::lifetime_start && @@ -1649,9 +1879,8 @@ static bool isIntegerWideningViableForSlice(const DataLayout &DL, /// promote the resulting alloca. static bool isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy, - uint64_t AllocBeginOffset, AllocaSlices &S, - AllocaSlices::const_iterator I, - AllocaSlices::const_iterator E, + uint64_t AllocBeginOffset, + AllocaSlices::const_range Slices, ArrayRef SplitUses) { uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy); // Don't create integer types larger than the maximum bitwidth. @@ -1677,18 +1906,17 @@ isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy, // promote due to some other unsplittable entry (which we may make splittable // later). However, if there are only splittable uses, go ahead and assume // that we cover the alloca. - bool WholeAllocaOp = (I != E) ? false : DL.isLegalInteger(SizeInBits); + bool WholeAllocaOp = + Slices.begin() != Slices.end() ? false : DL.isLegalInteger(SizeInBits); - for (; I != E; ++I) + for (const auto &S : Slices) if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size, - S, I, WholeAllocaOp)) + S, WholeAllocaOp)) return false; - for (ArrayRef::const_iterator SUI = SplitUses.begin(), - SUE = SplitUses.end(); - SUI != SUE; ++SUI) + for (const auto &SI : SplitUses) if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size, - S, *SUI, WholeAllocaOp)) + *SI, WholeAllocaOp)) return false; return WholeAllocaOp; @@ -1837,12 +2065,18 @@ class AllocaSliceRewriter : public InstVisitor { typedef llvm::InstVisitor Base; const DataLayout &DL; - AllocaSlices &S; + AllocaSlices &AS; SROA &Pass; AllocaInst &OldAI, &NewAI; const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset; Type *NewAllocaTy; + // This is a convenience and flag variable that will be null unless the new + // alloca's integer operations should be widened to this integer type due to + // passing isIntegerWideningViable above. If it is non-null, the desired + // integer type will be stored here for easy access during rewriting. + IntegerType *IntTy; + // If we are rewriting an alloca partition which can be written as pure // vector operations, we stash extra information here. When VecTy is // non-null, we have some strict guarantees about the rewritten alloca: @@ -1856,54 +2090,56 @@ class AllocaSliceRewriter : public InstVisitor { Type *ElementTy; uint64_t ElementSize; - // This is a convenience and flag variable that will be null unless the new - // alloca's integer operations should be widened to this integer type due to - // passing isIntegerWideningViable above. If it is non-null, the desired - // integer type will be stored here for easy access during rewriting. - IntegerType *IntTy; - - // The offset of the slice currently being rewritten. + // The original offset of the slice currently being rewritten relative to + // the original alloca. 
uint64_t BeginOffset, EndOffset; + // The new offsets of the slice currently being rewritten relative to the + // original alloca. + uint64_t NewBeginOffset, NewEndOffset; + + uint64_t SliceSize; bool IsSplittable; bool IsSplit; Use *OldUse; Instruction *OldPtr; - // Output members carrying state about the result of visiting and rewriting - // the slice of the alloca. - bool IsUsedByRewrittenSpeculatableInstructions; + // Track post-rewrite users which are PHI nodes and Selects. + SmallPtrSetImpl &PHIUsers; + SmallPtrSetImpl &SelectUsers; // Utility IR builder, whose name prefix is setup for each visited use, and // the insertion point is set to point to the user. IRBuilderTy IRB; public: - AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &S, SROA &Pass, + AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI, - uint64_t NewBeginOffset, uint64_t NewEndOffset, - bool IsVectorPromotable = false, - bool IsIntegerPromotable = false) - : DL(DL), S(S), Pass(Pass), OldAI(OldAI), NewAI(NewAI), - NewAllocaBeginOffset(NewBeginOffset), NewAllocaEndOffset(NewEndOffset), + uint64_t NewAllocaBeginOffset, + uint64_t NewAllocaEndOffset, bool IsIntegerPromotable, + VectorType *PromotableVecTy, + SmallPtrSetImpl &PHIUsers, + SmallPtrSetImpl &SelectUsers) + : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI), + NewAllocaBeginOffset(NewAllocaBeginOffset), + NewAllocaEndOffset(NewAllocaEndOffset), NewAllocaTy(NewAI.getAllocatedType()), - VecTy(IsVectorPromotable ? cast(NewAllocaTy) : 0), - ElementTy(VecTy ? VecTy->getElementType() : 0), - ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0), IntTy(IsIntegerPromotable ? Type::getIntNTy( NewAI.getContext(), DL.getTypeSizeInBits(NewAI.getAllocatedType())) - : 0), + : nullptr), + VecTy(PromotableVecTy), + ElementTy(VecTy ? VecTy->getElementType() : nullptr), + ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0), BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(), - OldPtr(), IsUsedByRewrittenSpeculatableInstructions(false), + OldPtr(), PHIUsers(PHIUsers), SelectUsers(SelectUsers), IRB(NewAI.getContext(), ConstantFolder()) { if (VecTy) { assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 && "Only multiple-of-8 sized vector elements are viable"); ++NumVectorized; } - assert((!IsVectorPromotable && !IsIntegerPromotable) || - IsVectorPromotable != IsIntegerPromotable); + assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy)); } bool visit(AllocaSlices::const_iterator I) { @@ -1914,6 +2150,14 @@ public: IsSplit = BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; + // Compute the intersecting offset range. + assert(BeginOffset < NewAllocaEndOffset); + assert(EndOffset > NewAllocaBeginOffset); + NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); + NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); + + SliceSize = NewEndOffset - NewBeginOffset; + OldUse = I->getUse(); OldPtr = cast(OldUse->get()); @@ -1928,20 +2172,6 @@ public: return CanSROA; } - /// \brief Query whether this slice is used by speculatable instructions after - /// rewriting. - /// - /// These instructions (PHIs and Selects currently) require the alloca slice - /// to run back through the rewriter. Thus, they are promotable, but not on - /// this iteration. This is distinct from a slice which is unpromotable for - /// some other reason, in which case we don't even want to perform the - /// speculation. 
This can be querried at any time and reflects whether (at - /// that point) a visit call has rewritten a speculatable instruction on the - /// current slice. - bool isUsedByRewrittenSpeculatableInstructions() const { - return IsUsedByRewrittenSpeculatableInstructions; - } - private: // Make sure the other visit overloads are visible. using Base::visit; @@ -1952,30 +2182,53 @@ private: llvm_unreachable("No rewrite rule for this instruction!"); } - Value *getAdjustedAllocaPtr(IRBuilderTy &IRB, uint64_t Offset, - Type *PointerTy) { - assert(Offset >= NewAllocaBeginOffset); - return getAdjustedPtr(IRB, DL, &NewAI, APInt(DL.getPointerSizeInBits(), - Offset - NewAllocaBeginOffset), - PointerTy); + Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) { + // Note that the offset computation can use BeginOffset or NewBeginOffset + // interchangeably for unsplit slices. + assert(IsSplit || BeginOffset == NewBeginOffset); + uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; + +#ifndef NDEBUG + StringRef OldName = OldPtr->getName(); + // Skip through the last '.sroa.' component of the name. + size_t LastSROAPrefix = OldName.rfind(".sroa."); + if (LastSROAPrefix != StringRef::npos) { + OldName = OldName.substr(LastSROAPrefix + strlen(".sroa.")); + // Look for an SROA slice index. + size_t IndexEnd = OldName.find_first_not_of("0123456789"); + if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') { + // Strip the index and look for the offset. + OldName = OldName.substr(IndexEnd + 1); + size_t OffsetEnd = OldName.find_first_not_of("0123456789"); + if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.') + // Strip the offset. + OldName = OldName.substr(OffsetEnd + 1); + } + } + // Strip any SROA suffixes as well. + OldName = OldName.substr(0, OldName.find(".sroa_")); +#endif + + return getAdjustedPtr(IRB, DL, &NewAI, + APInt(DL.getPointerSizeInBits(), Offset), PointerTy, +#ifndef NDEBUG + Twine(OldName) + "." +#else + Twine() +#endif + ); } - /// \brief Compute suitable alignment to access an offset into the new alloca. - unsigned getOffsetAlign(uint64_t Offset) { + /// \brief Compute suitable alignment to access this slice of the *new* alloca. + /// + /// You can optionally pass a type to this routine and if that type's ABI + /// alignment is itself suitable, this will return zero. + unsigned getSliceAlign(Type *Ty = nullptr) { unsigned NewAIAlign = NewAI.getAlignment(); if (!NewAIAlign) NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType()); - return MinAlign(NewAIAlign, Offset); - } - - /// \brief Compute suitable alignment to access a type at an offset of the - /// new alloca. - /// - /// \returns zero if the type's ABI alignment is a suitable alignment, - /// otherwise returns the maximal suitable alignment. - unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) { - unsigned Align = getOffsetAlign(Offset); - return Align == DL.getABITypeAlignment(Ty) ? 0 : Align; + unsigned Align = MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset); + return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 
0 : Align; } unsigned getIndex(uint64_t Offset) { @@ -1993,8 +2246,7 @@ private: Pass.DeadInsts.insert(I); } - Value *rewriteVectorizedLoadInst(uint64_t NewBeginOffset, - uint64_t NewEndOffset) { + Value *rewriteVectorizedLoadInst() { unsigned BeginIndex = getIndex(NewBeginOffset); unsigned EndIndex = getIndex(NewEndOffset); assert(EndIndex > BeginIndex && "Empty vector!"); @@ -2004,8 +2256,7 @@ private: return extractVector(IRB, V, BeginIndex, EndIndex, "vec"); } - Value *rewriteIntegerLoad(LoadInst &LI, uint64_t NewBeginOffset, - uint64_t NewEndOffset) { + Value *rewriteIntegerLoad(LoadInst &LI) { assert(IntTy && "We cannot insert an integer to the alloca"); assert(!LI.isVolatile()); Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), @@ -2024,32 +2275,23 @@ private: Value *OldOp = LI.getOperand(0); assert(OldOp == OldPtr); - // Compute the intersecting offset range. - assert(BeginOffset < NewAllocaEndOffset); - assert(EndOffset > NewAllocaBeginOffset); - uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); - uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); - - uint64_t Size = NewEndOffset - NewBeginOffset; - - Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), Size * 8) + Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8) : LI.getType(); bool IsPtrAdjusted = false; Value *V; if (VecTy) { - V = rewriteVectorizedLoadInst(NewBeginOffset, NewEndOffset); + V = rewriteVectorizedLoadInst(); } else if (IntTy && LI.getType()->isIntegerTy()) { - V = rewriteIntegerLoad(LI, NewBeginOffset, NewEndOffset); + V = rewriteIntegerLoad(LI); } else if (NewBeginOffset == NewAllocaBeginOffset && canConvertValue(DL, NewAllocaTy, LI.getType())) { V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - LI.isVolatile(), "load"); + LI.isVolatile(), LI.getName()); } else { Type *LTy = TargetTy->getPointerTo(); - V = IRB.CreateAlignedLoad( - getAdjustedAllocaPtr(IRB, NewBeginOffset, LTy), - getOffsetTypeAlign(TargetTy, NewBeginOffset - NewAllocaBeginOffset), - LI.isVolatile(), "load"); + V = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy), + getSliceAlign(TargetTy), LI.isVolatile(), + LI.getName()); IsPtrAdjusted = true; } V = convertValue(DL, IRB, V, TargetTy); @@ -2058,13 +2300,13 @@ private: assert(!LI.isVolatile()); assert(LI.getType()->isIntegerTy() && "Only integer type loads and stores are split"); - assert(Size < DL.getTypeStoreSize(LI.getType()) && + assert(SliceSize < DL.getTypeStoreSize(LI.getType()) && "Split load isn't smaller than original load"); assert(LI.getType()->getIntegerBitWidth() == DL.getTypeStoreSizeInBits(LI.getType()) && "Non-byte-multiple bit width"); // Move the insertion point just past the load so that we can refer to it. - IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI))); + IRB.SetInsertPoint(std::next(BasicBlock::iterator(&LI))); // Create a placeholder value with the same type as LI to use as the // basis for the new value. 
This allows us to replace the uses of LI with // the computed value, and then replace the placeholder with LI, leaving @@ -2086,9 +2328,7 @@ private: return !LI.isVolatile() && !IsPtrAdjusted; } - bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp, - uint64_t NewBeginOffset, - uint64_t NewEndOffset) { + bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) { if (V->getType() != VecTy) { unsigned BeginIndex = getIndex(NewBeginOffset); unsigned EndIndex = getIndex(NewEndOffset); @@ -2114,8 +2354,7 @@ private: return true; } - bool rewriteIntegerStore(Value *V, StoreInst &SI, - uint64_t NewBeginOffset, uint64_t NewEndOffset) { + bool rewriteIntegerStore(Value *V, StoreInst &SI) { assert(IntTy && "We cannot extract an integer from the alloca"); assert(!SI.isVolatile()); if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) { @@ -2148,30 +2387,22 @@ private: if (AllocaInst *AI = dyn_cast(V->stripInBoundsOffsets())) Pass.PostPromotionWorklist.insert(AI); - // Compute the intersecting offset range. - assert(BeginOffset < NewAllocaEndOffset); - assert(EndOffset > NewAllocaBeginOffset); - uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); - uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); - - uint64_t Size = NewEndOffset - NewBeginOffset; - if (Size < DL.getTypeStoreSize(V->getType())) { + if (SliceSize < DL.getTypeStoreSize(V->getType())) { assert(!SI.isVolatile()); assert(V->getType()->isIntegerTy() && "Only integer type loads and stores are split"); assert(V->getType()->getIntegerBitWidth() == DL.getTypeStoreSizeInBits(V->getType()) && "Non-byte-multiple bit width"); - IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8); + IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8); V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset, "extract"); } if (VecTy) - return rewriteVectorizedStoreInst(V, SI, OldOp, NewBeginOffset, - NewEndOffset); + return rewriteVectorizedStoreInst(V, SI, OldOp); if (IntTy && V->getType()->isIntegerTy()) - return rewriteIntegerStore(V, SI, NewBeginOffset, NewEndOffset); + return rewriteIntegerStore(V, SI); StoreInst *NewSI; if (NewBeginOffset == NewAllocaBeginOffset && @@ -2181,12 +2412,9 @@ private: NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), SI.isVolatile()); } else { - Value *NewPtr = getAdjustedAllocaPtr(IRB, NewBeginOffset, - V->getType()->getPointerTo()); - NewSI = IRB.CreateAlignedStore( - V, NewPtr, getOffsetTypeAlign( - V->getType(), NewBeginOffset - NewAllocaBeginOffset), - SI.isVolatile()); + Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo()); + NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()), + SI.isVolatile()); } (void)NewSI; Pass.DeadInsts.insert(&SI); @@ -2238,11 +2466,10 @@ private: // pointer to the new alloca. if (!isa(II.getLength())) { assert(!IsSplit); - assert(BeginOffset >= NewAllocaBeginOffset); - II.setDest( - getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType())); + assert(NewBeginOffset == BeginOffset); + II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType())); Type *CstTy = II.getAlignmentCst()->getType(); - II.setAlignment(ConstantInt::get(CstTy, getOffsetAlign(BeginOffset))); + II.setAlignment(ConstantInt::get(CstTy, getSliceAlign())); deleteIfTriviallyDead(OldPtr); return false; @@ -2254,26 +2481,20 @@ private: Type *AllocaTy = NewAI.getAllocatedType(); Type *ScalarTy = AllocaTy->getScalarType(); - // Compute the intersecting offset range. 
- assert(BeginOffset < NewAllocaEndOffset); - assert(EndOffset > NewAllocaBeginOffset); - uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); - uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); - uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset; - // If this doesn't map cleanly onto the alloca type, and that type isn't // a single value type, just emit a memset. if (!VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || + SliceSize != DL.getTypeStoreSize(AllocaTy) || !AllocaTy->isSingleValueType() || !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) || DL.getTypeSizeInBits(ScalarTy)%8 != 0)) { Type *SizeTy = II.getLength()->getType(); Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); CallInst *New = IRB.CreateMemSet( - getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getRawDest()->getType()), - II.getValue(), Size, getOffsetAlign(SliceOffset), II.isVolatile()); + getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size, + getSliceAlign(), II.isVolatile()); (void)New; DEBUG(dbgs() << " to: " << *New << "\n"); return false; @@ -2350,25 +2571,11 @@ private: DEBUG(dbgs() << " original: " << II << "\n"); - // Compute the intersecting offset range. - assert(BeginOffset < NewAllocaEndOffset); - assert(EndOffset > NewAllocaBeginOffset); - uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); - uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); - - assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr); - bool IsDest = II.getRawDest() == OldPtr; + bool IsDest = &II.getRawDestUse() == OldUse; + assert((IsDest && II.getRawDest() == OldPtr) || + (!IsDest && II.getRawSource() == OldPtr)); - // Compute the relative offset within the transfer. - unsigned IntPtrWidth = DL.getPointerSizeInBits(); - APInt RelOffset(IntPtrWidth, NewBeginOffset - BeginOffset); - - unsigned Align = II.getAlignment(); - uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset; - if (Align > 1) - Align = - MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(), - MinAlign(II.getAlignment(), getOffsetAlign(SliceOffset))); + unsigned SliceAlign = getSliceAlign(); // For unsplit intrinsics, we simply modify the source and destination // pointers in place. This isn't just an optimization, it is a matter of @@ -2378,19 +2585,20 @@ private: // memcpy, and so simply updating the pointers is the necessary for us to // update both source and dest of a single call. if (!IsSplittable) { - Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource(); + Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); if (IsDest) - II.setDest( - getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType())); + II.setDest(AdjustedPtr); else - II.setSource(getAdjustedAllocaPtr(IRB, BeginOffset, - II.getRawSource()->getType())); + II.setSource(AdjustedPtr); - Type *CstTy = II.getAlignmentCst()->getType(); - II.setAlignment(ConstantInt::get(CstTy, Align)); + if (II.getAlignment() > SliceAlign) { + Type *CstTy = II.getAlignmentCst()->getType(); + II.setAlignment( + ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign))); + } DEBUG(dbgs() << " to: " << II << "\n"); - deleteIfTriviallyDead(OldOp); + deleteIfTriviallyDead(OldPtr); return false; } // For split transfer intrinsics we have an incredibly useful assurance: @@ -2401,10 +2609,11 @@ private: // If this doesn't map cleanly onto the alloca type, and that type isn't // a single value type, just emit a memcpy. 
- bool EmitMemCpy - = !VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset || - EndOffset < NewAllocaEndOffset || - !NewAI.getAllocatedType()->isSingleValueType()); + bool EmitMemCpy = + !VecTy && !IntTy && + (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || + SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) || + !NewAI.getAllocatedType()->isSingleValueType()); // If we're just going to emit a memcpy, the alloca hasn't changed, and the // size hasn't been shrunk based on analysis of the viable range, this is @@ -2426,37 +2635,39 @@ private: // alloca that should be re-examined after rewriting this instruction. Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); if (AllocaInst *AI - = dyn_cast(OtherPtr->stripInBoundsOffsets())) + = dyn_cast(OtherPtr->stripInBoundsOffsets())) { + assert(AI != &OldAI && AI != &NewAI && + "Splittable transfers cannot reach the same alloca on both ends."); Pass.Worklist.insert(AI); + } - if (EmitMemCpy) { - Type *OtherPtrTy = IsDest ? II.getRawSource()->getType() - : II.getRawDest()->getType(); + Type *OtherPtrTy = OtherPtr->getType(); + unsigned OtherAS = OtherPtrTy->getPointerAddressSpace(); + // Compute the relative offset for the other pointer within the transfer. + unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS); + APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset); + unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1, + OtherOffset.zextOrTrunc(64).getZExtValue()); + + if (EmitMemCpy) { // Compute the other pointer, folding as much as possible to produce // a single, simple GEP in most cases. - OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy); + OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, + OtherPtr->getName() + "."); - Value *OurPtr = getAdjustedAllocaPtr( - IRB, NewBeginOffset, - IsDest ? II.getRawDest()->getType() : II.getRawSource()->getType()); + Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); Type *SizeTy = II.getLength()->getType(); Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); - CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr, - IsDest ? OtherPtr : OurPtr, - Size, Align, II.isVolatile()); + CallInst *New = IRB.CreateMemCpy( + IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size, + MinAlign(SliceAlign, OtherAlign), II.isVolatile()); (void)New; DEBUG(dbgs() << " to: " << *New << "\n"); return false; } - // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy - // is equivalent to 1, but that isn't true if we end up rewriting this as - // a load or store. - if (!Align) - Align = 1; - bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && NewEndOffset == NewAllocaEndOffset; uint64_t Size = NewEndOffset - NewBeginOffset; @@ -2464,24 +2675,32 @@ private: unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; unsigned NumElements = EndIndex - BeginIndex; IntegerType *SubIntTy - = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0; + = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : nullptr; - Type *OtherPtrTy = NewAI.getType(); + // Reset the other pointer type to match the register type we're going to + // use, but using the address space of the original other pointer. 
if (VecTy && !IsWholeAlloca) { if (NumElements == 1) OtherPtrTy = VecTy->getElementType(); else OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements); - OtherPtrTy = OtherPtrTy->getPointerTo(); + OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS); } else if (IntTy && !IsWholeAlloca) { - OtherPtrTy = SubIntTy->getPointerTo(); + OtherPtrTy = SubIntTy->getPointerTo(OtherAS); + } else { + OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS); } - Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy); + Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, + OtherPtr->getName() + "."); + unsigned SrcAlign = OtherAlign; Value *DstPtr = &NewAI; - if (!IsDest) + unsigned DstAlign = SliceAlign; + if (!IsDest) { std::swap(SrcPtr, DstPtr); + std::swap(SrcAlign, DstAlign); + } Value *Src; if (VecTy && !IsWholeAlloca && !IsDest) { @@ -2495,7 +2714,7 @@ private: uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); } else { - Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(), + Src = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(), "copyload"); } @@ -2513,7 +2732,7 @@ private: } StoreInst *Store = cast( - IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile())); + IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); (void)Store; DEBUG(dbgs() << " to: " << *Store << "\n"); return !II.isVolatile(); @@ -2525,20 +2744,13 @@ private: DEBUG(dbgs() << " original: " << II << "\n"); assert(II.getArgOperand(1) == OldPtr); - // Compute the intersecting offset range. - assert(BeginOffset < NewAllocaEndOffset); - assert(EndOffset > NewAllocaBeginOffset); - uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); - uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); - // Record this instruction for deletion. Pass.DeadInsts.insert(&II); ConstantInt *Size = ConstantInt::get(cast(II.getArgOperand(0)->getType()), NewEndOffset - NewBeginOffset); - Value *Ptr = - getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getArgOperand(1)->getType()); + Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); Value *New; if (II.getIntrinsicID() == Intrinsic::lifetime_start) New = IRB.CreateLifetimeStart(Ptr, Size); @@ -2559,28 +2771,25 @@ private: // as local as possible to the PHI. To do that, we re-use the location of // the old pointer, which necessarily must be in the right position to // dominate the PHI. - IRBuilderTy PtrBuilder(OldPtr); - PtrBuilder.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + - "."); + IRBuilderTy PtrBuilder(IRB); + if (isa(OldPtr)) + PtrBuilder.SetInsertPoint(OldPtr->getParent()->getFirstInsertionPt()); + else + PtrBuilder.SetInsertPoint(OldPtr); + PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc()); - Value *NewPtr = - getAdjustedAllocaPtr(PtrBuilder, BeginOffset, OldPtr->getType()); + Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType()); // Replace the operands which were using the old pointer. std::replace(PN.op_begin(), PN.op_end(), cast(OldPtr), NewPtr); DEBUG(dbgs() << " to: " << PN << "\n"); deleteIfTriviallyDead(OldPtr); - // Check whether we can speculate this PHI node, and if so remember that - // fact and queue it up for another iteration after the speculation - // occurs. - if (isSafePHIToSpeculate(PN, &DL)) { - Pass.SpeculatablePHIs.insert(&PN); - IsUsedByRewrittenSpeculatableInstructions = true; - return true; - } - - return false; // PHIs can't be promoted on their own. 
+ // PHIs can't be promoted on their own, but often can be speculated. We + // check the speculation outside of the rewriter so that we see the + // fully-rewritten alloca. + PHIUsers.insert(&PN); + return true; } bool visitSelectInst(SelectInst &SI) { @@ -2590,7 +2799,7 @@ private: assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); - Value *NewPtr = getAdjustedAllocaPtr(IRB, BeginOffset, OldPtr->getType()); + Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); // Replace the operands which were using the old pointer. if (SI.getOperand(1) == OldPtr) SI.setOperand(1, NewPtr); @@ -2600,16 +2809,11 @@ private: DEBUG(dbgs() << " to: " << SI << "\n"); deleteIfTriviallyDead(OldPtr); - // Check whether we can speculate this select instruction, and if so - // remember that fact and queue it up for another iteration after the - // speculation occurs. - if (isSafeSelectToSpeculate(SI, &DL)) { - Pass.SpeculatableSelects.insert(&SI); - IsUsedByRewrittenSpeculatableInstructions = true; - return true; - } - - return false; // Selects can't be promoted on their own. + // Selects can't be promoted on their own, but often can be speculated. We + // check the speculation outside of the rewriter so that we see the + // fully-rewritten alloca. + SelectUsers.insert(&SI); + return true; } }; @@ -2657,10 +2861,9 @@ private: /// Enqueue all the users of the given instruction for further processing. /// This uses a set to de-duplicate users. void enqueueUsers(Instruction &I) { - for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE; - ++UI) - if (Visited.insert(*UI)) - Queue.push_back(&UI.getUse()); + for (Use &U : I.uses()) + if (Visited.insert(U.getUser()).second) + Queue.push_back(&U); } // Conservative default is to not rewrite anything. @@ -2873,22 +3076,22 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, return stripAggregateTypeWrapping(DL, Ty); if (Offset > DL.getTypeAllocSize(Ty) || (DL.getTypeAllocSize(Ty) - Offset) < Size) - return 0; + return nullptr; if (SequentialType *SeqTy = dyn_cast(Ty)) { // We can't partition pointers... if (SeqTy->isPointerTy()) - return 0; + return nullptr; Type *ElementTy = SeqTy->getElementType(); uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); uint64_t NumSkippedElements = Offset / ElementSize; if (ArrayType *ArrTy = dyn_cast(SeqTy)) { if (NumSkippedElements >= ArrTy->getNumElements()) - return 0; + return nullptr; } else if (VectorType *VecTy = dyn_cast(SeqTy)) { if (NumSkippedElements >= VecTy->getNumElements()) - return 0; + return nullptr; } Offset -= NumSkippedElements * ElementSize; @@ -2896,7 +3099,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, if (Offset > 0 || Size < ElementSize) { // Bail if the partition ends in a different array element. if ((Offset + Size) > ElementSize) - return 0; + return nullptr; // Recurse through the element type trying to peel off offset bytes. 
return getTypePartition(DL, ElementTy, Offset, Size); } @@ -2907,20 +3110,20 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, assert(Size > ElementSize); uint64_t NumElements = Size / ElementSize; if (NumElements * ElementSize != Size) - return 0; + return nullptr; return ArrayType::get(ElementTy, NumElements); } StructType *STy = dyn_cast(Ty); if (!STy) - return 0; + return nullptr; const StructLayout *SL = DL.getStructLayout(STy); if (Offset >= SL->getSizeInBytes()) - return 0; + return nullptr; uint64_t EndOffset = Offset + Size; if (EndOffset > SL->getSizeInBytes()) - return 0; + return nullptr; unsigned Index = SL->getElementContainingOffset(Offset); Offset -= SL->getElementOffset(Index); @@ -2928,12 +3131,12 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, Type *ElementTy = STy->getElementType(Index); uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); if (Offset >= ElementSize) - return 0; // The offset points into alignment padding. + return nullptr; // The offset points into alignment padding. // See if any partition must be contained by the element. if (Offset > 0 || Size < ElementSize) { if ((Offset + Size) > ElementSize) - return 0; + return nullptr; return getTypePartition(DL, ElementTy, Offset, Size); } assert(Offset == 0); @@ -2946,14 +3149,14 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, if (EndOffset < SL->getSizeInBytes()) { unsigned EndIndex = SL->getElementContainingOffset(EndOffset); if (Index == EndIndex) - return 0; // Within a single element and its padding. + return nullptr; // Within a single element and its padding. // Don't try to form "natural" types if the elements don't line up with the // expected size. // FIXME: We could potentially recurse down through the last element in the // sub-struct to find a natural end point. if (SL->getElementOffset(EndIndex) != EndOffset) - return 0; + return nullptr; assert(Index < EndIndex); EE = STy->element_begin() + EndIndex; @@ -2964,7 +3167,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, STy->isPacked()); const StructLayout *SubSL = DL.getStructLayout(SubTy); if (Size != SubSL->getSizeInBytes()) - return 0; // The sub-struct doesn't have quite the size needed. + return nullptr; // The sub-struct doesn't have quite the size needed. return SubTy; } @@ -2979,7 +3182,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, /// appropriate new offsets. It also evaluates how successful the rewrite was /// at enabling promotion and if it was successful queues the alloca to be /// promoted. -bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S, +bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS, AllocaSlices::iterator B, AllocaSlices::iterator E, int64_t BeginOffset, int64_t EndOffset, ArrayRef SplitUses) { @@ -2989,7 +3192,7 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S, // Try to compute a friendly type for this partition of the alloca. This // won't always succeed, in which case we fall back to a legal integer type // or an i8 array of an appropriate size. 
- Type *SliceTy = 0; + Type *SliceTy = nullptr; if (Type *CommonUseTy = findCommonType(B, E, EndOffset)) if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize) SliceTy = CommonUseTy; @@ -3005,12 +3208,16 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S, SliceTy = ArrayType::get(Type::getInt8Ty(*C), SliceSize); assert(DL->getTypeAllocSize(SliceTy) >= SliceSize); - bool IsVectorPromotable = isVectorPromotionViable( - *DL, SliceTy, S, BeginOffset, EndOffset, B, E, SplitUses); + bool IsIntegerPromotable = isIntegerWideningViable( + *DL, SliceTy, BeginOffset, AllocaSlices::const_range(B, E), SplitUses); - bool IsIntegerPromotable = - !IsVectorPromotable && - isIntegerWideningViable(*DL, SliceTy, BeginOffset, S, B, E, SplitUses); + VectorType *VecTy = + IsIntegerPromotable + ? nullptr + : isVectorPromotionViable(*DL, BeginOffset, EndOffset, + AllocaSlices::const_range(B, E), SplitUses); + if (VecTy) + SliceTy = VecTy; // Check for the case where we're going to rewrite to a new alloca of the // exact same type as the original, and with the same access offsets. In that @@ -3036,8 +3243,9 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S, // the alloca's alignment unconstrained. if (Alignment <= DL->getABITypeAlignment(SliceTy)) Alignment = 0; - NewAI = new AllocaInst(SliceTy, 0, Alignment, - AI.getName() + ".sroa." + Twine(B - S.begin()), &AI); + NewAI = + new AllocaInst(SliceTy, nullptr, Alignment, + AI.getName() + ".sroa." + Twine(B - AS.begin()), &AI); ++NumNewAllocas; } @@ -3045,90 +3253,85 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S, << "[" << BeginOffset << "," << EndOffset << ") to: " << *NewAI << "\n"); - // Track the high watermark on several worklists that are only relevant for + // Track the high watermark on the worklist as it is only relevant for // promoted allocas. We will reset it to this point if the alloca is not in // fact scheduled for promotion. 
unsigned PPWOldSize = PostPromotionWorklist.size(); - unsigned SPOldSize = SpeculatablePHIs.size(); - unsigned SSOldSize = SpeculatableSelects.size(); - -#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS) unsigned NumUses = 0; -#endif + SmallPtrSet PHIUsers; + SmallPtrSet SelectUsers; - AllocaSliceRewriter Rewriter(*DL, S, *this, AI, *NewAI, BeginOffset, - EndOffset, IsVectorPromotable, - IsIntegerPromotable); + AllocaSliceRewriter Rewriter(*DL, AS, *this, AI, *NewAI, BeginOffset, + EndOffset, IsIntegerPromotable, VecTy, PHIUsers, + SelectUsers); bool Promotable = true; - for (ArrayRef::const_iterator SUI = SplitUses.begin(), - SUE = SplitUses.end(); - SUI != SUE; ++SUI) { + for (auto & SplitUse : SplitUses) { DEBUG(dbgs() << " rewriting split "); - DEBUG(S.printSlice(dbgs(), *SUI, "")); - Promotable &= Rewriter.visit(*SUI); -#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS) + DEBUG(AS.printSlice(dbgs(), SplitUse, "")); + Promotable &= Rewriter.visit(SplitUse); ++NumUses; -#endif } for (AllocaSlices::iterator I = B; I != E; ++I) { DEBUG(dbgs() << " rewriting "); - DEBUG(S.printSlice(dbgs(), I, "")); + DEBUG(AS.printSlice(dbgs(), I, "")); Promotable &= Rewriter.visit(I); -#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS) ++NumUses; -#endif } -#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS) NumAllocaPartitionUses += NumUses; MaxUsesPerAllocaPartition = std::max(NumUses, MaxUsesPerAllocaPartition); -#endif - if (Promotable && !Rewriter.isUsedByRewrittenSpeculatableInstructions()) { - DEBUG(dbgs() << " and queuing for promotion\n"); - PromotableAllocas.push_back(NewAI); - } else if (NewAI != &AI || - (Promotable && - Rewriter.isUsedByRewrittenSpeculatableInstructions())) { + // Now that we've processed all the slices in the new partition, check if any + // PHIs or Selects would block promotion. + for (SmallPtrSetImpl::iterator I = PHIUsers.begin(), + E = PHIUsers.end(); + I != E; ++I) + if (!isSafePHIToSpeculate(**I, DL)) { + Promotable = false; + PHIUsers.clear(); + SelectUsers.clear(); + break; + } + for (SmallPtrSetImpl::iterator I = SelectUsers.begin(), + E = SelectUsers.end(); + I != E; ++I) + if (!isSafeSelectToSpeculate(**I, DL)) { + Promotable = false; + PHIUsers.clear(); + SelectUsers.clear(); + break; + } + + if (Promotable) { + if (PHIUsers.empty() && SelectUsers.empty()) { + // Promote the alloca. + PromotableAllocas.push_back(NewAI); + } else { + // If we have either PHIs or Selects to speculate, add them to those + // worklists and re-queue the new alloca so that we promote in on the + // next iteration. + for (PHINode *PHIUser : PHIUsers) + SpeculatablePHIs.insert(PHIUser); + for (SelectInst *SelectUser : SelectUsers) + SpeculatableSelects.insert(SelectUser); + Worklist.insert(NewAI); + } + } else { // If we can't promote the alloca, iterate on it to check for new // refinements exposed by splitting the current alloca. Don't iterate on an // alloca which didn't actually change and didn't get promoted. - // - // Alternatively, if we could promote the alloca but have speculatable - // instructions then we will speculate them after finishing our processing - // of the original alloca. Mark the new one for re-visiting in the next - // iteration so the speculated operations can be rewritten. - // - // FIXME: We should actually track whether the rewriter changed anything. - Worklist.insert(NewAI); - } - - // Drop any post-promotion work items if promotion didn't happen. 
- if (!Promotable) { + if (NewAI != &AI) + Worklist.insert(NewAI); + + // Drop any post-promotion work items if promotion didn't happen. while (PostPromotionWorklist.size() > PPWOldSize) PostPromotionWorklist.pop_back(); - while (SpeculatablePHIs.size() > SPOldSize) - SpeculatablePHIs.pop_back(); - while (SpeculatableSelects.size() > SSOldSize) - SpeculatableSelects.pop_back(); } return true; } -namespace { -struct IsSliceEndLessOrEqualTo { - uint64_t UpperBound; - - IsSliceEndLessOrEqualTo(uint64_t UpperBound) : UpperBound(UpperBound) {} - - bool operator()(const AllocaSlices::iterator &I) { - return I->endOffset() <= UpperBound; - } -}; -} - static void removeFinishedSplitUses(SmallVectorImpl &SplitUses, uint64_t &MaxSplitUseEndOffset, uint64_t Offset) { @@ -3140,37 +3343,35 @@ removeFinishedSplitUses(SmallVectorImpl &SplitUses, size_t SplitUsesOldSize = SplitUses.size(); SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(), - IsSliceEndLessOrEqualTo(Offset)), + [Offset](const AllocaSlices::iterator &I) { + return I->endOffset() <= Offset; + }), SplitUses.end()); if (SplitUsesOldSize == SplitUses.size()) return; // Recompute the max. While this is linear, so is remove_if. MaxSplitUseEndOffset = 0; - for (SmallVectorImpl::iterator - SUI = SplitUses.begin(), - SUE = SplitUses.end(); - SUI != SUE; ++SUI) - MaxSplitUseEndOffset = std::max((*SUI)->endOffset(), MaxSplitUseEndOffset); + for (AllocaSlices::iterator SplitUse : SplitUses) + MaxSplitUseEndOffset = + std::max(SplitUse->endOffset(), MaxSplitUseEndOffset); } /// \brief Walks the slices of an alloca and form partitions based on them, /// rewriting each of their uses. -bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) { - if (S.begin() == S.end()) +bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) { + if (AS.begin() == AS.end()) return false; -#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS) unsigned NumPartitions = 0; -#endif - bool Changed = false; SmallVector SplitUses; uint64_t MaxSplitUseEndOffset = 0; - uint64_t BeginOffset = S.begin()->beginOffset(); + uint64_t BeginOffset = AS.begin()->beginOffset(); - for (AllocaSlices::iterator SI = S.begin(), SJ = llvm::next(SI), SE = S.end(); + for (AllocaSlices::iterator SI = AS.begin(), SJ = std::next(SI), + SE = AS.end(); SI != SE; SI = SJ) { uint64_t MaxEndOffset = SI->endOffset(); @@ -3208,11 +3409,9 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) { // we'll have to rewrite uses and erase old split uses. if (BeginOffset < MaxEndOffset) { // Rewrite a sequence of overlapping slices. - Changed |= - rewritePartition(AI, S, SI, SJ, BeginOffset, MaxEndOffset, SplitUses); -#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS) + Changed |= rewritePartition(AI, AS, SI, SJ, BeginOffset, MaxEndOffset, + SplitUses); ++NumPartitions; -#endif removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset, MaxEndOffset); } @@ -3250,11 +3449,9 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) { uint64_t PostSplitEndOffset = SJ == SE ? MaxSplitUseEndOffset : SJ->beginOffset(); - Changed |= rewritePartition(AI, S, SJ, SJ, MaxEndOffset, PostSplitEndOffset, - SplitUses); -#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS) + Changed |= rewritePartition(AI, AS, SJ, SJ, MaxEndOffset, + PostSplitEndOffset, SplitUses); ++NumPartitions; -#endif if (SJ == SE) break; // Skip the rest, we don't need to do any cleanup. 
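[Editor's note: the sketch below is not part of the patch. It is a minimal, standalone C++ illustration of the offset-clamping logic this change hoists into AllocaSliceRewriter::visit() (the NewBeginOffset, NewEndOffset and SliceSize members introduced above); the struct and function names in the sketch are invented for illustration only.]

// Illustrative sketch only -- not part of SROA.cpp. It mirrors how a slice's
// [BeginOffset, EndOffset) range is clamped to the partition being rewritten,
// which the patch now computes once in visit() instead of in every visitXxx helper.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>

struct ClampedSlice {
  uint64_t NewBeginOffset; // start of the slice, clamped to the partition
  uint64_t NewEndOffset;   // end of the slice, clamped to the partition
  uint64_t SliceSize;      // bytes of the slice that fall inside the partition
  bool IsSplit;            // true when the slice extends past the partition
};

// Hypothetical helper: clamp one slice to the new alloca's partition.
static ClampedSlice clampToPartition(uint64_t BeginOffset, uint64_t EndOffset,
                                     uint64_t NewAllocaBeginOffset,
                                     uint64_t NewAllocaEndOffset) {
  // The slice must overlap the partition at all, exactly as asserted in visit().
  assert(BeginOffset < NewAllocaEndOffset && EndOffset > NewAllocaBeginOffset);
  ClampedSlice C;
  C.NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
  C.NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
  C.SliceSize = C.NewEndOffset - C.NewBeginOffset;
  C.IsSplit =
      BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
  return C;
}

int main() {
  // A 16-byte access at offset 8 of the original alloca, rewritten against a
  // partition covering bytes [12, 28): only bytes [12, 24) are in range.
  ClampedSlice C = clampToPartition(8, 24, 12, 28);
  std::cout << C.NewBeginOffset << " " << C.NewEndOffset << " " << C.SliceSize
            << " split=" << C.IsSplit << "\n"; // prints "12 24 12 split=1"
  return 0;
}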
@@ -3266,15 +3463,28 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) { BeginOffset = SJ->beginOffset(); } -#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS) NumAllocaPartitions += NumPartitions; MaxPartitionsPerAlloca = std::max(NumPartitions, MaxPartitionsPerAlloca); -#endif return Changed; } +/// \brief Clobber a use with undef, deleting the used value if it becomes dead. +void SROA::clobberUse(Use &U) { + Value *OldV = U; + // Replace the use with an undef value. + U = UndefValue::get(OldV->getType()); + + // Check for this making an instruction dead. We have to garbage collect + // all the dead instructions to ensure the uses of any alloca end up being + // minimal. + if (Instruction *OldI = dyn_cast(OldV)) + if (isInstructionTriviallyDead(OldI)) { + DeadInsts.insert(OldI); + } +} + /// \brief Analyze an alloca for SROA. /// /// This analyzes the alloca to ensure we can reason about it, builds @@ -3303,37 +3513,34 @@ bool SROA::runOnAlloca(AllocaInst &AI) { Changed |= AggRewriter.rewrite(AI); // Build the slices using a recursive instruction-visiting builder. - AllocaSlices S(*DL, AI); - DEBUG(S.print(dbgs())); - if (S.isEscaped()) + AllocaSlices AS(*DL, AI); + DEBUG(AS.print(dbgs())); + if (AS.isEscaped()) return Changed; // Delete all the dead users of this alloca before splitting and rewriting it. - for (AllocaSlices::dead_user_iterator DI = S.dead_user_begin(), - DE = S.dead_user_end(); - DI != DE; ++DI) { + for (Instruction *DeadUser : AS.getDeadUsers()) { + // Free up everything used by this instruction. + for (Use &DeadOp : DeadUser->operands()) + clobberUse(DeadOp); + + // Now replace the uses of this instruction. + DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType())); + + // And mark it for deletion. + DeadInsts.insert(DeadUser); + Changed = true; + } + for (Use *DeadOp : AS.getDeadOperands()) { + clobberUse(*DeadOp); Changed = true; - (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType())); - DeadInsts.insert(*DI); - } - for (AllocaSlices::dead_op_iterator DO = S.dead_op_begin(), - DE = S.dead_op_end(); - DO != DE; ++DO) { - Value *OldV = **DO; - // Clobber the use with an undef value. - **DO = UndefValue::get(OldV->getType()); - if (Instruction *OldI = dyn_cast(OldV)) - if (isInstructionTriviallyDead(OldI)) { - Changed = true; - DeadInsts.insert(OldI); - } } // No slices to split. Leave the dead alloca for a later pass to clean up. - if (S.begin() == S.end()) + if (AS.begin() == AS.end()) return Changed; - Changed |= splitAlloca(AI, S); + Changed |= splitAlloca(AI, AS); DEBUG(dbgs() << " Speculating PHIs\n"); while (!SpeculatablePHIs.empty()) @@ -3355,17 +3562,17 @@ bool SROA::runOnAlloca(AllocaInst &AI) { /// /// We also record the alloca instructions deleted here so that they aren't /// subsequently handed to mem2reg to promote. -void SROA::deleteDeadInstructions(SmallPtrSet &DeletedAllocas) { +void SROA::deleteDeadInstructions(SmallPtrSetImpl &DeletedAllocas) { while (!DeadInsts.empty()) { Instruction *I = DeadInsts.pop_back_val(); DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n"); I->replaceAllUsesWith(UndefValue::get(I->getType())); - for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) - if (Instruction *U = dyn_cast(*OI)) { + for (Use &Operand : I->operands()) + if (Instruction *U = dyn_cast(Operand)) { // Zero out the operand and see if it becomes trivially dead. 
- *OI = 0; + Operand = nullptr; if (isInstructionTriviallyDead(U)) DeadInsts.insert(U); } @@ -3378,6 +3585,14 @@ void SROA::deleteDeadInstructions(SmallPtrSet &DeletedAllocas) { } } +static void enqueueUsersInWorklist(Instruction &I, + SmallVectorImpl &Worklist, + SmallPtrSetImpl &Visited) { + for (User *U : I.users()) + if (Visited.insert(cast(U)).second) + Worklist.push_back(cast(U)); +} + /// \brief Promote the allocas, using the best available technique. /// /// This attempts to promote whatever allocas have been identified as viable in @@ -3394,33 +3609,36 @@ bool SROA::promoteAllocas(Function &F) { if (DT && !ForceSSAUpdater) { DEBUG(dbgs() << "Promoting allocas with mem2reg...\n"); - PromoteMemToReg(PromotableAllocas, *DT); + PromoteMemToReg(PromotableAllocas, *DT, nullptr, AT); PromotableAllocas.clear(); return true; } DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n"); SSAUpdater SSA; - DIBuilder DIB(*F.getParent()); - SmallVector Insts; + DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false); + SmallVector Insts; + + // We need a worklist to walk the uses of each alloca. + SmallVector Worklist; + SmallPtrSet Visited; + SmallVector DeadInsts; for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) { AllocaInst *AI = PromotableAllocas[Idx]; - for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end(); - UI != UE;) { - Instruction *I = cast(*UI++); + Insts.clear(); + Worklist.clear(); + Visited.clear(); + + enqueueUsersInWorklist(*AI, Worklist, Visited); + + while (!Worklist.empty()) { + Instruction *I = Worklist.pop_back_val(); + // FIXME: Currently the SSAUpdater infrastructure doesn't reason about // lifetime intrinsics and so we strip them (and the bitcasts+GEPs // leading to them) here. Eventually it should use them to optimize the // scalar values produced. - if (isa(I) || isa(I)) { - assert(onlyUsedByLifetimeMarkers(I) && - "Found a bitcast used outside of a lifetime marker."); - while (!I->use_empty()) - cast(*I->use_begin())->eraseFromParent(); - I->eraseFromParent(); - continue; - } if (IntrinsicInst *II = dyn_cast(I)) { assert(II->getIntrinsicID() == Intrinsic::lifetime_start || II->getIntrinsicID() == Intrinsic::lifetime_end); @@ -3428,42 +3646,55 @@ bool SROA::promoteAllocas(Function &F) { continue; } - Insts.push_back(I); + // Push the loads and stores we find onto the list. SROA will already + // have validated that all loads and stores are viable candidates for + // promotion. + if (LoadInst *LI = dyn_cast(I)) { + assert(LI->getType() == AI->getAllocatedType()); + Insts.push_back(LI); + continue; + } + if (StoreInst *SI = dyn_cast(I)) { + assert(SI->getValueOperand()->getType() == AI->getAllocatedType()); + Insts.push_back(SI); + continue; + } + + // For everything else, we know that only no-op bitcasts and GEPs will + // make it this far, just recurse through them and recall them for later + // removal. + DeadInsts.push_back(I); + enqueueUsersInWorklist(*I, Worklist, Visited); } AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts); - Insts.clear(); + while (!DeadInsts.empty()) + DeadInsts.pop_back_val()->eraseFromParent(); + AI->eraseFromParent(); } PromotableAllocas.clear(); return true; } -namespace { - /// \brief A predicate to test whether an alloca belongs to a set. 
-  class IsAllocaInSet {
-    typedef SmallPtrSet SetType;
-    const SetType &Set;
-
-  public:
-    typedef AllocaInst *argument_type;
-
-    IsAllocaInSet(const SetType &Set) : Set(Set) {}
-    bool operator()(AllocaInst *AI) const { return Set.count(AI); }
-  };
-}
-
 bool SROA::runOnFunction(Function &F) {
+  if (skipOptnoneFunction(F))
+    return false;
+
   DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
   C = &F.getContext();
-  DL = getAnalysisIfAvailable();
-  if (!DL) {
+  DataLayoutPass *DLP = getAnalysisIfAvailable();
+  if (!DLP) {
     DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
     return false;
   }
-  DT = getAnalysisIfAvailable();
+  DL = &DLP->getDataLayout();
+  DominatorTreeWrapperPass *DTWP =
+      getAnalysisIfAvailable();
+  DT = DTWP ? &DTWP->getDomTree() : nullptr;
+  AT = &getAnalysis();
   BasicBlock &EntryBB = F.getEntryBlock();
-  for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
+  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
        I != E; ++I)
     if (AllocaInst *AI = dyn_cast(I))
       Worklist.insert(AI);
@@ -3481,11 +3712,14 @@ bool SROA::runOnFunction(Function &F) {
     // Remove the deleted allocas from various lists so that we don't try to
     // continue processing them.
     if (!DeletedAllocas.empty()) {
-      Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
-      PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
+      auto IsInSet = [&](AllocaInst *AI) {
+        return DeletedAllocas.count(AI);
+      };
+      Worklist.remove_if(IsInSet);
+      PostPromotionWorklist.remove_if(IsInSet);
       PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
                                              PromotableAllocas.end(),
-                                             IsAllocaInSet(DeletedAllocas)),
+                                             IsInSet),
                               PromotableAllocas.end());
       DeletedAllocas.clear();
     }
@@ -3501,7 +3735,8 @@ bool SROA::runOnFunction(Function &F) {
 }
 
 void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.addRequired();
   if (RequiresDomTree)
-    AU.addRequired();
+    AU.addRequired();
   AU.setPreservesCFG();
 }
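[Editor's note: the standalone sketch below is not part of the patch. It illustrates, with plain standard-library containers standing in for LLVM's SetVector and SmallPtrSet, the C++11 idiom the change above adopts: replacing the hand-written IsAllocaInSet predicate class with a local capturing lambda passed to remove_if.]

// Illustrative sketch only -- not part of SROA.cpp. One capturing lambda is
// shared by every container we filter, mirroring the local IsInSet lambda
// in SROA::runOnFunction.
#include <algorithm>
#include <iostream>
#include <set>
#include <vector>

int main() {
  std::vector<int> Worklist = {1, 2, 3, 4, 5};
  std::set<int> Deleted = {2, 4};

  // The lambda captures the "deleted" set by reference, so it always tests
  // against the current contents, just as IsInSet tests DeletedAllocas.
  auto IsInSet = [&](int V) { return Deleted.count(V) != 0; };

  // Standard erase/remove_if idiom, analogous to the PromotableAllocas.erase
  // call in the hunk above.
  Worklist.erase(std::remove_if(Worklist.begin(), Worklist.end(), IsInSet),
                 Worklist.end());

  for (int V : Worklist)
    std::cout << V << " "; // prints "1 3 5"
  std::cout << "\n";
  return 0;
}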